From ae8faeb2fba9b35ff722fe1adf3d2934ed528d3d Mon Sep 17 00:00:00 2001 From: matthias Date: Fri, 20 Mar 2026 19:05:49 +0100 Subject: [PATCH 01/27] added ENV for postgres,pgbackrest,monitor & connectionPooler --- .../cpo.opensource.cybertec.at/v1/crds.go | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go index bf7a3934..c1d66afb 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go @@ -759,6 +759,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "env": { + Type: "array", + Nullable: true, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), + }, + }, + }, }, }, "preparedDatabases": { @@ -1378,6 +1388,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "env": { + Type: "array", + Nullable: true, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), + }, + }, + }, }, }, }, @@ -1465,6 +1485,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ "image": { Type: "string", }, + "env": { + Type: "array", + Nullable: true, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), + }, + }, + }, }, }, }, @@ -2311,6 +2341,16 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "connection_pooler_user": { Type: "string", }, + "env": { + Type: "array", + Nullable: true, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), + }, + }, + }, }, }, "multisite": { From c9caf49af07d9fd08f58d41446f13cf8daaf1565 Mon Sep 17 00:00:00 2001 From: matthias Date: Fri, 20 Mar 
2026 19:07:51 +0100 Subject: [PATCH 02/27] added ENV for postgres,pgbackrest,monitor & connectionPooler --- .../v1/postgresql_type.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go b/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go index 51387778..576dd85b 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go @@ -154,6 +154,7 @@ type AdditionalVolume struct { type PostgresqlParam struct { PgVersion string `json:"version"` Parameters map[string]string `json:"parameters,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` } // ResourceDescription describes CPU and memory resources defined for a cluster. @@ -246,12 +247,13 @@ type PostgresStatus struct { // makes sense to expose. E.g. pool size (min/max boundaries), max client // connections etc. type ConnectionPooler struct { - NumberOfInstances *int32 `json:"numberOfInstances,omitempty"` - Schema string `json:"schema,omitempty"` - User string `json:"user,omitempty"` - Mode string `json:"mode,omitempty"` - DockerImage string `json:"dockerImage,omitempty"` - MaxDBConnections *int32 `json:"maxDBConnections,omitempty"` + NumberOfInstances *int32 `json:"numberOfInstances,omitempty"` + Schema string `json:"schema,omitempty"` + User string `json:"user,omitempty"` + Mode string `json:"mode,omitempty"` + DockerImage string `json:"dockerImage,omitempty"` + MaxDBConnections *int32 `json:"maxDBConnections,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` *Resources `json:"resources,omitempty"` } @@ -285,6 +287,7 @@ type Pgbackrest struct { Restore Restore `json:"restore"` Configuration Configuration `json:"configuration"` Resources *Resources `json:"resources,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` } type PgbackrestClone struct { @@ -323,7 +326,8 @@ type TDE struct { // Monitoring Sidecar defines a container to be run in the 
same pod as the Postgres container. type Monitoring struct { - Image string `json:"image,omitempty"` + Image string `json:"image,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` } // Multisite enables cross Kubernetes replication coordinated via etcd From 7462e0a6b80efae31fcf8b5a0979540baa7eff37 Mon Sep 17 00:00:00 2001 From: matthias Date: Fri, 20 Mar 2026 19:18:34 +0100 Subject: [PATCH 03/27] added missing exporter-object and new env-objects --- .../postgres-operator/crds/postgresqls.yaml | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 49627603..2b24b838 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -206,6 +206,12 @@ spec: type: string user: type: string + env: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true databases: type: object additionalProperties: @@ -375,6 +381,18 @@ spec: type: object additionalProperties: type: string + monitor: + nullable: true + properties: + image: + type: string + env: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: object nodeAffinity: type: object properties: @@ -553,6 +571,12 @@ spec: type: object additionalProperties: type: string + env: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true preparedDatabases: type: object additionalProperties: @@ -976,6 +1000,12 @@ spec: memory: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + env: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true required: - image - repos From f7c7ebda724b9c4c570ef173ca3384a76526320b Mon Sep 17 00:00:00 2001 From: matthias Date: Fri, 20 Mar 2026 19:43:17 +0100 Subject: [PATCH 04/27] added global and pg-specific env to pg-container --- 
pkg/cluster/k8sres.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index cccc7ee3..fe19e148 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1081,6 +1081,10 @@ func (c *Cluster) generateSpiloPodEnvVars( envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"}) } + // fetch postgres-specific variables that will override all subsequent global variables + if len(spec.PostgresqlParam.Env) > 0 { + envVars = appendEnvVars(envVars, spec.Env...) + } // fetch cluster-specific variables that will override all subsequent global variables if len(spec.Env) > 0 { envVars = appendEnvVars(envVars, spec.Env...) From 1780a19fbd4b3235067184239113c9db77d9e640 Mon Sep 17 00:00:00 2001 From: matthias Date: Fri, 20 Mar 2026 20:42:46 +0100 Subject: [PATCH 05/27] added ENVs to postgres and pgbackrest containers (container, init-container & job-container) --- pkg/cluster/k8sres.go | 57 ++++++++++++++++++++++++++++++-------- pkg/cluster/k8sres_test.go | 4 +-- pkg/cluster/sync.go | 2 +- 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index fe19e148..42e48f52 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -932,7 +932,7 @@ func (c *Cluster) generatePodTemplate( } // generatePodEnvVars generates environment variables for the Spilo Pod -func (c *Cluster) generateSpiloPodEnvVars( +func (c *Cluster) generatePostgresContainerEnvVars( spec *cpov1.PostgresSpec, uid types.UID, spiloConfiguration string) ([]v1.EnvVar, error) { @@ -1083,7 +1083,7 @@ func (c *Cluster) generateSpiloPodEnvVars( // fetch postgres-specific variables that will override all subsequent global variables if len(spec.PostgresqlParam.Env) > 0 { - envVars = appendEnvVars(envVars, spec.Env...) + envVars = appendEnvVars(envVars, spec.PostgresqlParam.Env...) 
} // fetch cluster-specific variables that will override all subsequent global variables if len(spec.Env) > 0 { @@ -1157,8 +1157,9 @@ func (c *Cluster) generateSpiloPodEnvVars( } // generatePodEnvVars generates environment variables for the Spilo Pod -func (c *Cluster) generatepgBackRestPodEnvVars() []v1.EnvVar { - return []v1.EnvVar{ +func (c *Cluster) generatepgBackRestPodEnvVars(spec *cpov1.PostgresSpec) ([]v1.EnvVar, error) { + + envVars := []v1.EnvVar{ { Name: "USE_PGBACKREST", Value: "true", @@ -1168,6 +1169,17 @@ func (c *Cluster) generatepgBackRestPodEnvVars() []v1.EnvVar { Value: "repo", }, } + + // fetch pgbackrest-specific variables that will override all subsequent global variables + if len(spec.Backup.Pgbackrest.Env) > 0 { + envVars = appendEnvVars(envVars, spec.Backup.Pgbackrest.Env...) + } + // fetch cluster-specific variables that will override all subsequent global variables + if len(spec.Env) > 0 { + envVars = appendEnvVars(envVars, spec.Env...) + } + + return envVars, nil } func copyEnvVars(envs []v1.EnvVar) []v1.EnvVar { @@ -1439,10 +1451,10 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err) } - // generate environment variables for the spilo container - spiloEnvVars, err := c.generateSpiloPodEnvVars(spec, c.Postgresql.GetUID(), spiloConfiguration) + // generate environment variables for the postgres container + spiloEnvVars, err := c.generatePostgresContainerEnvVars(spec, c.Postgresql.GetUID(), spiloConfiguration) if err != nil { - return nil, fmt.Errorf("could not generate Spilo env vars: %v", err) + return nil, fmt.Errorf("could not generate Postgres-Container env vars: %v", err) } // pickup the docker image for the spilo container @@ -1783,6 +1795,7 @@ func (c *Cluster) generatePgbackrestRestoreContainer(spec *cpov1.PostgresSpec, r }, }, } + if repo_host_mode { pgbackrestRestoreEnvVars = appendEnvVars( pgbackrestRestoreEnvVars, 
v1.EnvVar{ @@ -1804,6 +1817,15 @@ func (c *Cluster) generatePgbackrestRestoreContainer(spec *cpov1.PostgresSpec, r }) } + // fetch pgbackrest-specific variables that will override all subsequent global variables + if len(spec.Backup.Pgbackrest.Env) > 0 { + pgbackrestRestoreEnvVars = appendEnvVars(pgbackrestRestoreEnvVars, spec.Backup.Pgbackrest.Env...) + } + // fetch cluster-specific variables that will override all subsequent global variables + if len(spec.Env) > 0 { + pgbackrestRestoreEnvVars = appendEnvVars(pgbackrestRestoreEnvVars, spec.Env...) + } + return v1.Container{ Name: constants.RestoreContainerName, Image: spec.Backup.Pgbackrest.Image, @@ -1838,7 +1860,10 @@ func (c *Cluster) generateRepoHostStatefulSet(spec *cpov1.PostgresSpec) (*appsv1 } // generate environment variables for the spilo container - repoEnvVars := c.generatepgBackRestPodEnvVars() + repoEnvVars, err := c.generatepgBackRestPodEnvVars(spec) + if err != nil { + return nil, fmt.Errorf("could not generate pgBackRest-RepoHost env vars: %v", err) + } // determine the User, Group and FSGroup for the spilo pod effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser @@ -3397,7 +3422,7 @@ func renderPgbackrestConfig(config map[string]map[string]string) (string, error) return out.String(), nil } -func (c *Cluster) generatePgbackrestJob(backup *cpov1.Pgbackrest, repo *cpov1.Repo, backupType string, schedule string) (*batchv1.CronJob, error) { +func (c *Cluster) generatePgbackrestJob(spec *cpov1.PostgresSpec, backup *cpov1.Pgbackrest, repo *cpov1.Repo, backupType string, schedule string) (*batchv1.CronJob, error) { var ( err error @@ -3413,7 +3438,7 @@ func (c *Cluster) generatePgbackrestJob(backup *cpov1.Pgbackrest, repo *cpov1.Re emptyResourceRequirements := v1.ResourceRequirements{} resourceRequirements = &emptyResourceRequirements - envVars := c.generatePgbackrestBackupJobEnvVars(repo, backupType) + envVars := c.generatePgbackrestBackupJobEnvVars(spec, repo, backupType) pgbackrestContainer := 
generateContainer( constants.BackupContainerName, &c.Postgresql.Spec.Backup.Pgbackrest.Image, @@ -3512,7 +3537,7 @@ func (c *Cluster) generatePgbackrestJob(backup *cpov1.Pgbackrest, repo *cpov1.Re return cronJob, nil } -func (c *Cluster) generatePgbackrestBackupJobEnvVars(repo *cpov1.Repo, backupType string) []v1.EnvVar { +func (c *Cluster) generatePgbackrestBackupJobEnvVars(spec *cpov1.PostgresSpec, repo *cpov1.Repo, backupType string) []v1.EnvVar { selector := c.roleLabelsSet(false, Master).String() targetContainer := constants.PostgresContainerName if repo.Storage == "pvc" { @@ -3544,6 +3569,16 @@ func (c *Cluster) generatePgbackrestBackupJobEnvVars(repo *cpov1.Repo, backupTyp Value: selector, }, } + + // fetch pgbackrest-specific variables that will override all subsequent global variables + if len(spec.Backup.Pgbackrest.Env) > 0 { + envVars = appendEnvVars(envVars, spec.Backup.Pgbackrest.Env...) + } + // fetch cluster-specific variables that will override all subsequent global variables + if len(spec.Env) > 0 { + envVars = appendEnvVars(envVars, spec.Env...) 
+ } + return envVars } diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index a57be951..36225d88 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -517,7 +517,7 @@ func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) return nil } -func TestGenerateSpiloPodEnvVars(t *testing.T) { +func TestGeneratePostgresContainerEnvVars(t *testing.T) { var dummyUUID = "efd12e58-5786-11e8-b5a7-06148230260c" expectedClusterNameLabel := []ExpectedValue{ @@ -959,7 +959,7 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { pgsql.Spec.StandbyCluster = tt.standbyDescription c.Postgresql = pgsql - actualEnvs, err := c.generateSpiloPodEnvVars(&pgsql.Spec, types.UID(dummyUUID), exampleSpiloConfig) + actualEnvs, err := c.generatePostgresContainerEnvVars(&pgsql.Spec, types.UID(dummyUUID), exampleSpiloConfig) assert.NoError(t, err) for _, ev := range tt.expectedValues { diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 7ebcc8b8..cbfaba2b 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -1662,7 +1662,7 @@ func (c *Cluster) syncPgbackrestJob(forceRemove bool) error { for _, repo := range c.Postgresql.Spec.Backup.Pgbackrest.Repos { for name, schedule := range repo.Schedule { if rep == repo.Name && name == schedul { - job, err := c.generatePgbackrestJob(c.Postgresql.Spec.Backup.Pgbackrest, &repo, name, schedule) + job, err := c.generatePgbackrestJob(&c.Postgresql.Spec, c.Postgresql.Spec.Backup.Pgbackrest, &repo, name, schedule) if err != nil { return fmt.Errorf("could not generate pgbackrest job: %v", err) } From d6a907672aab2b49565b57a571e65d4a5001dca2 Mon Sep 17 00:00:00 2001 From: matthias Date: Fri, 20 Mar 2026 20:52:31 +0100 Subject: [PATCH 06/27] added ENVs to connection-pooler --- pkg/cluster/connection_pooler.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 27a310ef..28df9d1a 100644 --- 
a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -218,7 +218,7 @@ func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar { minSize := defaultSize / 2 reserveSize := minSize - return []v1.EnvVar{ + envVars := []v1.EnvVar{ { Name: "CONNECTION_POOLER_PORT", Value: fmt.Sprint(pgPort), @@ -248,6 +248,17 @@ func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar { Value: fmt.Sprint(maxDBConn), }, } + + // fetch connection_pooler-specific variables that will override all subsequent global variables + if len(connectionPoolerSpec.Env) > 0 { + envVars = appendEnvVars(envVars, connectionPoolerSpec.Env...) + } + // fetch cluster-specific variables that will override all subsequent global variables + if len(spec.Env) > 0 { + envVars = appendEnvVars(envVars, spec.Env...) + } + + return envVars } func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( From 908b35761835061c81ea530251aade6f74c3f560 Mon Sep 17 00:00:00 2001 From: matthias Date: Fri, 20 Mar 2026 20:53:28 +0100 Subject: [PATCH 07/27] added ENVs to postgres and pgbackrest containers (container, init-container & job-container) --- pkg/cluster/k8sres.go | 16 +++++++++++++--- pkg/cluster/resources.go | 2 +- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 42e48f52..e1f4756c 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -3143,8 +3143,8 @@ func (c *Cluster) getMonitoringSecretName() string { "tprgroup", cpo.GroupName) } -func (c *Cluster) generateMonitoringEnvVars() []v1.EnvVar { - env := []v1.EnvVar{ +func (c *Cluster) generateMonitoringEnvVars(spec *cpov1.PostgresSpec, monitor *cpov1.Monitoring) []v1.EnvVar { + envVars := []v1.EnvVar{ { Name: "DATA_SOURCE_URI", Value: "localhost:5432/postgres?sslmode=disable", @@ -3165,7 +3165,17 @@ func (c *Cluster) generateMonitoringEnvVars() []v1.EnvVar { }, }, } - return env + + // fetch monitoring-specific variables that will override all 
subsequent global variables + if len(monitor.Env) > 0 { + envVars = appendEnvVars(envVars, monitor.Env...) + } + // fetch cluster-specific variables that will override all subsequent global variables + if len(spec.Env) > 0 { + envVars = appendEnvVars(envVars, spec.Env...) + } + + return envVars } func (c *Cluster) getPgbackrestRestoreConfigmapName() (jobName string) { diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index b5122ccb..00f7bec1 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -88,7 +88,7 @@ func (c *Cluster) generateExporterSidecar() *cpov1.Sidecar { Protocol: v1.ProtocolTCP, }, }, - Env: c.generateMonitoringEnvVars(), + Env: c.generateMonitoringEnvVars(&c.Postgresql.Spec, monitor), SecurityContext: &v1.SecurityContext{ AllowPrivilegeEscalation: c.OpConfig.Resources.SpiloAllowPrivilegeEscalation, Privileged: &c.OpConfig.Resources.SpiloPrivileged, From 5a423ef6d3963ad0d1be30e7d46d7c1ef380cd1f Mon Sep 17 00:00:00 2001 From: matthias Date: Sat, 21 Mar 2026 10:10:30 +0100 Subject: [PATCH 08/27] crd fix for env (connection-pooler) --- .../cpo.opensource.cybertec.at/v1/crds.go | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go index c1d66afb..83183a18 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go @@ -338,6 +338,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ "user": { Type: "string", }, + "env": { + Type: "array", + Nullable: true, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), + }, + }, + }, }, }, "databases": { @@ -1388,16 +1398,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, - "env": { - Type: "array", - Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ - 
Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), - }, - }, - }, }, }, }, @@ -1453,6 +1453,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "env": { + Type: "array", + Nullable: true, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), + }, + }, + }, }, }, }, @@ -2341,16 +2351,6 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "connection_pooler_user": { Type: "string", }, - "env": { - Type: "array", - Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), - }, - }, - }, }, }, "multisite": { From 5c626e43256cc3a76786b822e904523ba4f48a8a Mon Sep 17 00:00:00 2001 From: matthias Date: Sat, 21 Mar 2026 10:18:52 +0100 Subject: [PATCH 09/27] update crd-docu --- docs/hugo/content/en/crd/crd-postgresql.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/hugo/content/en/crd/crd-postgresql.md b/docs/hugo/content/en/crd/crd-postgresql.md index 92325e82..3faa1d6e 100644 --- a/docs/hugo/content/en/crd/crd-postgresql.md +++ b/docs/hugo/content/en/crd/crd-postgresql.md @@ -35,7 +35,7 @@ weight: 331 | enableMasterPoolerLoadBalancer | boolean | false | Define whether to enable the load balancer pointing to the primary ConnectionPooler | | enableReplicaPoolerLoadBalancer| boolean | false | Define whether to enable the load balancer pointing to the Replica-ConnectionPooler | | enableShmVolume | boolean | false | Start a database pod without limitations on shm memory. By default Docker limit /dev/shm to 64M (see e.g. the docker issue, which could be not enough if PostgreSQL uses parallel workers heavily. If this option is present and value is true, to the target database pod will be mounted a new tmpfs volume to remove this limitation. 
| -| [env](#env) | array | false | Allows to add own Envs to the PostgreSQL containers | +| [env](#env) | array | false | Allows you to add custom environment variables to all cluster containers | | [initContainers](#initcontainers) | array | false | Enables the definition of init-containers | | logicalBackupSchedule | string | false | Enables the scheduling of logical backups based on cron-syntax. Example: `30 00 * * *` | | maintenanceWindows | array | false | Enables the definition of maintenance windows for the cluster. Example: `Sat:00:00-04:00` | @@ -113,6 +113,7 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| +| [env](#env) | array | false | Allows you to add custom environment variables to connection-pooler containers | | numberOfInstances | int | true | Number of Pods per Pooler | | mode | string | true | pooling mode for pgBouncer (session, transaction, statement) | | schema | string | true | Schema for Pooler (Default: pooler) | @@ -153,6 +154,7 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| +| [env](#env) | array | false | Allows you to add custom environment variables to all exporter-sidecar containers | | image | string | true | Docker-Image for the metric exporter | {{< back >}} @@ -184,6 +186,7 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| +| [env](#env) | array | false | Allows you to add custom environment variables to all postgresql containers | | parameters | map | false | PostgreSQL-Parameter as item (Example: max_connections: "100"). 
For help check out the [CYBERTEC PostgreSQL Configurator](https://pgconfigurator.cybertec.at) | | version | string | false | a map of key-value pairs describing initdb parameters | @@ -401,6 +404,7 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| | [configuration](#configuration)| object | false | Enables the definition of a pgbackrest-setup for the cluster | +| [env](#env) | array | false | Allows you to add custom environment variables to all pgbackrest containers | | global | object | false | | | image | string | true | | | [repos](#repos) | array | true | | From 36e4a51c35c14df70d2c947f5974f3f9854b3113 Mon Sep 17 00:00:00 2001 From: matthias Date: Sat, 21 Mar 2026 10:21:51 +0100 Subject: [PATCH 10/27] added ENV to pooler doc --- docs/hugo/content/en/connection_pooler/_index.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/hugo/content/en/connection_pooler/_index.md b/docs/hugo/content/en/connection_pooler/_index.md index f62488b2..93c6093a 100644 --- a/docs/hugo/content/en/connection_pooler/_index.md +++ b/docs/hugo/content/en/connection_pooler/_index.md @@ -31,6 +31,7 @@ CPO relies on pgBouncer, a popular and above all lightweight open source tool. p - connection_poole.max_db_connections - How many connections the pooler can max hold. This value is divided among the pooler pods. Default is 60 which will make up 30 connections per pod for the default setup with two instances. - connection_pooler.mode - Defines pooler mode. Available Value: `session`, `transaction` or `statement`. Default is `transaction`. - connection_pooler.resources - Hardware definition for the pooler pods +- env: Allows you to add custom environment variables - enableConnectionPooler - Defines whether poolers for read/write access should be created based on the spec.connectionPooler definition. 
- enableReplicaConnectionPooler- Defines whether poolers for read-only access should be created based on the spec.connectionPooler definition. @@ -38,6 +39,9 @@ CPO relies on pgBouncer, a popular and above all lightweight open source tool. p ``` spec: connectionPooler: + env: + - name: POOLER_ENV + value: 'custom value' mode: transaction numberOfInstances: 2 resources: From 965c8c3962552e1da23efc673b09eed737470066 Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 Mar 2026 08:55:13 +0100 Subject: [PATCH 11/27] add ENV to documentation and change weight for pages --- docs/hugo/content/en/customize_cluster/env.md | 31 +++++++++++++++++++ .../content/en/customize_cluster/sidecars.md | 2 +- 2 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 docs/hugo/content/en/customize_cluster/env.md diff --git a/docs/hugo/content/en/customize_cluster/env.md b/docs/hugo/content/en/customize_cluster/env.md new file mode 100644 index 00000000..63acb437 --- /dev/null +++ b/docs/hugo/content/en/customize_cluster/env.md @@ -0,0 +1,31 @@ +--- +title: "Environment variables" +date: 2023-12-28T14:26:51+01:00 +draft: false +weight: 1 +--- + +To flexibly manage containers within a cluster, the operator allows environment variables to be defined at various levels. This enables both global settings and specific configurations for individual components. +### Hierarchy and Scope +The variables are defined within the Custom Resource (CR). The following logic applies for inheritance and assignment: + +| object | Scope | Description | +| :--- | :--- | :--- | +| `spec.env` | **Global** | These ENVs are inherited by **all** containers within the cluster (PostgreSQL, Backup, Monitoring, etc.). | +| `spec.postgresql.env` | **PostgreSQL** | These ENVs apply exclusively to the **PostgreSQL containers**. | +| `spec.backup.pgbackrest.env` | **Backup** | These ENVs apply exclusively to the **Backup containers**. 
| +| `spec.monitor.env` | **Monitoring** | These ENVs apply exclusively to the **Monitoring sidecars**. | +| `spec.connectionPooler.env` | **ConnectionPooler** | These ENVs apply exclusively to the **ConnectionPooler containers**. | + +{{< hint type=Warning >}}Updating the ENVs triggers a rolling update to the respective containers.{{< /hint >}} + + +### Configuration Logic + +The definition of variables follows the standard Kubernetes schema for key-value pairs. + +```yaml +env: + - name: ENV_NAME + value: 'value' +``` \ No newline at end of file diff --git a/docs/hugo/content/en/customize_cluster/sidecars.md b/docs/hugo/content/en/customize_cluster/sidecars.md index babbec38..3105ff4c 100644 --- a/docs/hugo/content/en/customize_cluster/sidecars.md +++ b/docs/hugo/content/en/customize_cluster/sidecars.md @@ -2,7 +2,7 @@ title: "Sidecars" date: 2023-12-28T14:26:51+01:00 draft: false -weight: 1 +weight: 2 --- Starting with the Single-Node-Cluster from the previous section, we want to modify the Instance a bit to see. 
## CPU and Memory From 6ac945de7f363f01248999f0902ff50617b338c4 Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 Mar 2026 11:03:33 +0100 Subject: [PATCH 12/27] add label-array to crd --- .../cpo.opensource.cybertec.at/v1/crds.go | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go index 83183a18..04c6e32e 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go @@ -348,6 +348,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "labels": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, }, }, "databases": { @@ -395,6 +403,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "labels": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "init_containers": { Type: "array", Description: "deprecated", @@ -779,6 +795,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "labels": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, }, }, "preparedDatabases": { @@ -1463,6 +1487,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "labels": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, }, }, }, @@ -1505,6 +1537,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "labels": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, }, }, }, From 41dee356b8948d349382a3e2bd26942cedd2d606 Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 
Mar 2026 11:07:51 +0100 Subject: [PATCH 13/27] remove labels from monitor-object, bcs. only use postgres-object for definition --- pkg/apis/cpo.opensource.cybertec.at/v1/crds.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go index 04c6e32e..c21ac36c 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go @@ -1537,14 +1537,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, - "labels": { - Type: "array", - Items: &apiextv1.JSONSchemaPropsOrArray{ - Schema: &apiextv1.JSONSchemaProps{ - Type: "string", - }, - }, - }, }, }, }, From fc7286741184b0914263cfa00de3be31d297b898 Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 Mar 2026 13:07:26 +0100 Subject: [PATCH 14/27] updated crd labels with value --- .../cpo.opensource.cybertec.at/v1/crds.go | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go index c21ac36c..fa2951c3 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go @@ -349,10 +349,12 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "array", + Nullable: true, Items: &apiextv1.JSONSchemaPropsOrArray{ Schema: &apiextv1.JSONSchemaProps{ - Type: "string", + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, @@ -404,10 +406,12 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "array", + Nullable: true, Items: &apiextv1.JSONSchemaPropsOrArray{ Schema: &apiextv1.JSONSchemaProps{ - Type: "string", + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, @@ -796,10 +800,12 @@ var PostgresCRDResourceValidation = 
apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "array", + Nullable: true, Items: &apiextv1.JSONSchemaPropsOrArray{ Schema: &apiextv1.JSONSchemaProps{ - Type: "string", + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, @@ -1488,10 +1494,12 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "array", + Nullable: true, Items: &apiextv1.JSONSchemaPropsOrArray{ Schema: &apiextv1.JSONSchemaProps{ - Type: "string", + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, From 38268adef1b9ac0b69a1b925ba82a75feb57acbd Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 Mar 2026 13:07:59 +0100 Subject: [PATCH 15/27] add labels to connectionpooler --- pkg/cluster/connection_pooler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 28df9d1a..3c6d0ca2 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -112,7 +112,7 @@ func (c *Cluster) poolerUser(spec *cpov1.PostgresSpec) string { // when listing pooler k8s objects func (c *Cluster) poolerLabelsSet(addExtraLabels bool) labels.Set { - poolerLabels := c.labelsSet(addExtraLabels) + poolerLabels := c.labelsSetWithType(addExtraLabels, TYPE_POOLER) // TODO should be config values poolerLabels["application"] = "db-connection-pooler" return poolerLabels From 7abe03417c2c49a5615325a94d47c9809126afb1 Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 Mar 2026 13:08:47 +0100 Subject: [PATCH 16/27] update util.go for handling pod-specific-labels --- pkg/cluster/util.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 3a54bc1e..1b71c01a 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -548,6 +548,30 @@ func (c *Cluster) labelsSetWithType(shouldAddExtraLabels bool, typeLabel PodType } if 
shouldAddExtraLabels { + for _, label := range c.Postgresql.Spec.Labels { + lbls[label.Name] = label.Value + } + switch typeLabel { + case TYPE_POSTGRESQL: + for _, label := range c.Postgresql.Spec.PostgresqlParam.Labels { + lbls[label.Name] = label.Value + } + + case TYPE_REPOSITORY, TYPE_BACKUP_JOB: + if c.Postgresql.Spec.Backup != nil && c.Postgresql.Spec.Backup.Pgbackrest != nil { + for _, label := range c.Postgresql.Spec.Backup.Pgbackrest.Labels { + lbls[label.Name] = label.Value + } + } + + case TYPE_POOLER: + if c.Postgresql.Spec.ConnectionPooler != nil { + for _, label := range c.Postgresql.Spec.ConnectionPooler.Labels { + lbls[label.Name] = label.Value + } + } + } + // enables filtering resources owned by a team lbls["team"] = c.Postgresql.Spec.TeamID From c6b8c565ab6e6d5f8231eabd0d8bf7049e67e33a Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 Mar 2026 13:09:18 +0100 Subject: [PATCH 17/27] update util.go for handling pod-specific-labels --- pkg/cluster/util.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 1b71c01a..a3bf17dc 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -556,6 +556,11 @@ func (c *Cluster) labelsSetWithType(shouldAddExtraLabels bool, typeLabel PodType for _, label := range c.Postgresql.Spec.PostgresqlParam.Labels { lbls[label.Name] = label.Value } + if c.Postgresql.Spec.Backup != nil && c.Postgresql.Spec.Backup.Pgbackrest != nil { + for _, label := range c.Postgresql.Spec.Backup.Pgbackrest.Labels { + lbls[label.Name] = label.Value + } + } case TYPE_REPOSITORY, TYPE_BACKUP_JOB: if c.Postgresql.Spec.Backup != nil && c.Postgresql.Spec.Backup.Pgbackrest != nil { From 26a1c1445bdd2803dc88b48f4312a3628ffec401 Mon Sep 17 00:00:00 2001 From: matthias Date: Mon, 23 Mar 2026 14:08:18 +0100 Subject: [PATCH 18/27] Ensure based on label changes pooler deployment is synced --- pkg/cluster/connection_pooler.go | 57 ++++++++++++++------------------ pkg/cluster/sync.go | 14 
++++++-- 2 files changed, 35 insertions(+), 36 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 3c6d0ca2..e56568c0 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -2,7 +2,9 @@ package cluster import ( "context" + "encoding/json" "fmt" + "reflect" "strings" "time" @@ -683,14 +685,19 @@ func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDe return nil, fmt.Errorf("there is no connection pooler in the cluster") } - patchData, err := specPatch(newDeployment.Spec) + // Wir erstellen ein kombiniertes Patch-Objekt + patch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": newDeployment.Labels, + }, + "spec": newDeployment.Spec, + } + + patchData, err := json.Marshal(patch) if err != nil { return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err) } - // An update probably requires RetryOnConflict, but since only one operator - // worker at one time will try to update it chances of conflicts are - // minimal. deployment, err := KubeClient. Deployments(newDeployment.Namespace).Patch( context.TODO(), @@ -1009,47 +1016,31 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *cpov1.Postgresql, c.ConnectionPooler[role].Deployment = deployment // actual synchronization - var oldConnectionPooler *cpov1.ConnectionPooler - - if oldSpec != nil { - oldConnectionPooler = oldSpec.Spec.ConnectionPooler - } - - newConnectionPooler := newSpec.Spec.ConnectionPooler - // sync implementation below assumes that both old and new specs are - // not nil, but it can happen. To avoid any confusion like updating a - // deployment because the specification changed from nil to an empty - // struct (that was initialized somewhere before) replace any nil with - // an empty spec. 
- if oldConnectionPooler == nil { - oldConnectionPooler = &cpov1.ConnectionPooler{} - } - - if newConnectionPooler == nil { - newConnectionPooler = &cpov1.ConnectionPooler{} + desired, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) + if err != nil { + return NoSync, fmt.Errorf("could not generate desired deployment: %v", err) } var specSync bool var specReason []string + labelsSync := !reflect.DeepEqual(deployment.Labels, desired.Labels) + if labelsSync { + syncReason = append(syncReason, "labels changed") + } + if oldSpec != nil { - specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger) + specSync, specReason = needSyncConnectionPoolerSpecs(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler, c.logger) syncReason = append(syncReason, specReason...) } - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment) + defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(&c.Config, newSpec.Spec.ConnectionPooler, deployment) syncReason = append(syncReason, defaultsReason...) 
- if specSync || defaultsSync { - c.logger.Infof("update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(role), syncReason) - newDeployment, err = c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) - if err != nil { - return syncReason, fmt.Errorf("could not generate deployment for connection pooler: %v", err) - } - - deployment, err = updateConnectionPoolerDeployment(c.KubeClient, newDeployment) + if labelsSync || specSync || defaultsSync { + c.logger.Infof("update connection pooler deployment %s, reason: %+v", c.connectionPoolerName(role), syncReason) + deployment, err = updateConnectionPoolerDeployment(c.KubeClient, desired) if err != nil { return syncReason, err } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index cbfaba2b..258b840d 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -172,6 +172,7 @@ func generateSerialNumber() (*big.Int, error) { // Unlike the update, sync does not error out if some objects do not exist and takes care of creating them. 
func (c *Cluster) Sync(newSpec *cpov1.Postgresql) error { var err error + var syncErrors []error c.mu.Lock() defer c.mu.Unlock() @@ -234,7 +235,8 @@ func (c *Cluster) Sync(newSpec *cpov1.Postgresql) error { if err = c.syncStatefulSet(); err != nil { if !k8sutil.ResourceAlreadyExists(err) { err = fmt.Errorf("could not sync statefulsets: %v", err) - return err + syncErrors = append(syncErrors, err) + // return err } } @@ -304,7 +306,9 @@ func (c *Cluster) Sync(newSpec *cpov1.Postgresql) error { // sync connection pooler if _, err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { - return fmt.Errorf("could not sync connection pooler: %v", err) + // return fmt.Errorf("could not sync connection pooler: %v", err) + err = fmt.Errorf("could not sync connection pooler: %v", err) + syncErrors = append(syncErrors, err) } if len(c.Spec.Streams) > 0 { @@ -331,7 +335,11 @@ func (c *Cluster) Sync(newSpec *cpov1.Postgresql) error { c.logger.Errorf("major version upgrade failed: %v", err) } - return err + if len(syncErrors) > 0 { + return fmt.Errorf("multiple sync errors: %v", syncErrors) + } + return nil + // return err } func (c *Cluster) deletePgbackrestRepoHostObjects() error { From ec804f9ec0fdc5f1f762e5ebd8d0875274baeb2c Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 19:04:57 +0100 Subject: [PATCH 19/27] add labels to connectionPooler and update code and test_class --- pkg/cluster/connection_pooler.go | 98 ++++++++++++++++++--------- pkg/cluster/connection_pooler_test.go | 12 ++-- 2 files changed, 71 insertions(+), 39 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index e56568c0..63fb5dfe 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -2,7 +2,6 @@ package cluster import ( "context" - "encoding/json" "fmt" "reflect" "strings" @@ -112,9 +111,9 @@ func (c *Cluster) poolerUser(spec *cpov1.PostgresSpec) string { } // when listing pooler k8s 
objects -func (c *Cluster) poolerLabelsSet(addExtraLabels bool) labels.Set { +func (c *Cluster) poolerLabelsSet(addExtraLabels bool, isPod bool) labels.Set { - poolerLabels := c.labelsSetWithType(addExtraLabels, TYPE_POOLER) + poolerLabels := c.labelsSetWithType(addExtraLabels, TYPE_POOLER, isPod) // TODO should be config values poolerLabels["application"] = "db-connection-pooler" return poolerLabels @@ -126,8 +125,8 @@ func (c *Cluster) poolerLabelsSet(addExtraLabels bool) labels.Set { // have e.g. different `application` label, so that recreatePod operation will // not interfere with it (it lists all the pods via labels, and if there would // be no difference, it will recreate also pooler pods). -func (c *Cluster) connectionPoolerLabels(role PostgresRole, addExtraLabels bool) *metav1.LabelSelector { - poolerLabelsSet := c.poolerLabelsSet(addExtraLabels) +func (c *Cluster) connectionPoolerLabels(addExtraLabels bool, role PostgresRole, isPod bool) *metav1.LabelSelector { + poolerLabelsSet := c.poolerLabelsSet(addExtraLabels, isPod) // TODO should be config values poolerLabelsSet["connection-pooler"] = c.connectionPoolerName(role) @@ -412,7 +411,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( podTemplate := &v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: c.connectionPoolerLabels(role, true).MatchLabels, + Labels: c.connectionPoolerLabels(true, role, true).MatchLabels, Namespace: c.Namespace, Annotations: c.annotationsSet(c.generatePodAnnotations(spec)), }, @@ -429,7 +428,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity) if c.OpConfig.EnablePodAntiAffinity { - labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels) + labelsSet := labels.Set(c.connectionPoolerLabels(false, role, false).MatchLabels) podTemplate.Spec.Affinity = podAffinity( labelsSet, c.OpConfig.PodAntiAffinityTopologyKey, @@ 
-482,7 +481,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio ObjectMeta: metav1.ObjectMeta{ Name: connectionPooler.Name, Namespace: connectionPooler.Namespace, - Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels, + Labels: c.connectionPoolerLabels(true, connectionPooler.Role, false).MatchLabels, Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -494,7 +493,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio }, Spec: appsv1.DeploymentSpec{ Replicas: numberOfInstances, - Selector: c.connectionPoolerLabels(connectionPooler.Role, false), + Selector: c.connectionPoolerLabels(false, connectionPooler.Role, false), Template: *podTemplate, }, } @@ -527,7 +526,7 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo ObjectMeta: metav1.ObjectMeta{ Name: connectionPooler.Name, Namespace: connectionPooler.Namespace, - Labels: c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels, + Labels: c.connectionPoolerLabels(false, connectionPooler.Role, true).MatchLabels, Annotations: c.annotationsSet(c.generatePoolerServiceAnnotations(poolerRole, spec)), // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" @@ -678,36 +677,21 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) { return nil } -// Perform actual patching of a connection pooler deployment, assuming that all +// Perform updating the connection pooler deployment, assuming that all // the check were already done before. 
func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) { if newDeployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } - // Wir erstellen ein kombiniertes Patch-Objekt - patch := map[string]interface{}{ - "metadata": map[string]interface{}{ - "labels": newDeployment.Labels, - }, - "spec": newDeployment.Spec, - } - - patchData, err := json.Marshal(patch) - if err != nil { - return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err) - } - deployment, err := KubeClient. - Deployments(newDeployment.Namespace).Patch( + Deployments(newDeployment.Namespace).Update( context.TODO(), - newDeployment.Name, - types.MergePatchType, - patchData, - metav1.PatchOptions{}, - "") + newDeployment, + metav1.UpdateOptions{}) + if err != nil { - return nil, fmt.Errorf("could not patch connection pooler deployment: %v", err) + return nil, fmt.Errorf("could not update connection pooler deployment: %v", err) } return deployment, nil @@ -1021,6 +1005,48 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *cpov1.Postgresql, return NoSync, fmt.Errorf("could not generate desired deployment: %v", err) } + // Check if replacement is needed because of selector changes + if !reflect.DeepEqual(deployment.Spec.Selector, desired.Spec.Selector) { + c.logger.Warningf("selector changed for connection pooler %s, recreating deployment", deployment.Name) + + policy := metav1.DeletePropagationForeground + options := metav1.DeleteOptions{PropagationPolicy: &policy} + + err := c.KubeClient.Deployments(c.Namespace).Delete(context.TODO(), deployment.Name, options) + if err != nil && !k8sutil.ResourceNotFound(err) { + return NoSync, fmt.Errorf("could not delete pooler deployment for recreation: %v", err) + } + + c.logger.Debugf("waiting for the pooler deployment %s to be deleted", deployment.Name) + err = 
retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, + func() (bool, error) { + _, err2 := c.KubeClient.Deployments(c.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) + if err2 == nil { + return false, nil + } + if k8sutil.ResourceNotFound(err2) { + return true, nil + } + return false, err2 + }) + + if err != nil { + return NoSync, fmt.Errorf("timeout waiting for pooler deployment deletion: %v", err) + } + + deployment, err = c.KubeClient. + Deployments(desired.Namespace). + Create(context.TODO(), desired, metav1.CreateOptions{}) + + if err != nil { + return NoSync, fmt.Errorf("could not recreate pooler deployment: %v", err) + } + + c.ConnectionPooler[role].Deployment = deployment + c.logger.Infof("successfully recreated pooler deployment %s with new selector", deployment.Name) + + } + var specSync bool var specReason []string @@ -1037,7 +1063,13 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *cpov1.Postgresql, defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(&c.Config, newSpec.Spec.ConnectionPooler, deployment) syncReason = append(syncReason, defaultsReason...) 
- if labelsSync || specSync || defaultsSync { + // to ensure we're also fetching global-label changes + templateSync := !reflect.DeepEqual(deployment.Spec.Template.Labels, desired.Spec.Template.Labels) + if templateSync { + syncReason = append(syncReason, "pod template labels changed") + } + + if labelsSync || specSync || defaultsSync || templateSync { c.logger.Infof("update connection pooler deployment %s, reason: %+v", c.connectionPoolerName(role), syncReason) deployment, err = updateConnectionPoolerDeployment(c.KubeClient, desired) @@ -1059,7 +1091,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *cpov1.Postgresql, // check if pooler pods must be replaced due to secret update listOptions := metav1.ListOptions{ - LabelSelector: labels.Set(c.connectionPoolerLabels(role, true).MatchLabels).String(), + LabelSelector: labels.Set(c.connectionPoolerLabels(true, role, false).MatchLabels).String(), } pods, err = c.listPoolerPods(listOptions) if err != nil { diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index 3e9c9204..50b2da52 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -65,7 +65,7 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { } for _, role := range []PostgresRole{Master, Replica} { - poolerLabels := cluster.poolerLabelsSet(false) + poolerLabels := cluster.poolerLabelsSet(false, false) poolerLabels["application"] = "db-connection-pooler" poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) @@ -86,7 +86,7 @@ func MasterObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error return fmt.Errorf("Connection pooler resources are empty") } - poolerLabels := cluster.poolerLabelsSet(false) + poolerLabels := cluster.poolerLabelsSet(false, false) poolerLabels["application"] = "db-connection-pooler" poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Master) @@ -106,7 +106,7 @@ func 
ReplicaObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) erro return fmt.Errorf("Connection pooler resources are empty") } - poolerLabels := cluster.poolerLabelsSet(false) + poolerLabels := cluster.poolerLabelsSet(false, false) poolerLabels["application"] = "db-connection-pooler" poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Replica) @@ -924,9 +924,9 @@ func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresR func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] - if poolerLabels != cluster.connectionPoolerLabels(role, true).MatchLabels["connection-pooler"] { + if poolerLabels != cluster.connectionPoolerLabels(true, role, true).MatchLabels["connection-pooler"] { return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", - podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabels(role, true).MatchLabels) + podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabels(true, role, true).MatchLabels) } return nil @@ -934,7 +934,7 @@ func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { labels := deployment.Spec.Selector.MatchLabels - expected := cluster.connectionPoolerLabels(Master, true).MatchLabels + expected := cluster.connectionPoolerLabels(true, Master, false).MatchLabels if labels["connection-pooler"] != expected["connection-pooler"] { return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", From 396f08a886d585b7b6b4a8c5895f543a14fba0f6 Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 19:05:35 +0100 Subject: [PATCH 20/27] add labels --- .../cpo.opensource.cybertec.at/v1/postgresql_type.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go b/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go 
index 576dd85b..1c5da833 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go @@ -89,13 +89,14 @@ type PostgresSpec struct { AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"` Streams []Stream `json:"streams,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` + Labels []v1.EnvVar `name:"labels" default:""` + Backup *Backup `json:"backup,omitempty"` + TDE *TDE `json:"tde,omitempty"` + Monitoring *Monitoring `json:"monitor,omitempty"` // deprecated json tags InitContainersOld []v1.Container `json:"init_containers,omitempty"` PodPriorityClassNameOld string `json:"pod_priority_class_name,omitempty"` - Backup *Backup `json:"backup,omitempty"` - TDE *TDE `json:"tde,omitempty"` - Monitoring *Monitoring `json:"monitor,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -155,6 +156,7 @@ type PostgresqlParam struct { PgVersion string `json:"version"` Parameters map[string]string `json:"parameters,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` + Labels []v1.EnvVar `name:"labels" default:""` } // ResourceDescription describes CPU and memory resources defined for a cluster. 
@@ -254,6 +256,7 @@ type ConnectionPooler struct { DockerImage string `json:"dockerImage,omitempty"` MaxDBConnections *int32 `json:"maxDBConnections,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` + Labels []v1.EnvVar `name:"labels" default:""` *Resources `json:"resources,omitempty"` } @@ -288,6 +291,7 @@ type Pgbackrest struct { Configuration Configuration `json:"configuration"` Resources *Resources `json:"resources,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` + Labels []v1.EnvVar `name:"labels" default:""` } type PgbackrestClone struct { From fb23185726a2236eb785ce2f059c054a11e99c91 Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 19:07:08 +0100 Subject: [PATCH 21/27] add labels to connectionPooler, postgres,pgbackrest --- pkg/cluster/cluster.go | 61 ++++++++++++++++++++++++++++----------- pkg/cluster/k8sres.go | 23 +++++++-------- pkg/cluster/pod.go | 2 +- pkg/cluster/resources.go | 34 +++++++++++++++------- pkg/cluster/sync.go | 2 +- pkg/cluster/util.go | 62 +++++++++++++++++++++------------------- 6 files changed, 114 insertions(+), 70 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1ee54d75..58f60a1f 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -526,8 +526,8 @@ func (c *Cluster) compareStatefulSetWith(oldSts, newSts *appsv1.StatefulSet) *co reasons := make([]string, 0) var match, needsRollUpdate, needsReplace bool - match = true + //TODO: improve me if *oldSts.Spec.Replicas != *newSts.Spec.Replicas { match = false @@ -593,11 +593,10 @@ func (c *Cluster) compareStatefulSetWith(oldSts, newSts *appsv1.StatefulSet) *co reasons = append(reasons, "new statefulset's pod topologySpreadConstraints does not match the current one") } - // Some generated fields like creationTimestamp make it not possible to use DeepCompare on Spec.Template.ObjectMeta if !reflect.DeepEqual(oldSts.Spec.Template.Labels, newSts.Spec.Template.Labels) { - needsReplace = true + match = false needsRollUpdate = true - 
reasons = append(reasons, "new statefulset's metadata labels does not match the current one") + reasons = append(reasons, "new statefulset's pod template labels do not match the current one") } if (oldSts.Spec.Selector != nil) && (newSts.Spec.Selector != nil) { if !reflect.DeepEqual(oldSts.Spec.Selector.MatchLabels, newSts.Spec.Selector.MatchLabels) { @@ -1020,6 +1019,32 @@ func (c *Cluster) Update(oldSpec, newSpec *cpov1.Postgresql) error { syncStatefulSet = true } + // Label-check for pg-pods + pgLabelsChanged := !reflect.DeepEqual(oldSpec.Spec.Labels, newSpec.Spec.Labels) || + !reflect.DeepEqual(oldSpec.Spec.PostgresqlParam.Labels, newSpec.Spec.PostgresqlParam.Labels) + + if pgLabelsChanged { + c.logger.Infof("Labels for Postgres changed, forcing StatefulSet sync") + syncStatefulSet = true + } + + // Label-check for pgbackrest-pods + var oldRepoL, newRepoL []v1.EnvVar + + if oldSpec.Spec.Backup != nil && oldSpec.Spec.Backup.Pgbackrest != nil { + oldRepoL = oldSpec.Spec.Backup.Pgbackrest.Labels + } + if newSpec.Spec.Backup != nil && newSpec.Spec.Backup.Pgbackrest != nil { + newRepoL = newSpec.Spec.Backup.Pgbackrest.Labels + } + + repoLabelsChanged := !reflect.DeepEqual(oldSpec.Spec.Labels, newSpec.Spec.Labels) || + !reflect.DeepEqual(oldRepoL, newRepoL) + + if repoLabelsChanged { + c.logger.Infof("Labels for pgBackRest changed, forcing Statefulset and Cronjob sync") + } + //sync sts when there is a change in the pgbackrest secret, since we need to mount this if newSpec.Spec.Backup != nil && oldSpec.Spec.Backup != nil && newSpec.Spec.Backup.Pgbackrest != nil && oldSpec.Spec.Backup.Pgbackrest != nil && @@ -1029,7 +1054,16 @@ func (c *Cluster) Update(oldSpec, newSpec *cpov1.Postgresql) error { // Pgbackrest backup job func() { - if specHasPgbackrestPVCRepo(&newSpec.Spec) || specHasPgbackrestPVCRepo(&oldSpec.Spec) { + + repoLabelsChanged := !reflect.DeepEqual(oldSpec.Spec.Labels, newSpec.Spec.Labels) + if oldSpec.Spec.Backup != nil && newSpec.Spec.Backup != nil && + 
oldSpec.Spec.Backup.Pgbackrest != nil && newSpec.Spec.Backup.Pgbackrest != nil { + if !reflect.DeepEqual(oldSpec.Spec.Backup.Pgbackrest.Labels, newSpec.Spec.Backup.Pgbackrest.Labels) { + repoLabelsChanged = true + } + } + + if specHasPgbackrestPVCRepo(&newSpec.Spec) || specHasPgbackrestPVCRepo(&oldSpec.Spec) || repoLabelsChanged { if err := c.syncPgbackrestRepoHostConfig(&newSpec.Spec); err != nil { updateFailed = true return @@ -1044,10 +1078,12 @@ func (c *Cluster) Update(oldSpec, newSpec *cpov1.Postgresql) error { } c.logger.Info("a pgbackrest config has been successfully created") - if err := c.syncPgbackrestJob(false); err != nil { - err = fmt.Errorf("could not create a k8s cron job for pgbackrest: %v", err) - updateFailed = true - return + if repoLabelsChanged || !reflect.DeepEqual(oldSpec.Spec.Backup, newSpec.Spec.Backup) { + if err := c.syncPgbackrestJob(false); err != nil { + err = fmt.Errorf("could not create a k8s cron job for pgbackrest: %v", err) + updateFailed = true + return + } } c.logger.Info("a k8s cron job for pgbackrest has been successfully created") } else if oldSpec.Spec.GetBackup().Pgbackrest != nil { @@ -1113,13 +1149,6 @@ func (c *Cluster) Update(oldSpec, newSpec *cpov1.Postgresql) error { c.logger.Errorf("could not sync statefulsets: %v", err) updateFailed = true } - // TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet - if err := c.syncStatefulSet(); err != nil { - c.logger.Errorf("could not sync statefulsets: %v", err) - updateFailed = true - return - } - } }() diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index e1f4756c..7f7d7df6 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1642,7 +1642,7 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu // generate pod template for the statefulset, based on the spilo container and sidecars podTemplate, err = c.generatePodTemplate( c.Namespace, - c.labelsSetWithType(true, TYPE_POSTGRESQL), + 
c.labelsSetWithType(true, TYPE_POSTGRESQL, true), c.annotationsSet(podAnnotations), spiloContainer, initContainers, @@ -1711,7 +1711,7 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu ObjectMeta: metav1.ObjectMeta{ Name: c.statefulSetName(), Namespace: c.Namespace, - Labels: c.labelsSetWithType(true, TYPE_POSTGRESQL), + Labels: c.labelsSetWithType(true, TYPE_POSTGRESQL, false), Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), OwnerReferences: c.createOwnerReference(), }, @@ -1909,12 +1909,11 @@ func (c *Cluster) generateRepoHostStatefulSet(spec *cpov1.PostgresSpec) (*appsv1 effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) podAnnotations := c.generatePodAnnotations(spec) - repoHostLabels := c.labelsSetWithType(true, TYPE_REPOSITORY) // generate pod template for the statefulset, based on the spilo container and sidecars podTemplate, err = c.generatePodTemplate( c.Namespace, - repoHostLabels, + c.labelsSetWithType(true, TYPE_REPOSITORY, true), c.annotationsSet(podAnnotations), repoContainer, initContainers, @@ -1986,7 +1985,7 @@ func (c *Cluster) generateRepoHostStatefulSet(spec *cpov1.PostgresSpec) (*appsv1 ObjectMeta: metav1.ObjectMeta{ Name: c.getPgbackrestRepoHostName(), Namespace: c.Namespace, - Labels: repoHostLabels, + Labels: c.labelsSetWithType(true, TYPE_REPOSITORY, false), Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), OwnerReferences: c.createOwnerReference(), }, @@ -2545,7 +2544,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) lbls := c.labelsSet(true) if username == constants.ConnectionPoolerUserName { - lbls = c.connectionPoolerLabels("", false).MatchLabels + lbls = c.connectionPoolerLabels(false, "", false).MatchLabels } secret := v1.Secret{ @@ -2874,7 +2873,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget { Spec: policyv1.PodDisruptionBudgetSpec{ MinAvailable: 
&minAvailable, Selector: &metav1.LabelSelector{ - MatchLabels: c.labelsSetWithType(false, "postgresql"), //c.roleLabelsSet(false, Master), + MatchLabels: c.labelsSetWithType(false, "postgresql", false), //c.roleLabelsSet(false, Master), }, }, } @@ -2938,7 +2937,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { // re-use the method that generates DB pod templates if podTemplate, err = c.generatePodTemplate( c.Namespace, - c.labelsSetWithType(true, TYPE_LOGICAL_BACKUP), + c.labelsSetWithType(true, TYPE_LOGICAL_BACKUP, false), annotations, logicalBackupContainer, []v1.Container{}, @@ -2991,7 +2990,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { ObjectMeta: metav1.ObjectMeta{ Name: c.getLogicalBackupJobName(), Namespace: c.Namespace, - Labels: c.labelsSetWithType(true, TYPE_LOGICAL_BACKUP), + Labels: c.labelsSetWithType(true, TYPE_LOGICAL_BACKUP, false), Annotations: c.annotationsSet(nil), }, Spec: batchv1.CronJobSpec{ @@ -3482,7 +3481,7 @@ func (c *Cluster) generatePgbackrestJob(spec *cpov1.PostgresSpec, backup *cpov1. // re-use the method that generates DB pod templates if podTemplate, err = c.generatePodTemplate( c.Namespace, - c.labelsSetWithType(true, TYPE_BACKUP_JOB), + c.labelsSetWithType(true, TYPE_BACKUP_JOB, true), annotations, pgbackrestContainer, []v1.Container{}, @@ -3534,7 +3533,7 @@ func (c *Cluster) generatePgbackrestJob(spec *cpov1.PostgresSpec, backup *cpov1. 
ObjectMeta: metav1.ObjectMeta{ Name: c.getPgbackrestJobName(repo.Name, backupType), Namespace: c.Namespace, - Labels: c.labelsSetWithType(true, TYPE_BACKUP_JOB), + Labels: c.labelsSetWithType(true, TYPE_BACKUP_JOB, false), Annotations: c.annotationsSet(nil), }, Spec: batchv1.CronJobSpec{ @@ -3553,7 +3552,7 @@ func (c *Cluster) generatePgbackrestBackupJobEnvVars(spec *cpov1.PostgresSpec, r if repo.Storage == "pvc" { // With a PVC based repo the backup command needs to run on the repository system // due to pgbackrest limitations - selector = c.labelsSetWithType(false, TYPE_REPOSITORY).String() + selector = c.labelsSetWithType(false, TYPE_REPOSITORY, false).String() targetContainer = constants.RepoContainerName } diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index af6a3063..b24d4734 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -34,7 +34,7 @@ func (c *Cluster) listPods() ([]v1.Pod, error) { func (c *Cluster) listPodsOfType(podType PodType) ([]v1.Pod, error) { listOptions := metav1.ListOptions{ - LabelSelector: c.labelsSetWithType(false, podType).String(), + LabelSelector: c.labelsSetWithType(false, podType, false).String(), } pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 00f7bec1..e676d37c 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -241,25 +241,37 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error { c.logger.Warningf("could not scale down: %v", err) } } - c.logger.Debugf("updating statefulset") - patchData, err := specPatch(newStatefulSet.Spec) + currentSts, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Get( + context.TODO(), + c.Statefulset.Name, + metav1.GetOptions{}, + ) if err != nil { - return fmt.Errorf("could not form patch for the statefulset %q: %v", statefulSetName, err) + return fmt.Errorf("could not get current statefulset %q: %v", statefulSetName, err) } - 
statefulSet, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch( + c.logger.Debugf("updating statefulset %q via full update to sync labels", statefulSetName) + + currentSts.Labels = newStatefulSet.Labels + currentSts.Annotations = newStatefulSet.Annotations + + currentSts.Spec.Replicas = newStatefulSet.Spec.Replicas + currentSts.Spec.Template = newStatefulSet.Spec.Template + currentSts.Spec.UpdateStrategy = newStatefulSet.Spec.UpdateStrategy + currentSts.Spec.PodManagementPolicy = newStatefulSet.Spec.PodManagementPolicy + currentSts.Spec.PersistentVolumeClaimRetentionPolicy = newStatefulSet.Spec.PersistentVolumeClaimRetentionPolicy + + updatedSts, err := c.KubeClient.StatefulSets(currentSts.Namespace).Update( context.TODO(), - c.Statefulset.Name, - types.MergePatchType, - patchData, - metav1.PatchOptions{}, - "") + currentSts, + metav1.UpdateOptions{}, + ) if err != nil { - return fmt.Errorf("could not patch statefulset spec %q: %v", statefulSetName, err) + return fmt.Errorf("could not update statefulset spec %q: %v", statefulSetName, err) } - c.Statefulset = statefulSet + c.Statefulset = updatedSts return nil } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 258b840d..1ccbb4dc 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -1198,7 +1198,7 @@ func (c *Cluster) rotatePasswordInSecret( // when password of connection pooler is rotated in place, pooler pods have to be replaced if roleOrigin == spec.RoleOriginConnectionPooler { listOptions := metav1.ListOptions{ - LabelSelector: c.poolerLabelsSet(true).String(), + LabelSelector: c.poolerLabelsSet(true, false).String(), } poolerPods, err := c.listPoolerPods(listOptions) if err != nil { diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index a3bf17dc..19dba9c4 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -398,7 +398,7 @@ func (c *Cluster) waitStatefulsetReady() error { return retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, 
func() (bool, error) { listOptions := metav1.ListOptions{ - LabelSelector: c.labelsSetWithType(false, TYPE_POSTGRESQL).String(), + LabelSelector: c.labelsSetWithType(false, TYPE_POSTGRESQL, false).String(), } ss, err := c.KubeClient.StatefulSets(c.Namespace).List(context.TODO(), listOptions) if err != nil { @@ -417,7 +417,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { var ( podsNumber int ) - ls := c.labelsSetWithType(false, TYPE_POSTGRESQL) + ls := c.labelsSetWithType(false, TYPE_POSTGRESQL, false) namespace := c.Namespace listOptions := metav1.ListOptions{ @@ -534,36 +534,55 @@ func (c *Cluster) getPrimaryLoadBalancerIp() (string, error) { // For backward compatibility, shouldAddExtraLabels must be false // when listing k8s objects. See operator PR #252 func (c *Cluster) labelsSet(shouldAddExtraLabels bool) labels.Set { - return c.labelsSetWithType(shouldAddExtraLabels, "") + return c.labelsSetWithType(shouldAddExtraLabels, "", false) } -func (c *Cluster) labelsSetWithType(shouldAddExtraLabels bool, typeLabel PodType) labels.Set { +func (c *Cluster) labelsSetWithType(shouldAddExtraLabels bool, typeLabel PodType, isPod bool) labels.Set { lbls := make(map[string]string) + + // Basic Labels for k, v := range c.OpConfig.ClusterLabels { lbls[k] = v } lbls[c.OpConfig.ClusterNameLabel] = c.Name + if typeLabel != "" { lbls["member.cpo.opensource.cybertec.at/type"] = string(typeLabel) } + // extraLabels (inherited_labels, ...) 
if shouldAddExtraLabels { + lbls["team"] = c.Postgresql.Spec.TeamID + + if spec, err := c.GetSpec(); err == nil { + for k, v := range spec.ObjectMeta.Labels { + for _, match := range c.OpConfig.InheritedLabels { + if k == match { + lbls[k] = v + } + } + } + } else { + c.logger.Warningf("could not get the list of InheritedLabels for cluster %q: %v", c.Name, err) + } + } + + // add custom labels + if isPod && typeLabel != "" { + // global labels for _, label := range c.Postgresql.Spec.Labels { lbls[label.Name] = label.Value } switch typeLabel { case TYPE_POSTGRESQL: + // pg-specific labels for _, label := range c.Postgresql.Spec.PostgresqlParam.Labels { lbls[label.Name] = label.Value } - if c.Postgresql.Spec.Backup != nil && c.Postgresql.Spec.Backup.Pgbackrest != nil { - for _, label := range c.Postgresql.Spec.Backup.Pgbackrest.Labels { - lbls[label.Name] = label.Value - } - } case TYPE_REPOSITORY, TYPE_BACKUP_JOB: if c.Postgresql.Spec.Backup != nil && c.Postgresql.Spec.Backup.Pgbackrest != nil { + // backup-specific labels for _, label := range c.Postgresql.Spec.Backup.Pgbackrest.Labels { lbls[label.Name] = label.Value } @@ -571,27 +590,12 @@ func (c *Cluster) labelsSetWithType(shouldAddExtraLabels bool, typeLabel PodType case TYPE_POOLER: if c.Postgresql.Spec.ConnectionPooler != nil { + // pooler-specific labels for _, label := range c.Postgresql.Spec.ConnectionPooler.Labels { lbls[label.Name] = label.Value } } } - - // enables filtering resources owned by a team - lbls["team"] = c.Postgresql.Spec.TeamID - - // allow to inherit certain labels from the 'postgres' object - if spec, err := c.GetSpec(); err == nil { - for k, v := range spec.ObjectMeta.Labels { - for _, match := range c.OpConfig.InheritedLabels { - if k == match { - lbls[k] = v - } - } - } - } else { - c.logger.Warningf("could not get the list of InheritedLabels for cluster %q: %v", c.Name, err) - } } return labels.Set(lbls) @@ -599,16 +603,16 @@ func (c *Cluster) labelsSetWithType(shouldAddExtraLabels 
bool, typeLabel PodType func (c *Cluster) labelsSelector(typeLabel PodType) *metav1.LabelSelector { return &metav1.LabelSelector{ - MatchLabels: c.labelsSetWithType(false, typeLabel), + MatchLabels: c.labelsSetWithType(false, typeLabel, false), MatchExpressions: nil, } } func (c *Cluster) roleLabelsSelector(role PostgresRole) *metav1.LabelSelector { - lbls := c.labelsSetWithType(false, TYPE_POSTGRESQL) + lbls := c.labelsSetWithType(false, TYPE_POSTGRESQL, false) lbls[c.OpConfig.PodRoleLabel] = string(role) return &metav1.LabelSelector{ - MatchLabels: c.labelsSetWithType(false, TYPE_POSTGRESQL), + MatchLabels: c.labelsSetWithType(false, TYPE_POSTGRESQL, false), MatchExpressions: nil, } } @@ -621,7 +625,7 @@ func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) la lbls = c.labelsSet(shouldAddExtraLabels) //c.labelsSetWithType(shouldAddExtraLabels, TYPE_POSTGRESQL) lbls[c.OpConfig.PodRoleLabel] = string(role) } else { - lbls = c.labelsSetWithType(shouldAddExtraLabels, "") + lbls = c.labelsSetWithType(shouldAddExtraLabels, "", false) } return lbls } From 53fcb13f62a7b43bfbe617f552c55b16278ac278 Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 19:20:48 +0100 Subject: [PATCH 22/27] update crd and rbac --- charts/postgres-operator/crds/postgresqls.yaml | 18 ++++++++++++++++++ .../templates/clusterrole.yaml | 1 + 2 files changed, 19 insertions(+) diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 2b24b838..833dacec 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -212,6 +212,12 @@ spec: items: type: object x-kubernetes-preserve-unknown-fields: true + labels: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true databases: type: object additionalProperties: @@ -577,6 +583,12 @@ spec: items: type: object x-kubernetes-preserve-unknown-fields: true + labels: + type: 
array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true preparedDatabases: type: object additionalProperties: @@ -1006,6 +1018,12 @@ spec: items: type: object x-kubernetes-preserve-unknown-fields: true + labels: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true required: - image - repos diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index 4df38fa1..8ed777c3 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -196,6 +196,7 @@ rules: - get - list - patch + - update # to CRUD cron jobs for logical backups - apiGroups: - batch From 1db26bb8fd4e1bf055985a176eb59ce189d83f3d Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 19:31:52 +0100 Subject: [PATCH 23/27] upodate crd docu --- docs/hugo/content/en/crd/crd-postgresql.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/hugo/content/en/crd/crd-postgresql.md b/docs/hugo/content/en/crd/crd-postgresql.md index 3faa1d6e..73ebc230 100644 --- a/docs/hugo/content/en/crd/crd-postgresql.md +++ b/docs/hugo/content/en/crd/crd-postgresql.md @@ -37,6 +37,7 @@ weight: 331 | enableShmVolume | boolean | false | Start a database pod without limitations on shm memory. By default Docker limit /dev/shm to 64M (see e.g. the docker issue, which could be not enough if PostgreSQL uses parallel workers heavily. If this option is present and value is true, to the target database pod will be mounted a new tmpfs volume to remove this limitation. 
| | [env](#env) | array | false | Allows you to add custom environment variables to all cluster containers | | [initContainers](#initcontainers) | array | false | Enables the definition of init-containers | +| [labels](#labels) | array | false | Allows you to add custom labels to all cluster pods | | logicalBackupSchedule | string | false | Enables the scheduling of logical backups based on cron-syntax. Example: `30 00 * * *` | | maintenanceWindows | array | false | Enables the definition of maintenance windows for the cluster. Example: `Sat:00:00-04:00` | | masterServiceAnnotations | map | false | Enables the definition of annotations for the Primary Service | @@ -114,6 +115,8 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| | [env](#env) | array | false | Allows you to add custom environment variables to connection-pooler containers | +| [labels](#labels) | array | false | Allows you to add custom labels to connection-pooler pods | +| dockerImage | string | true | Defines the used pgbouncer container image for this cluster | | numberOfInstances | int | true | Number of Pods per Pooler | | mode | string | true | pooling mode for pgBouncer (session, transaction, statement) | | schema | string | true | Schema for Pooler (Default: pooler) | @@ -150,6 +153,17 @@ key, operator, value, effect and tolerationSeconds | --- +#### labels + +| Name | Type | required | Description | +| ------------------------------ |:-------:| ---------:| ------------------:| +| name | string | true | Keyfield for the label-Entry | +| value | string | true | Valuefield for the label-Entry | + +{{< back >}} + +--- + #### monitor | Name | Type | required | Description | @@ -187,6 +201,7 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| | 
[env](#env) | array | false | Allows you to add custom environment variables to all postgresql containers | +| [labels](#labels) | array | false | Allows you to add custom labels to poostgresql pods | | parameters | map | false | PostgreSQL-Parameter as item (Example: max_connections: "100"). For help check out the [CYBERTEC PostgreSQL Configurator](https://pgconfigurator.cybertec.at) | | version | string | false | a map of key-value pairs describing initdb parameters | @@ -407,6 +422,7 @@ key, operator, value, effect and tolerationSeconds | | [env](#env) | array | false | Allows you to add custom environment variables to all pgbackrest containers | | global | object | false | | | image | string | true | | +| [labels](#labels) | array | false | Allows you to add custom labels to pgbackrest pods | | [repos](#repos) | array | true | | | [resources](#resources) | object | false | CPU & Memory (Limit & Request) definition for the pgBackRest container| From cdc24a68d6eb0669f1225013897480bf2053496b Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 20:29:57 +0100 Subject: [PATCH 24/27] change label syntax in crd and all references --- .../content/en/connection_pooler/_index.md | 2 ++ docs/hugo/content/en/crd/crd-postgresql.md | 12 +++---- docs/hugo/content/en/customize_cluster/env.md | 6 ++-- .../content/en/customize_cluster/labels.md | 27 ++++++++++++++++ .../cpo.opensource.cybertec.at/v1/crds.go | 32 +++++++++---------- .../v1/postgresql_type.go | 22 ++++++------- pkg/cluster/cluster.go | 2 +- pkg/cluster/util.go | 16 +++++----- 8 files changed, 74 insertions(+), 45 deletions(-) create mode 100644 docs/hugo/content/en/customize_cluster/labels.md diff --git a/docs/hugo/content/en/connection_pooler/_index.md b/docs/hugo/content/en/connection_pooler/_index.md index 93c6093a..3a5f7600 100644 --- a/docs/hugo/content/en/connection_pooler/_index.md +++ b/docs/hugo/content/en/connection_pooler/_index.md @@ -42,6 +42,8 @@ spec: env: - name: POOLER_ENV value: 'custom 
value' + labels: + custom_pooler_label: 'custom value' mode: transaction numberOfInstances: 2 resources: diff --git a/docs/hugo/content/en/crd/crd-postgresql.md b/docs/hugo/content/en/crd/crd-postgresql.md index 73ebc230..895d3600 100644 --- a/docs/hugo/content/en/crd/crd-postgresql.md +++ b/docs/hugo/content/en/crd/crd-postgresql.md @@ -37,7 +37,7 @@ weight: 331 | enableShmVolume | boolean | false | Start a database pod without limitations on shm memory. By default Docker limit /dev/shm to 64M (see e.g. the docker issue, which could be not enough if PostgreSQL uses parallel workers heavily. If this option is present and value is true, to the target database pod will be mounted a new tmpfs volume to remove this limitation. | | [env](#env) | array | false | Allows you to add custom environment variables to all cluster containers | | [initContainers](#initcontainers) | array | false | Enables the definition of init-containers | -| [labels](#labels) | array | false | Allows you to add custom labels to all cluster pods | +| [labels](#labels) | object | false | Allows you to add custom labels to all cluster pods | | logicalBackupSchedule | string | false | Enables the scheduling of logical backups based on cron-syntax. Example: `30 00 * * *` | | maintenanceWindows | array | false | Enables the definition of maintenance windows for the cluster. 
Example: `Sat:00:00-04:00` | | masterServiceAnnotations | map | false | Enables the definition of annotations for the Primary Service | @@ -115,7 +115,7 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| | [env](#env) | array | false | Allows you to add custom environment variables to connection-pooler containers | -| [labels](#labels) | array | false | Allows you to add custom labels to connection-pooler pods | +| [labels](#labels) | object | false | Allows you to add custom labels to connection-pooler pods | | dockerImage | string | true | Defines the used pgbouncer container image for this cluster | | numberOfInstances | int | true | Number of Pods per Pooler | | mode | string | true | pooling mode for pgBouncer (session, transaction, statement) | @@ -157,8 +157,8 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| -| name | string | true | Keyfield for the label-Entry | -| value | string | true | Valuefield for the label-Entry | +| | string | true | Namefield for the label | +| | string | true | Value for the label | {{< back >}} --- @@ -201,7 +201,7 @@ key, operator, value, effect and tolerationSeconds | | Name | Type | required | Description | | ------------------------------ |:-------:| ---------:| ------------------:| | [env](#env) | array | false | Allows you to add custom environment variables to all postgresql containers | -| [labels](#labels) | array | false | Allows you to add custom labels to poostgresql pods | +| [labels](#labels) | object | false | Allows you to add custom labels to postgresql pods | | parameters | map | false | PostgreSQL-Parameter as item (Example: max_connections: "100").
For help check out the [CYBERTEC PostgreSQL Configurator](https://pgconfigurator.cybertec.at) | | version | string | false | a map of key-value pairs describing initdb parameters | @@ -422,7 +422,7 @@ key, operator, value, effect and tolerationSeconds | | [env](#env) | array | false | Allows you to add custom environment variables to all pgbackrest containers | | global | object | false | | | image | string | true | | -| [labels](#labels) | array | false | Allows you to add custom labels to pgbackrest pods | +| [labels](#labels) | object | false | Allows you to add custom labels to pgbackrest pods | | [repos](#repos) | array | true | | | [resources](#resources) | object | false | CPU & Memory (Limit & Request) definition for the pgBackRest container| diff --git a/docs/hugo/content/en/customize_cluster/env.md b/docs/hugo/content/en/customize_cluster/env.md index 63acb437..3f3571f8 100644 --- a/docs/hugo/content/en/customize_cluster/env.md +++ b/docs/hugo/content/en/customize_cluster/env.md @@ -13,9 +13,9 @@ The variables are defined within the Custom Resource (CR). The following logic a | :--- | :--- | :--- | | `spec.env` | **Global** | These ENVs are inherited by **all** containers within the cluster (PostgreSQL, Backup, Monitoring, etc.). | | `spec.postgresql.env` | **PostgreSQL** | These ENVs apply exclusively to the **PostgreSQL containers**. | -| `spec.backup.pgbackrest.env` | **PostgreSQL** | These ENVs apply exclusively to the **Backup containers**. | -| `spec.monitor.env` | **PostgreSQL** | These ENVs apply exclusively to the **ConnectionPooler containers**. | -| `spec.connectionPooler.env` | **PostgreSQL** | These ENVs apply exclusively to the **Monitoring sidecars**. | +| `spec.backup.pgbackrest.env` | **pgBackRest** | These ENVs apply exclusively to the **Backup containers**. | +| `spec.monitor.env` | **Exporter-Sidecar** | These ENVs apply exclusively to the **Monitoring sidecars**.
| +| `spec.connectionPooler.env` | **ConnectionPooler** | These ENVs apply exclusively to the **ConnectionPooler containers**. | {{< hint type=Warning >}}Updating the ENVs triggers a rolling update to the respective containers.{{< /hint >}} diff --git a/docs/hugo/content/en/customize_cluster/labels.md b/docs/hugo/content/en/customize_cluster/labels.md new file mode 100644 index 00000000..f941b83a --- /dev/null +++ b/docs/hugo/content/en/customize_cluster/labels.md @@ -0,0 +1,27 @@ +--- +title: "Custom Labels" +date: 2023-12-28T14:26:51+01:00 +draft: false +weight: 2 +--- + +To manage and organise pods flexibly within a cluster, the operator allows labels to be defined at various levels. This enables both global labelling and specific metadata for individual components. Unlike environment variables, labels always refer to the pod as a whole, not to individual containers. + +| object | Scope | Description | | :--- | :--- | :--- | | `spec.labels` | **Global** | These labels are adopted by **all** pods within the cluster (**PostgreSQL**, **Backup**, **Pooler**, etc.). | | `spec.postgresql.labels` | **PostgreSQL** | These labels apply exclusively to the **PostgreSQL pods**. | | `spec.backup.pgbackrest.labels` | **pgBackRest** | These labels apply exclusively to the **pgBackRest pods**. | | `spec.connectionPooler.labels` | **ConnectionPooler** | These labels apply exclusively to the **ConnectionPooler pods**. | + +{{< hint type=Warning >}}Updating the labels triggers a rolling update to the respective pods.{{< /hint >}} + + +### Configuration Logic + +The definition of labels follows the standard Kubernetes schema for key-value pairs.
+ +```yaml +labels: + custom_label: 'value' +``` \ No newline at end of file diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go index fa2951c3..9ffcf297 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/crds.go @@ -349,12 +349,12 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "object", Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), + Type: "string", }, }, }, @@ -406,12 +406,12 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "object", Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), + Type: "string", }, }, }, @@ -800,12 +800,12 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "object", Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), + Type: "string", }, }, }, @@ -1494,12 +1494,12 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "labels": { - Type: "array", + Type: "object", Nullable: true, - Items: &apiextv1.JSONSchemaPropsOrArray{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, Schema: &apiextv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: util.True(), + Type: "string", }, }, }, diff --git a/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go
b/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go index 1c5da833..33c30203 100644 --- a/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go +++ b/pkg/apis/cpo.opensource.cybertec.at/v1/postgresql_type.go @@ -89,7 +89,7 @@ type PostgresSpec struct { AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"` Streams []Stream `json:"streams,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` - Labels []v1.EnvVar `name:"labels" default:""` + Labels map[string]string `json:"labels,omitempty" name:"labels" default:""` Backup *Backup `json:"backup,omitempty"` TDE *TDE `json:"tde,omitempty"` Monitoring *Monitoring `json:"monitor,omitempty"` @@ -156,7 +156,7 @@ type PostgresqlParam struct { PgVersion string `json:"version"` Parameters map[string]string `json:"parameters,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` - Labels []v1.EnvVar `name:"labels" default:""` + Labels map[string]string `json:"labels,omitempty" name:"labels" default:""` } // ResourceDescription describes CPU and memory resources defined for a cluster. @@ -249,14 +249,14 @@ type PostgresStatus struct { // makes sense to expose. E.g. pool size (min/max boundaries), max client // connections etc. 
type ConnectionPooler struct { - NumberOfInstances *int32 `json:"numberOfInstances,omitempty"` - Schema string `json:"schema,omitempty"` - User string `json:"user,omitempty"` - Mode string `json:"mode,omitempty"` - DockerImage string `json:"dockerImage,omitempty"` - MaxDBConnections *int32 `json:"maxDBConnections,omitempty"` - Env []v1.EnvVar `json:"env,omitempty"` - Labels []v1.EnvVar `name:"labels" default:""` + NumberOfInstances *int32 `json:"numberOfInstances,omitempty"` + Schema string `json:"schema,omitempty"` + User string `json:"user,omitempty"` + Mode string `json:"mode,omitempty"` + DockerImage string `json:"dockerImage,omitempty"` + MaxDBConnections *int32 `json:"maxDBConnections,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty" name:"labels" default:""` *Resources `json:"resources,omitempty"` } @@ -291,7 +291,7 @@ type Pgbackrest struct { Configuration Configuration `json:"configuration"` Resources *Resources `json:"resources,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` - Labels []v1.EnvVar `name:"labels" default:""` + Labels map[string]string `json:"labels,omitempty" name:"labels" default:""` } type PgbackrestClone struct { diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 58f60a1f..d3db6146 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -1029,7 +1029,7 @@ func (c *Cluster) Update(oldSpec, newSpec *cpov1.Postgresql) error { } // Label-check for pgbackrest-pods - var oldRepoL, newRepoL []v1.EnvVar + var oldRepoL, newRepoL map[string]string if oldSpec.Spec.Backup != nil && oldSpec.Spec.Backup.Pgbackrest != nil { oldRepoL = oldSpec.Spec.Backup.Pgbackrest.Labels diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 19dba9c4..f22da527 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -570,29 +570,29 @@ func (c *Cluster) labelsSetWithType(shouldAddExtraLabels bool, typeLabel PodType // add custom labels if isPod && typeLabel != "" { // global 
labels - for _, label := range c.Postgresql.Spec.Labels { - lbls[label.Name] = label.Value + for k, v := range c.Postgresql.Spec.Labels { + lbls[k] = v } switch typeLabel { case TYPE_POSTGRESQL: // pg-specific labels - for _, label := range c.Postgresql.Spec.PostgresqlParam.Labels { - lbls[label.Name] = label.Value + for k, v := range c.Postgresql.Spec.PostgresqlParam.Labels { + lbls[k] = v } case TYPE_REPOSITORY, TYPE_BACKUP_JOB: if c.Postgresql.Spec.Backup != nil && c.Postgresql.Spec.Backup.Pgbackrest != nil { // backup-specific labels - for _, label := range c.Postgresql.Spec.Backup.Pgbackrest.Labels { - lbls[label.Name] = label.Value + for k, v := range c.Postgresql.Spec.Backup.Pgbackrest.Labels { + lbls[k] = v } } case TYPE_POOLER: if c.Postgresql.Spec.ConnectionPooler != nil { // pooler-specific labels - for _, label := range c.Postgresql.Spec.ConnectionPooler.Labels { - lbls[label.Name] = label.Value + for k, v := range c.Postgresql.Spec.ConnectionPooler.Labels { + lbls[k] = v } } } From 9e16d9fa9e9a416b45a992b2bf27b4437a827ed5 Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 21:05:24 +0100 Subject: [PATCH 25/27] ensure pgbackrest-job is updated in case if label changes --- pkg/cluster/sync.go | 48 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 1ccbb4dc..0b843b3d 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -23,6 +23,7 @@ import ( "github.com/cybertec-postgresql/cybertec-pg-operator/pkg/util" "github.com/cybertec-postgresql/cybertec-pg-operator/pkg/util/constants" "github.com/cybertec-postgresql/cybertec-pg-operator/pkg/util/k8sutil" + "github.com/cybertec-postgresql/cybertec-pg-operator/pkg/util/retryutil" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -1675,16 +1676,53 @@ func (c *Cluster) syncPgbackrestJob(forceRemove bool) error { return fmt.Errorf("could not generate 
pgbackrest job: %v", err) } remove = false - if _, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), c.getPgbackrestJobName(repo.Name, name), metav1.GetOptions{}); err == nil { - if err := c.patchPgbackrestJob(job); err != nil { - return fmt.Errorf("could not update a pgbackrest cronjob: %v", err) + + currentJob, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), c.getPgbackrestJobName(repo.Name, name), metav1.GetOptions{}) + + if err == nil { + if !reflect.DeepEqual(currentJob.Spec.JobTemplate.Spec.Selector, job.Spec.JobTemplate.Spec.Selector) { + c.logger.Warningf("selector changed for pgbackrest cronjob %s, recreating to avoid immutable field error", job.Name) + + err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), job.Name, metav1.DeleteOptions{}) + if err != nil && !k8sutil.ResourceNotFound(err) { + return fmt.Errorf("could not delete pgbackrest cronjob for recreation: %v", err) + } + + err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, + func() (bool, error) { + _, err2 := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), job.Name, metav1.GetOptions{}) + return k8sutil.ResourceNotFound(err2), nil + }) + if err != nil { + return fmt.Errorf("timeout waiting for pgbackrest cronjob deletion: %v", err) + } + + if err := c.createPgbackrestJob(job); err != nil { + return fmt.Errorf("could not recreate pgbackrest cronjob: %v", err) + } + c.logger.Infof("pgbackrest cronjob for %v %v has been successfully recreated", rep, schedul) + + } else { + currentJob.Labels = job.Labels + currentJob.Annotations = job.Annotations + currentJob.Spec.Schedule = job.Spec.Schedule + currentJob.Spec.JobTemplate.ObjectMeta.Labels = job.Spec.JobTemplate.ObjectMeta.Labels + currentJob.Spec.JobTemplate.Spec.Template.ObjectMeta.Labels = job.Spec.JobTemplate.Spec.Template.ObjectMeta.Labels + currentJob.Spec.JobTemplate.Spec.Template.Spec = 
job.Spec.JobTemplate.Spec.Template.Spec + + _, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Update(context.TODO(), currentJob, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update pgbackrest cronjob via Update call: %v", err) + } + c.logger.Infof("pgbackrest cronjob for %v %v has been successfully updated (history preserved)", rep, schedul) } - c.logger.Infof("pgbackrest cronjob for %v %v has been successfully updated", rep, schedul) - } else { + } else if k8sutil.ResourceNotFound(err) { if err := c.createPgbackrestJob(job); err != nil { return fmt.Errorf("could not create a pgbackrest cronjob: %v", err) } c.logger.Infof("pgbackrest cronjob for %v %v has been successfully created", rep, schedul) + } else { + return fmt.Errorf("could not get pgbackrest cronjob: %v", err) } } } From 40669cc71e7da2c4244ce997f513951f67363312 Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 21:17:27 +0100 Subject: [PATCH 26/27] add logging info for labels-sync --- pkg/cluster/sync.go | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 0b843b3d..351a2def 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -189,6 +189,26 @@ func (c *Cluster) Sync(newSpec *cpov1.Postgresql) error { } }() + // Label-check for pg-pods + pgLabelsChanged := !reflect.DeepEqual(oldSpec.Spec.Labels, newSpec.Spec.Labels) || + !reflect.DeepEqual(oldSpec.Spec.PostgresqlParam.Labels, newSpec.Spec.PostgresqlParam.Labels) + + // Label-check for pgbackrest-pods + var oldRepoL, newRepoL map[string]string + if oldSpec.Spec.Backup != nil && oldSpec.Spec.Backup.Pgbackrest != nil { + oldRepoL = oldSpec.Spec.Backup.Pgbackrest.Labels + } + if newSpec.Spec.Backup != nil && newSpec.Spec.Backup.Pgbackrest != nil { + newRepoL = newSpec.Spec.Backup.Pgbackrest.Labels + } + + repoLabelsChanged := !reflect.DeepEqual(oldSpec.Spec.Labels, newSpec.Spec.Labels) || + 
!reflect.DeepEqual(oldRepoL, newRepoL) + + if pgLabelsChanged || repoLabelsChanged { + c.logger.Infof("Labels drift detected in Sync: pgLabelsChanged=%v, repoLabelsChanged=%v", pgLabelsChanged, repoLabelsChanged) + } + // Make sure we know about any in progress restores before touching other stuff if err = c.refreshRestoreConfigMap(); err != nil { return fmt.Errorf("error refreshing restore configmap: %v", err) @@ -210,13 +230,13 @@ func (c *Cluster) Sync(newSpec *cpov1.Postgresql) error { return err } - if err = c.syncPgbackrestConfig(); err != nil { - err = fmt.Errorf("could not sync pgbackrest repo-host config: %v", err) + if err = c.syncPgbackrestRepoHostConfig(&c.Spec); err != nil { + err = fmt.Errorf("could not sync pgbackrest config: %v", err) return err } if err = c.syncPgbackrestRepoHostConfig(&c.Spec); err != nil { - err = fmt.Errorf("could not sync pgbackrest config: %v", err) + err = fmt.Errorf("could not sync pgbackrest repo-host config: %v", err) return err } From cb675a286ef6d6d386cb06da8a15e38401b6781c Mon Sep 17 00:00:00 2001 From: matthias Date: Thu, 26 Mar 2026 21:18:50 +0100 Subject: [PATCH 27/27] update labels in crd chart --- .../postgres-operator/crds/postgresqls.yaml | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 833dacec..4dd54c16 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -213,11 +213,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true labels: - type: array + type: object nullable: true - items: - type: object - x-kubernetes-preserve-unknown-fields: true + additionalProperties: + type: string databases: type: object additionalProperties: @@ -584,11 +583,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true labels: - type: array + type: object nullable: true - items: - type: object - 
x-kubernetes-preserve-unknown-fields: true + additionalProperties: + type: string preparedDatabases: type: object additionalProperties: @@ -1019,11 +1017,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true labels: - type: array + type: object nullable: true - items: - type: object - x-kubernetes-preserve-unknown-fields: true + additionalProperties: + type: string required: - image - repos