Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion api/v4/postgrescluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ type ManagedRole struct {
// Exists controls whether the role should be present (true) or absent (false) in PostgreSQL.
// +kubebuilder:default=true
// +optional
Exists bool `json:"exists,omitempty"`
Exists bool `json:"exists"`
}

// PostgresClusterSpec defines the desired state of PostgresCluster.
Expand Down
2 changes: 1 addition & 1 deletion internal/controller/postgrescluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ import (
* PC-09 ignores no-op updates
*/

var _ = Describe("PostgresCluster Controller", func() {
var _ = Describe("PostgresCluster Controller", Label("postgres"), func() {

const (
postgresVersion = "15.10"
Expand Down
70 changes: 68 additions & 2 deletions internal/controller/postgresdatabase_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -309,6 +309,15 @@ func seedOwnedDatabaseArtifacts(ctx context.Context, namespace, resourceName, cl
}
}

// expectManagedRoleExists asserts that the cluster spec contains a managed role
// with the given name and that its Exists flag matches the expected value.
func expectManagedRoleExists(cluster *enterprisev4.PostgresCluster, roleName string, exists bool) {
	var match *enterprisev4.ManagedRole
	for i := range cluster.Spec.ManagedRoles {
		if cluster.Spec.ManagedRoles[i].Name == roleName {
			// Keep scanning so a duplicate entry resolves to the last
			// occurrence, mirroring the map-overwrite semantics of the
			// original implementation.
			match = &cluster.Spec.ManagedRoles[i]
		}
	}
	Expect(match).NotTo(BeNil())
	Expect(match.Exists).To(Equal(exists), "role %s: expected Exists=%v", roleName, exists)
}

func expectRetainedArtifact(ctx context.Context, name, namespace, resourceName string, obj client.Object) {
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, obj)).To(Succeed())
Expect(obj.GetAnnotations()).To(HaveKeyWithValue("enterprise.splunk.com/retained-from", resourceName))
Expand Down Expand Up @@ -344,7 +353,7 @@ func expectReadyStatus(current *enterprisev4.PostgresDatabase, generation int64,
Expect(current.Status.Databases[0].ConfigMapRef).NotTo(BeNil())
}

var _ = Describe("PostgresDatabase Controller", func() {
var _ = Describe("PostgresDatabase Controller", Label("postgres"), func() {
var (
ctx context.Context
namespace string
Expand Down Expand Up @@ -493,6 +502,59 @@ var _ = Describe("PostgresDatabase Controller", func() {
})
})

// Regression coverage for live (non-deletion) database removal: dropping a
// database from spec.databases must flip that database's roles to
// Exists=false in the owning PostgresCluster while leaving the kept
// database's roles present.
When("a database is removed from spec.databases while the CR stays alive", func() {
	It("marks the removed database roles as absent in postgres cluster and keeps the retained roles present", func() {
		resourceName := "live-db-removal"
		clusterName := "live-db-removal-postgres"
		cnpgClusterName := "live-db-removal-cnpg"
		requestName := types.NamespacedName{Name: resourceName, Namespace: namespace}

		// The CR starts with two databases; "dropdb" is removed later in this test.
		postgresDB := createPostgresDatabaseResource(ctx, namespace, resourceName, clusterName, []enterprisev4.DatabaseDefinition{
			{Name: "keepdb"},
			{Name: "dropdb"},
		}, postgresDatabaseFinalizer)
		Expect(k8sClient.Get(ctx, requestName, postgresDB)).To(Succeed())

		postgresCluster := createPostgresClusterResource(ctx, namespace, clusterName)
		markPostgresClusterReady(ctx, postgresCluster, cnpgClusterName, namespace, false)
		cnpgCluster := createCNPGClusterResource(ctx, namespace, cnpgClusterName)
		markCNPGClusterReady(ctx, cnpgCluster, []string{"keepdb_admin", "keepdb_rw", "dropdb_admin", "dropdb_rw"}, "tenant-rw", "tenant-ro")

		// Seed all four roles via server-side apply under the
		// "postgresdatabase-<name>" field owner so the reconcile below sees
		// them as owned fields. NOTE(review): assumes fieldManagerName()
		// produces this same prefix — confirm against the controller.
		initialRolesPatch := &unstructured.Unstructured{
			Object: map[string]any{
				"apiVersion": enterprisev4.GroupVersion.String(),
				"kind":       "PostgresCluster",
				"metadata":   map[string]any{"name": clusterName, "namespace": namespace},
				"spec": map[string]any{
					"managedRoles": []map[string]any{
						{"name": "keepdb_admin", "exists": true, "passwordSecretRef": map[string]any{"name": resourceName + "-keepdb-admin", "key": "password"}},
						{"name": "keepdb_rw", "exists": true, "passwordSecretRef": map[string]any{"name": resourceName + "-keepdb-rw", "key": "password"}},
						{"name": "dropdb_admin", "exists": true, "passwordSecretRef": map[string]any{"name": resourceName + "-dropdb-admin", "key": "password"}},
						{"name": "dropdb_rw", "exists": true, "passwordSecretRef": map[string]any{"name": resourceName + "-dropdb-rw", "key": "password"}},
					},
				},
			},
		}
		Expect(k8sClient.Patch(ctx, initialRolesPatch, client.Apply, client.FieldOwner("postgresdatabase-"+resourceName))).To(Succeed())

		seedOwnedDatabaseArtifacts(ctx, namespace, resourceName, clusterName, postgresDB, "keepdb", "dropdb")

		// Shrink the spec to keepdb only — the CR itself is NOT deleted.
		postgresDB.Spec.Databases = []enterprisev4.DatabaseDefinition{{Name: "keepdb"}}
		Expect(k8sClient.Update(ctx, postgresDB)).To(Succeed())

		result, err := reconcilePostgresDatabase(ctx, requestName)
		expectReconcileResult(result, err, 15*time.Second)

		updatedCluster := &enterprisev4.PostgresCluster{}
		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, updatedCluster)).To(Succeed())

		// Kept database roles stay present; removed database roles flip to absent.
		expectManagedRoleExists(updatedCluster, "keepdb_admin", true)
		expectManagedRoleExists(updatedCluster, "keepdb_rw", true)
		expectManagedRoleExists(updatedCluster, "dropdb_admin", false)
		expectManagedRoleExists(updatedCluster, "dropdb_rw", false)
	})
})

When("the PostgresDatabase is being deleted", func() {
Context("with retained and deleted databases", func() {
It("orphans retained resources, removes deleted resources, and patches managed roles", func() {
Expand Down Expand Up @@ -547,7 +609,11 @@ var _ = Describe("PostgresDatabase Controller", func() {

updatedCluster := &enterprisev4.PostgresCluster{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, updatedCluster)).To(Succeed())
Expect(managedRoleNames(updatedCluster.Spec.ManagedRoles)).To(ConsistOf("keepdb_admin", "keepdb_rw"))

expectManagedRoleExists(updatedCluster, "keepdb_admin", true)
expectManagedRoleExists(updatedCluster, "keepdb_rw", true)
expectManagedRoleExists(updatedCluster, "dropdb_admin", false)
expectManagedRoleExists(updatedCluster, "dropdb_rw", false)

current := &enterprisev4.PostgresDatabase{}
err = k8sClient.Get(ctx, requestName, current)
Expand Down
1 change: 0 additions & 1 deletion pkg/postgresql/cluster/core/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -606,7 +606,6 @@ func reconcileManagedRoles(ctx context.Context, c client.Client, cluster *enterp
Name: role.Name,
Ensure: cnpgv1.EnsureAbsent,
}
// Exists bool replaces the old Ensure string enum ("present"/"absent").
if role.Exists {
r.Ensure = cnpgv1.EnsurePresent
r.Login = true
Expand Down
133 changes: 93 additions & 40 deletions pkg/postgresql/database/core/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,31 +172,29 @@ func PostgresDatabaseService(
}

// Phase: RoleProvisioning
desiredUsers := getDesiredUsers(postgresDB)
actualRoles := getUsersInClusterSpec(cluster)
var missing []string
for _, role := range desiredUsers {
if !slices.Contains(actualRoles, role) {
missing = append(missing, role)
}
}

if len(missing) > 0 {
logger.Info("CNPG Cluster patch started, missing roles detected", "missing", missing)
if err := patchManagedRoles(ctx, c, postgresDB, cluster); err != nil {
fieldManager := fieldManagerName(postgresDB.Name)
desired := buildDesiredRoles(postgresDB.Name, postgresDB.Spec.Databases)
rolesToAdd := findAddedRoleNames(cluster, desired)
rolesToRemove := absentRolesByName(findRemovedRoleNames(cluster, fieldManager, desired))
allRoles := append(desired, rolesToRemove...)

if len(rolesToAdd) > 0 || len(rolesToRemove) > 0 {
logger.Info("CNPG Cluster patch started, role drift detected", "toAdd", len(rolesToAdd), "toRemove", len(rolesToRemove))
if err := patchManagedRoles(ctx, c, fieldManager, cluster, allRoles); err != nil {
logger.Error(err, "Failed to patch users in CNPG Cluster")
rc.emitWarning(postgresDB, EventManagedRolesPatchFailed, fmt.Sprintf("Failed to patch managed roles: %v", err))
return ctrl.Result{}, err
}
rc.emitNormal(postgresDB, EventRoleReconciliationStarted, fmt.Sprintf("Patched managed roles, waiting for %d roles to reconcile", len(desiredUsers)))
rc.emitNormal(postgresDB, EventRoleReconciliationStarted, fmt.Sprintf("Patched managed roles: %d to add, %d to remove", len(rolesToAdd), len(rolesToRemove)))
if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
fmt.Sprintf("Waiting for %d roles to be reconciled", len(desiredUsers)), provisioningDBPhase); err != nil {
fmt.Sprintf("Waiting for roles to be reconciled: %d to add, %d to remove", len(rolesToAdd), len(rolesToRemove)), provisioningDBPhase); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{RequeueAfter: retryDelay}, nil
}

notReadyRoles, err := verifyRolesReady(ctx, desiredUsers, cnpgCluster)
roleNames := getDesiredUsers(postgresDB)
notReadyRoles, err := verifyRolesReady(ctx, roleNames, cnpgCluster)
if err != nil {
rc.emitWarning(postgresDB, EventRoleFailed, fmt.Sprintf("Role reconciliation failed: %v", err))
if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonUsersCreationFailed,
Expand All @@ -212,9 +210,9 @@ func PostgresDatabaseService(
}
return ctrl.Result{RequeueAfter: retryDelay}, nil
}
rc.emitOnConditionTransition(postgresDB, postgresDB.Status.Conditions, rolesReady, EventRolesReady, fmt.Sprintf("All %d roles reconciled", len(desiredUsers)))
rc.emitOnConditionTransition(postgresDB, postgresDB.Status.Conditions, rolesReady, EventRolesReady, fmt.Sprintf("Roles reconciled: %d active, %d removed", len(rolesToAdd), len(rolesToRemove)))
if err := updateStatus(rolesReady, metav1.ConditionTrue, reasonUsersAvailable,
fmt.Sprintf("All %d users in PostgreSQL", len(desiredUsers)), provisioningDBPhase); err != nil {
fmt.Sprintf("Roles reconciled: %d active, %d removed", len(rolesToAdd), len(rolesToRemove)), provisioningDBPhase); err != nil {
return ctrl.Result{}, err
}

Expand Down Expand Up @@ -366,6 +364,9 @@ func getUsersInClusterSpec(cluster *enterprisev4.PostgresCluster) []string {
return users
}

// NOTE(review): stale doc comment — no rolesMatchClusterSpec function follows
// it in this file; the helper it describes ("returns true if desired and actual
// contain the same roles, by name and Exists state, regardless of order")
// appears to have been removed. Delete this comment or reattach it to the
// helper if it still exists elsewhere.

func getRoleConflicts(postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) []string {
myManager := fieldManagerName(postgresDB.Name)
desired := make(map[string]struct{}, len(postgresDB.Spec.Databases)*2)
Expand Down Expand Up @@ -413,18 +414,16 @@ func parseRoleNames(raw []byte) []string {
return names
}

func patchManagedRoles(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) error {
func patchManagedRoles(ctx context.Context, c client.Client, fieldManager string, cluster *enterprisev4.PostgresCluster, roles []enterprisev4.ManagedRole) error {
logger := log.FromContext(ctx)
allRoles := buildManagedRoles(postgresDB.Name, postgresDB.Spec.Databases)
rolePatch, err := buildManagedRolesPatch(cluster, allRoles, c.Scheme())
rolePatch, err := buildManagedRolesPatch(cluster, roles, c.Scheme())
if err != nil {
return fmt.Errorf("building managed roles patch for PostgresDatabase %s: %w", postgresDB.Name, err)
return fmt.Errorf("building managed roles patch: %w", err)
}
fieldManager := fieldManagerName(postgresDB.Name)
if err := c.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManager)); err != nil {
return fmt.Errorf("patching managed roles for PostgresDatabase %s: %w", postgresDB.Name, err)
return fmt.Errorf("patching managed roles: %w", err)
}
logger.Info("Users added to PostgresCluster via SSA", "roleCount", len(allRoles))
logger.Info("Managed roles patched", "count", len(roles))
return nil
}

Expand Down Expand Up @@ -580,7 +579,15 @@ func cleanupManagedRoles(ctx context.Context, c client.Client, postgresDB *enter
logger.Info("PostgresCluster already deleted, skipping role cleanup")
return nil
}
return patchManagedRolesOnDeletion(ctx, c, postgresDB, cluster, plan.retained)
fieldManager := fieldManagerName(postgresDB.Name)
retainedRoles := buildDesiredRoles(postgresDB.Name, plan.retained)
rolesToRemove := buildRolesToRemove(plan.deleted)
allRoles := append(retainedRoles, rolesToRemove...)
if err := patchManagedRoles(ctx, c, fieldManager, cluster, allRoles); err != nil {
return err
}
logger.Info("Managed roles patched on deletion", "retained", len(retainedRoles), "removed", len(rolesToRemove))
return nil
}

func orphanCNPGDatabases(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
Expand Down Expand Up @@ -716,7 +723,67 @@ func deleteSecrets(ctx context.Context, c client.Client, postgresDB *enterprisev
return nil
}

func buildManagedRoles(postgresDBName string, databases []enterprisev4.DatabaseDefinition) []enterprisev4.ManagedRole {
// buildRolesToRemove returns Exists:false entries for the admin and rw roles
// of each given database, so CNPG drops those roles on the next apply.
func buildRolesToRemove(databases []enterprisev4.DatabaseDefinition) []enterprisev4.ManagedRole {
	absent := make([]enterprisev4.ManagedRole, 0, 2*len(databases))
	for _, db := range databases {
		for _, roleName := range []string{adminRoleName(db.Name), rwRoleName(db.Name)} {
			absent = append(absent, enterprisev4.ManagedRole{Name: roleName, Exists: false})
		}
	}
	return absent
}

// absentRolesByName maps raw role names to Exists:false entries. It is used by
// the normal reconcile path, where names come from SSA field-manager parsing.
func absentRolesByName(names []string) []enterprisev4.ManagedRole {
	absent := make([]enterprisev4.ManagedRole, len(names))
	for i, roleName := range names {
		absent[i] = enterprisev4.ManagedRole{Name: roleName, Exists: false}
	}
	return absent
}

// findAddedRoleNames returns the desired role names that still need to be
// applied: those missing from the cluster spec or currently marked absent.
func findAddedRoleNames(cluster *enterprisev4.PostgresCluster, desired []enterprisev4.ManagedRole) []string {
	existsByName := make(map[string]bool, len(cluster.Spec.ManagedRoles))
	for _, role := range cluster.Spec.ManagedRoles {
		existsByName[role.Name] = role.Exists
	}
	var missing []string
	for _, want := range desired {
		// A zero-value lookup covers both "not in spec" and "present but
		// Exists=false" — exactly the !found || !exists cases.
		if !existsByName[want.Name] {
			missing = append(missing, want.Name)
		}
	}
	return missing
}

// findRemovedRoleNames returns the role names owned by this field manager in
// the cluster spec that no longer appear in the desired list.
// Note: iteration is over a map, so the order of the result is unspecified.
func findRemovedRoleNames(cluster *enterprisev4.PostgresCluster, manager string, desired []enterprisev4.ManagedRole) []string {
	wanted := make(map[string]struct{}, len(desired))
	for _, role := range desired {
		wanted[role.Name] = struct{}{}
	}
	var stale []string
	for name, owner := range managedRoleOwners(cluster.ManagedFields) {
		if owner != manager {
			continue
		}
		if _, keep := wanted[name]; !keep {
			stale = append(stale, name)
		}
	}
	return stale
}

// buildDesiredRoles builds the full set of roles that should be present for the given databases.
// This is the input to findAddedRoleNames and findRemovedRoleNames.
func buildDesiredRoles(postgresDBName string, databases []enterprisev4.DatabaseDefinition) []enterprisev4.ManagedRole {
roles := make([]enterprisev4.ManagedRole, 0, len(databases)*2)
for _, dbSpec := range databases {
roles = append(roles,
Expand Down Expand Up @@ -752,20 +819,6 @@ func buildManagedRolesPatch(cluster *enterprisev4.PostgresCluster, roles []enter
}, nil
}

// patchManagedRolesOnDeletion applies the retained databases' roles to the
// PostgresCluster via server-side apply under this PostgresDatabase's field
// manager, so non-retained roles fall out of the managed set.
func patchManagedRolesOnDeletion(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster, retained []enterprisev4.DatabaseDefinition) error {
	logger := log.FromContext(ctx)
	retainedRoles := buildManagedRoles(postgresDB.Name, retained)
	patch, err := buildManagedRolesPatch(cluster, retainedRoles, c.Scheme())
	if err != nil {
		return fmt.Errorf("building managed roles patch: %w", err)
	}
	owner := client.FieldOwner(fieldManagerName(postgresDB.Name))
	if err := c.Patch(ctx, patch, client.Apply, owner); err != nil {
		return fmt.Errorf("patching managed roles on deletion: %w", err)
	}
	logger.Info("Managed roles patched on deletion", "retainedRoles", len(retainedRoles))
	return nil
}

func stripOwnerReference(obj metav1.Object, ownerUID types.UID) {
refs := obj.GetOwnerReferences()
filtered := make([]metav1.OwnerReference, 0, len(refs))
Expand Down
Loading
Loading