From 03d5a2dfcdb05a618250798efa0bf6b9f35da960 Mon Sep 17 00:00:00 2001 From: tarunsinghot Date: Fri, 13 Dec 2024 17:37:23 +0530 Subject: [PATCH] Added Percona MongoDB helm chart with backup support (#230) * added helm chart for psmdb operator and db * tested backup and restore * updated doc * added end of line in all files * updated values file * added templates for backup and restore files * worked on feedbacks --- charts/psmdb-operator-db/Chart.yaml | 21 + charts/psmdb-operator-db/DOC.md | 2 + charts/psmdb-operator-db/README.md | 266 ++++++ .../psmdb-operator-db/templates/backup.yaml | 18 + .../psmdb-operator-db/templates/restore.yaml | 17 + charts/psmdb-operator-db/values.yaml | 805 ++++++++++++++++++ 6 files changed, 1129 insertions(+) create mode 100644 charts/psmdb-operator-db/Chart.yaml create mode 100644 charts/psmdb-operator-db/DOC.md create mode 100644 charts/psmdb-operator-db/README.md create mode 100644 charts/psmdb-operator-db/templates/backup.yaml create mode 100644 charts/psmdb-operator-db/templates/restore.yaml create mode 100644 charts/psmdb-operator-db/values.yaml diff --git a/charts/psmdb-operator-db/Chart.yaml b/charts/psmdb-operator-db/Chart.yaml new file mode 100644 index 0000000..4cc240d --- /dev/null +++ b/charts/psmdb-operator-db/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: psmdb-operator-db +description: A Helm chart for Percona Operator and Percona Server for MongoDB +type: application +version: 1.0.0 +appVersion: 1.0.0 +dependencies: + - name: psmdb-operator + version: 1.18.0 + repository: https://percona.github.io/percona-helm-charts/ + alias: psmdb-operator + tags: + - psmdb-operator + condition: psmdb-operator.enabled + - name: psmdb-db + version: 1.18.0 + repository: https://percona.github.io/percona-helm-charts/ + alias: psmdb-db + tags: + - psmdb-db + condition: psmdb-db.enabled diff --git a/charts/psmdb-operator-db/DOC.md b/charts/psmdb-operator-db/DOC.md new file mode 100644 index 0000000..b54602b --- /dev/null +++ 
b/charts/psmdb-operator-db/DOC.md @@ -0,0 +1,2 @@ +Backup and Restore have been tested using backup.yaml and restore.yaml files respectively using Azure Blob Storage. +For using cloud storage as backup, a Kubernetes Secret needs to be created: https://docs.percona.com/percona-operator-for-mongodb/backup-tutorial.html#configure-backup-storage diff --git a/charts/psmdb-operator-db/README.md b/charts/psmdb-operator-db/README.md new file mode 100644 index 0000000..255ac95 --- /dev/null +++ b/charts/psmdb-operator-db/README.md @@ -0,0 +1,266 @@ +# Percona Server for MongoDB + +This chart deploys Percona Operator and Percona Server for MongoDB Cluster on Kubernetes controlled by Percona Operator for MongoDB. + +Useful links: +- [Operator Github repository](https://github.com/percona/percona-server-mongodb-operator) +- [Operator Documentation](https://www.percona.com/doc/kubernetes-operator-for-psmongodb/index.html) + +## Pre-requisites +* Kubernetes 1.26+ +* Helm v3 + +# Chart Details
This chart will deploy the Operator Pod and Percona Server for MongoDB Cluster in Kubernetes. It will create a Custom Resource, and the Operator will trigger the creation of corresponding Kubernetes primitives: StatefulSets, Pods, Secrets, etc. 
+ +## Installing the Chart +To install the chart with the `my-db` release name using a dedicated namespace (recommended): + +```sh +helm dependency build +helm install my-db . --namespace my-namespace +``` + +The chart can be customized using the following configurable parameters: + +| Parameter | Description | Default | +| ------------------------------- | ------------------------------------------------------------------------------|---------------------------------------| +| `crVersion` | CR Cluster Manifest version | `1.18.0` | +| `pause` | Stop PSMDB Database safely | `false` | +| `unmanaged` | Start cluster and don't manage it (cross cluster replication) | `false` | +| `unsafeFlags.tls` | Allows users to configure a cluster without TLS/SSL certificates | `false` | +| `unsafeFlags.replsetSize` | Allows users to configure a cluster with unsafe parameters: starting it with less than 3 replica set instances or with an even number of replica set instances without additional arbiter | `false` | +| `unsafeFlags.mongosSize` | Allows users to configure a sharded cluster with less than 3 config server Pods or less than 2 mongos Pods | `false` | +| `unsafeFlags.terminationGracePeriod` | Allows users to configure a sharded cluster without termination grace period for replica set | `false` | +| `unsafeFlags.backupIfUnhealthy` | Allows running backup on a cluster with failed health checks | `false` | +| `clusterServiceDNSSuffix` | The (non-standard) cluster domain to be used as a suffix of the Service name | `""` | +| `clusterServiceDNSMode` | Mode for the cluster service dns (Internal/ServiceMesh) | `""` | +| `annotations` | PSMDB custom resource annotations | `{}` | +| `ignoreAnnotations` | The list of annotations to be ignored by the Operator | `[]` | +| `ignoreLabels` | The list of labels to be ignored by the Operator | `[]` | +| `multiCluster.enabled` | Enable Multi Cluster Services (MCS) cluster mode | `false` | +| `multiCluster.DNSSuffix` | The cluster 
domain to be used as a suffix for multi-cluster Services used by Kubernetes | `""` | +| `updateStrategy` | Regulates the way how PSMDB Cluster Pods will be updated after setting a new image | `SmartUpdate` | +| `upgradeOptions.versionServiceEndpoint` | Endpoint for actual PSMDB Versions provider | `https://check.percona.com/versions/` | +| `upgradeOptions.apply` | PSMDB image to apply from version service - recommended, latest, actual version like 4.4.2-4 | `disabled` | +| `upgradeOptions.schedule` | Cron formatted time to execute the update | `"0 2 * * *"` | +| `upgradeOptions.setFCV` | Set feature compatibility version on major upgrade | `false` | +| `finalizers:delete-psmdb-pvc` | Set this if you want to delete database persistent volumes on cluster deletion | `[]` | +| `finalizers:delete-psmdb-pods-in-order` | Set this if you want to delete PSMDB pods in order (primary last) | `[]` | +| `image.repository` | PSMDB Container image repository | `percona/percona-server-mongodb` | +| `image.tag` | PSMDB Container image tag | `6.0.9-7` | +| `imagePullPolicy` | The policy used to update images | `Always` | +| `imagePullSecrets` | PSMDB Container pull secret | `[]` | +| `initImage.repository` | Repository for custom init image | `""` | +| `initImage.tag` | Tag for custom init image | `""` | +| `initContainerSecurityContext` | A custom Kubernetes Security Context for a Container for the initImage | `{}` | +| `tls.mode` | Control usage of TLS (allowTLS, preferTLS, requireTLS, disabled) | `preferTLS` | +| `tls.certValidityDuration` | The validity duration of the external certificate for cert manager | `""` | +| `tls.allowInvalidCertificates` | If enabled the mongo shell will not attempt to validate the server certificates | `true` | +| `tls.issuerConf.name` | A cert-manager issuer name | `""` | +| `tls.issuerConf.kind` | A cert-manager issuer kind | `""` | +| `tls.issuerConf.group` | A cert-manager issuer group | `""` | +| `secrets.users` | The name of the Secrets object 
for the MongoDB users required to run the operator | `""` | +| `secrets.encryptionKey` | Set secret for data at rest encryption key | `""` | +| `secrets.vault` | Specifies a secret object to provide integration with HashiCorp Vault | `""` | +| `secrets.ldapSecret` | Specifies a secret object for LDAP over TLS connection between MongoDB and OpenLDAP server | `""` | +| `secrets.sse` | The name of the Secrets object for server side encryption credentials | `""` | +| `secrets.ssl` | A secret with TLS certificate generated for external communications | `""` | +| `secrets.sslInternal` | A secret with TLS certificate generated for internal communications | `""` | +| `pmm.enabled` | Enable integration with [Percona Monitoring and Management software](https://www.percona.com/blog/2020/07/23/using-percona-kubernetes-operators-with-percona-monitoring-and-management/) | `false` | +| `pmm.image.repository` | PMM Container image repository | `percona/pmm-client` | +| `pmm.image.tag` | PMM Container image tag | `2.41.2` | +| `pmm.serverHost` | PMM server related K8S service hostname | `monitoring-service` | +|| +| `replsets.rs0.name` | ReplicaSet name | `rs0` | +| `replsets.rs0.size` | ReplicaSet size (pod quantity) | `3` | +| `replsets.rs0.terminationGracePeriodSeconds` | The amount of seconds Kubernetes will wait for a clean replica set Pods termination | `""` | +| `replsets.rs0.externalNodes` | ReplicaSet external nodes (cross cluster replication) | `[]` | +| `replsets.rs0.configuration` | Custom config for mongod in replica set | `""` | +| `replsets.rs0.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains | `{}` | +| `replsets.rs0.serviceAccountName` | Run replicaset Containers under specified K8S SA | `""` | +| `replsets.rs0.affinity.antiAffinityTopologyKey` | ReplicaSet Pod affinity | `kubernetes.io/hostname` | +| `replsets.rs0.affinity.advanced` | ReplicaSet 
Pod advanced affinity | `{}` | +| `replsets.rs0.tolerations` | ReplicaSet Pod tolerations | `[]` | +| `replsets.rs0.priorityClass` | ReplicaSet Pod priorityClassName | `""` | +| `replsets.rs0.annotations` | ReplicaSet Pod annotations | `{}` | +| `replsets.rs0.labels` | ReplicaSet Pod labels | `{}` | +| `replsets.rs0.nodeSelector` | ReplicaSet Pod nodeSelector labels | `{}` | +| `replsets.rs0.livenessProbe` | ReplicaSet Pod livenessProbe structure | `{}` | +| `replsets.rs0.readinessProbe` | ReplicaSet Pod readinessProbe structure | `{}` | +| `replsets.rs0.storage` | Set cacheSizeRatio or other custom MongoDB storage options | `{}` | +| `replsets.rs0.podSecurityContext` | Set the security context for a Pod | `{}` | +| `replsets.rs0.containerSecurityContext` | Set the security context for a Container | `{}` | +| `replsets.rs0.runtimeClass` | ReplicaSet Pod runtimeClassName | `""` | +| `replsets.rs0.sidecars` | ReplicaSet Pod sidecars | `{}` | +| `replsets.rs0.sidecarVolumes` | ReplicaSet Pod sidecar volumes | `[]` | +| `replsets.rs0.sidecarPVCs` | ReplicaSet Pod sidecar PVCs | `[]` | +| `replsets.rs0.podDisruptionBudget.maxUnavailable` | ReplicaSet failed Pods maximum quantity | `1` | +| `replsets.rs0.splitHorizons` | External URI for Split-horizon for replica set Pods of the exposed cluster | `{}` | +| `replsets.rs0.expose.enabled` | Allow access to replicaSet from outside of Kubernetes | `false` | +| `replsets.rs0.expose.exposeType` | Network service access point type | `ClusterIP` | +| `replsets.rs0.expose.loadBalancerSourceRanges` | Limit client IP's access to Load Balancer | `{}` | +| `replsets.rs0.expose.serviceAnnotations` | ReplicaSet service annotations | `{}` | +| `replsets.rs0.expose.serviceLabels` | ReplicaSet service labels | `{}` | +| `replsets.rs0.schedulerName` | ReplicaSet Pod schedulerName | `""` | +| `replsets.rs0.resources` | ReplicaSet Pods resource requests and limits | `{}` | +| `replsets.rs0.volumeSpec` | ReplicaSet Pods storage resources | 
`{}` | +| `replsets.rs0.volumeSpec.emptyDir` | ReplicaSet Pods emptyDir K8S storage | `{}` | +| `replsets.rs0.volumeSpec.hostPath` | ReplicaSet Pods hostPath K8S storage | | +| `replsets.rs0.volumeSpec.hostPath.path` | ReplicaSet Pods hostPath K8S storage path | `""` | +| `replsets.rs0.volumeSpec.hostPath.type` | Type for hostPath volume | `Directory` | +| `replsets.rs0.volumeSpec.pvc` | ReplicaSet Pods PVC request parameters | | +| `replsets.rs0.volumeSpec.pvc.annotations` | The Kubernetes annotations metadata for Persistent Volume Claim | `{}` | +| `replsets.rs0.volumeSpec.pvc.labels` | The Kubernetes labels metadata for Persistent Volume Claim | `{}` | +| `replsets.rs0.volumeSpec.pvc.storageClassName` | ReplicaSet Pods PVC target storageClass | `""` | +| `replsets.rs0.volumeSpec.pvc.accessModes` | ReplicaSet Pods PVC access policy | `[]` | +| `replsets.rs0.volumeSpec.pvc.resources.requests.storage` | ReplicaSet Pods PVC storage size | `3Gi` | +| `replsets.rs0.hostAliases` | The IP address for Kubernetes host aliases | `[]` | +| `replsets.rs0.nonvoting.enabled` | Add MongoDB nonvoting Pods | `false` | +| `replsets.rs0.nonvoting.podSecurityContext` | Set the security context for a Pod | `{}` | +| `replsets.rs0.nonvoting.containerSecurityContext` | Set the security context for a Container | `{}` | +| `replsets.rs0.nonvoting.size` | Number of nonvoting Pods | `1` | +| `replsets.rs0.nonvoting.configuration` | Custom config for mongod nonvoting member | `""` | +| `replsets.rs0.nonvoting.serviceAccountName` | Run replicaset nonvoting Container under specified K8S SA | `""` | +| `replsets.rs0.nonvoting.affinity.antiAffinityTopologyKey` | Nonvoting Pods affinity | `kubernetes.io/hostname` | +| `replsets.rs0.nonvoting.affinity.advanced` | Nonvoting Pods advanced affinity | `{}` | +| `replsets.rs0.nonvoting.tolerations` | Nonvoting Pod tolerations | `[]` | +| `replsets.rs0.nonvoting.priorityClass` | Nonvoting Pod priorityClassName | `""` | +| 
`replsets.rs0.nonvoting.annotations` | Nonvoting Pod annotations | `{}` | +| `replsets.rs0.nonvoting.labels` | Nonvoting Pod labels | `{}` | +| `replsets.rs0.nonvoting.nodeSelector` | Nonvoting Pod nodeSelector labels | `{}` | +| `replsets.rs0.nonvoting.podDisruptionBudget.maxUnavailable` | Nonvoting failed Pods maximum quantity | `1` | +| `replsets.rs0.nonvoting.resources` | Nonvoting Pods resource requests and limits | `{}` | +| `replsets.rs0.nonvoting.volumeSpec` | Nonvoting Pods storage resources | `{}` | +| `replsets.rs0.nonvoting.volumeSpec.emptyDir` | Nonvoting Pods emptyDir K8S storage | `{}` | +| `replsets.rs0.nonvoting.volumeSpec.hostPath` | Nonvoting Pods hostPath K8S storage | | +| `replsets.rs0.nonvoting.volumeSpec.hostPath.path` | Nonvoting Pods hostPath K8S storage path | `""` | +| `replsets.rs0.nonvoting.volumeSpec.hostPath.type` | Type for hostPath volume | `Directory` | +| `replsets.rs0.nonvoting.volumeSpec.pvc` | Nonvoting Pods PVC request parameters | | +| `replsets.rs0.nonvoting.volumeSpec.pvc.annotations` | The Kubernetes annotations metadata for Persistent Volume Claim | `{}` | +| `replsets.rs0.nonvoting.volumeSpec.pvc.labels` | The Kubernetes labels metadata for Persistent Volume Claim | `{}` | +| `replsets.rs0.nonvoting.volumeSpec.pvc.storageClassName` | Nonvoting Pods PVC target storageClass | `""` | +| `replsets.rs0.nonvoting.volumeSpec.pvc.accessModes` | Nonvoting Pods PVC access policy | `[]` | +| `replsets.rs0.nonvoting.volumeSpec.pvc.resources.requests.storage` | Nonvoting Pods PVC storage size | `3Gi` | +| `replsets.rs0.arbiter.enabled` | Create MongoDB arbiter service | `false` | +| `replsets.rs0.arbiter.size` | MongoDB arbiter Pod quantity | `1` | +| `replsets.rs0.arbiter.serviceAccountName` | Run replicaset arbiter Container under specified K8S SA | `""` | +| `replsets.rs0.arbiter.affinity.antiAffinityTopologyKey` | MongoDB arbiter Pod affinity | `kubernetes.io/hostname` | +| `replsets.rs0.arbiter.affinity.advanced` | MongoDB 
arbiter Pod advanced affinity | `{}` | +| `replsets.rs0.arbiter.tolerations` | MongoDB arbiter Pod tolerations | `[]` | +| `replsets.rs0.arbiter.priorityClass` | MongoDB arbiter priorityClassName | `""` | +| `replsets.rs0.arbiter.annotations` | MongoDB arbiter Pod annotations | `{}` | +| `replsets.rs0.arbiter.labels` | MongoDB arbiter Pod labels | `{}` | +| `replsets.rs0.arbiter.nodeSelector` | MongoDB arbiter Pod nodeSelector labels | `{}` | +| | +| `sharding.enabled` | Enable sharding setup | `true` | +| `sharding.balancer.enabled` | Enable/disable balancer | `true` | +| `sharding.configrs.size` | Config ReplicaSet size (pod quantity) | `3` | +| `sharding.configrs.terminationGracePeriodSeconds` | The amount of seconds Kubernetes will wait for a clean replica set Pods termination | `""` | +| `sharding.configrs.externalNodes` | Config ReplicaSet external nodes (cross cluster replication) | `[]` | +| `sharding.configrs.configuration` | Custom config for mongod in config replica set | `""` | +| `sharding.configrs.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains | `{}` | +| `sharding.configrs.serviceAccountName` | Run sharding configrs Containers under specified K8S SA | `""` | +| `sharding.configrs.affinity.antiAffinityTopologyKey` | Config ReplicaSet Pod affinity | `kubernetes.io/hostname` | +| `sharding.configrs.affinity.advanced` | Config ReplicaSet Pod advanced affinity | `{}` | +| `sharding.configrs.tolerations` | Config ReplicaSet Pod tolerations | `[]` | +| `sharding.configrs.priorityClass` | Config ReplicaSet Pod priorityClassName | `""` | +| `sharding.configrs.annotations` | Config ReplicaSet Pod annotations | `{}` | +| `sharding.configrs.labels` | Config ReplicaSet Pod labels | `{}` | +| `sharding.configrs.nodeSelector` | Config ReplicaSet Pod nodeSelector labels | `{}` | +| `sharding.configrs.livenessProbe` | Config ReplicaSet Pod 
livenessProbe structure | `{}` | +| `sharding.configrs.readinessProbe` | Config ReplicaSet Pod readinessProbe structure | `{}` | +| `sharding.configrs.storage` | Set cacheSizeRatio or other custom MongoDB storage options | `{}` | +| `sharding.configrs.podSecurityContext` | Set the security context for a Pod | `{}` | +| `sharding.configrs.containerSecurityContext` | Set the security context for a Container | `{}` | +| `sharding.configrs.runtimeClass` | Config ReplicaSet Pod runtimeClassName | `""` | +| `sharding.configrs.sidecars` | Config ReplicaSet Pod sidecars | `{}` | +| `sharding.configrs.sidecarVolumes` | Config ReplicaSet Pod sidecar volumes | `[]` | +| `sharding.configrs.sidecarPVCs` | Config ReplicaSet Pod sidecar PVCs | `[]` | +| `sharding.configrs.podDisruptionBudget.maxUnavailable` | Config ReplicaSet failed Pods maximum quantity | `1` | +| `sharding.configrs.expose.enabled` | Allow access to cfg replica from outside of Kubernetes | `false` | +| `sharding.configrs.expose.exposeType` | Network service access point type | `ClusterIP` | +| `sharding.configrs.expose.loadBalancerSourceRanges` | Limit client IP's access to Load Balancer | `{}` | +| `sharding.configrs.expose.serviceAnnotations` | Config ReplicaSet service annotations | `{}` | +| `sharding.configrs.expose.serviceLabels` | Config ReplicaSet service labels | `{}` | +| `sharding.configrs.resources.limits.cpu` | Config ReplicaSet resource limits CPU | `300m` | +| `sharding.configrs.resources.limits.memory` | Config ReplicaSet resource limits memory | `0.5G` | +| `sharding.configrs.resources.requests.cpu` | Config ReplicaSet resource requests CPU | `300m` | +| `sharding.configrs.resources.requests.memory` | Config ReplicaSet resource requests memory | `0.5G` | +| `sharding.configrs.volumeSpec.hostPath` | Config ReplicaSet hostPath K8S storage | | +| `sharding.configrs.volumeSpec.hostPath.path` | Config ReplicaSet hostPath K8S storage path | `""` | +| `sharding.configrs.volumeSpec.hostPath.type` | 
Type for hostPath volume | `Directory` | +| `sharding.configrs.volumeSpec.emptyDir` | Config ReplicaSet Pods emptyDir K8S storage | | +| `sharding.configrs.volumeSpec.pvc` | Config ReplicaSet Pods PVC request parameters | | +| `sharding.configrs.volumeSpec.pvc.annotations` | The Kubernetes annotations metadata for Persistent Volume Claim | `{}` | +| `sharding.configrs.volumeSpec.pvc.labels` | The Kubernetes labels metadata for Persistent Volume Claim | `{}` | +| `sharding.configrs.volumeSpec.pvc.storageClassName` | Config ReplicaSet Pods PVC storageClass | `""` | +| `sharding.configrs.volumeSpec.pvc.accessModes` | Config ReplicaSet Pods PVC access policy | `[]` | +| `sharding.configrs.volumeSpec.pvc.resources.requests.storage` | Config ReplicaSet Pods PVC storage size | `3Gi` | +| `sharding.configrs.hostAliases` | The IP address for Kubernetes host aliases | `[]` | +| `sharding.mongos.size` | Mongos size (pod quantity) | `3` | +| `sharding.mongos.terminationGracePeriodSeconds` | The amount of seconds Kubernetes will wait for a clean mongos Pods termination | `""` | +| `sharding.mongos.configuration` | Custom config for mongos | `""` | +| `sharding.mongos.topologySpreadConstraints` | Control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains | `{}` | +| `sharding.mongos.serviceAccountName` | Run sharding mongos Containers under specified K8S SA | `""` | +| `sharding.mongos.affinity.antiAffinityTopologyKey` | Mongos Pods affinity | `kubernetes.io/hostname` | +| `sharding.mongos.affinity.advanced` | Mongos Pods advanced affinity | `{}` | +| `sharding.mongos.tolerations` | Mongos Pods tolerations | `[]` | +| `sharding.mongos.priorityClass` | Mongos Pods priorityClassName | `""` | +| `sharding.mongos.annotations` | Mongos Pods annotations | `{}` | +| `sharding.mongos.labels` | Mongos Pods labels | `{}` | +| `sharding.mongos.nodeSelector` | Mongos Pods nodeSelector labels | `{}` | +| 
`sharding.mongos.livenessProbe` | Mongos Pod livenessProbe structure | `{}` | +| `sharding.mongos.readinessProbe` | Mongos Pod readinessProbe structure | `{}` | +| `sharding.mongos.podSecurityContext` | Set the security context for a Pod | `{}` | +| `sharding.mongos.containerSecurityContext` | Set the security context for a Container | `{}` | +| `sharding.mongos.runtimeClass` | Mongos Pod runtimeClassName | `""` | +| `sharding.mongos.sidecars` | Mongos Pod sidecars | `{}` | +| `sharding.mongos.sidecarVolumes` | Mongos Pod sidecar volumes | `[]` | +| `sharding.mongos.sidecarPVCs` | Mongos Pod sidecar PVCs | `[]` | +| `sharding.mongos.podDisruptionBudget.maxUnavailable` | Mongos failed Pods maximum quantity | `1` | +| `sharding.mongos.resources.limits.cpu` | Mongos Pods resource limits CPU | `300m` | +| `sharding.mongos.resources.limits.memory` | Mongos Pods resource limits memory | `0.5G` | +| `sharding.mongos.resources.requests.cpu` | Mongos Pods resource requests CPU | `300m` | +| `sharding.mongos.resources.requests.memory` | Mongos Pods resource requests memory | `0.5G` | +| `sharding.mongos.expose.exposeType` | Mongos service exposeType | `ClusterIP` | +| `sharding.mongos.expose.servicePerPod` | Create a separate ClusterIP Service for each mongos instance | `false` | +| `sharding.mongos.expose.loadBalancerSourceRanges` | Limit client IP's access to Load Balancer | `{}` | +| `sharding.mongos.expose.serviceAnnotations` | Mongos service annotations | `{}` | +| `sharding.mongos.expose.serviceLabels` | Mongos service labels | `{}` | +| `sharding.mongos.expose.nodePort` | Custom port if exposing mongos via NodePort | `""` | +| `sharding.mongos.hostAliases` | The IP address for Kubernetes host aliases | `[]` | +| | +| `backup.enabled` | Enable backup PBM agent | `true` | +| `backup.annotations` | Backup job annotations | `{}` | +| `backup.podSecurityContext` | Set the security context for a Pod | `{}` | +| `backup.containerSecurityContext` | Set the security context 
for a Container | `{}` | +| `backup.restartOnFailure` | Backup Pods restart policy | `true` | +| `backup.image.repository` | PBM Container image repository | `percona/percona-backup-mongodb` | +| `backup.image.tag` | PBM Container image tag | `2.3.0` | +| `backup.storages` | Local/remote backup storages settings | `{}` | +| `backup.pitr.enabled` | Enable point in time recovery for backup | `false` | +| `backup.pitr.oplogOnly` | Start collecting oplogs even if full logical backup doesn't exist | `false` | +| `backup.pitr.oplogSpanMin` | Number of minutes between the uploads of oplogs | `10` | +| `backup.pitr.compressionType` | The point-in-time-recovery chunks compression format | `""` | +| `backup.pitr.compressionLevel` | The point-in-time-recovery chunks compression level | `""` | +| `backup.configuration.backupOptions` | Custom configuration settings for backup | `{}` | +| `backup.configuration.restoreOptions` | Custom configuration settings for restore | `{}` | +| `backup.tasks` | Backup working schedule | `{}` | +| `users` | PSMDB essential users | `{}` | + + +Specify parameters using `--set key=value[,key=value]` argument to `helm install` +Notice that you can use multiple replica sets only with sharding enabled. + +## Examples + +### Deploy a replica set with disabled backups and no mongos pods + +This is great for a dev PSMDB/MongoDB cluster as it doesn't bother with backups and sharding setup. + +```bash +$ helm install dev --namespace psmdb . 
\ + --set runUid=1001 --set "replsets.rs0.volumeSpec.pvc.resources.requests.storage=20Gi" \ + --set backup.enabled=false --set sharding.enabled=false +``` diff --git a/charts/psmdb-operator-db/templates/backup.yaml b/charts/psmdb-operator-db/templates/backup.yaml new file mode 100644 index 0000000..98457f2 --- /dev/null +++ b/charts/psmdb-operator-db/templates/backup.yaml @@ -0,0 +1,18 @@ +{{- if .Values.backup.enabled }} +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBBackup +metadata: + name: {{ .Values.backup.name }} +{{- if .Values.backup.annotations }} + annotations: +{{ .Values.backup.annotations | toYaml | indent 4 }} +{{- end }} +{{- if .Values.backup.labels }} + labels: +{{ .Values.backup.labels | toYaml | indent 4 }} +{{- end }} +spec: + clusterName: {{ .Values.backup.clusterName }} + storageName: {{ .Values.backup.storageName }} + type: {{ .Values.backup.type }} +{{- end }} diff --git a/charts/psmdb-operator-db/templates/restore.yaml b/charts/psmdb-operator-db/templates/restore.yaml new file mode 100644 index 0000000..1244ac7 --- /dev/null +++ b/charts/psmdb-operator-db/templates/restore.yaml @@ -0,0 +1,17 @@ +{{- if .Values.restore.enabled }} +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDBRestore +metadata: + name: {{ .Values.restore.name }} +{{- if .Values.restore.annotations }} + annotations: +{{ .Values.restore.annotations | toYaml | indent 4 }} +{{- end }} +{{- if .Values.restore.labels }} + labels: +{{ .Values.restore.labels | toYaml | indent 4 }} +{{- end }} +spec: + clusterName: {{ .Values.restore.clusterName }} + backupName: {{ .Values.restore.backupName }} +{{- end }} diff --git a/charts/psmdb-operator-db/values.yaml b/charts/psmdb-operator-db/values.yaml new file mode 100644 index 0000000..543cbc3 --- /dev/null +++ b/charts/psmdb-operator-db/values.yaml @@ -0,0 +1,805 @@ +psmdb-operator: + enabled: true + # Default values for psmdb-operator. + # This is a YAML-formatted file. 
+ # Declare variables to be passed into your templates. + + replicaCount: 1 + + image: + repository: percona/percona-server-mongodb-operator + tag: 1.18.0 + pullPolicy: IfNotPresent + + # disableTelemetry: according to + # https://docs.percona.com/percona-operator-for-mongodb/telemetry.html + # this is how you can disable telemetry collection + # default is false which means telemetry will be collected + disableTelemetry: false + + # set if you want to specify a namespace to watch + # defaults to `.Release.namespace` if left blank + # multiple namespaces can be specified and separated by comma + # watchNamespace: + # set if you want that watched namespaces are created by helm + # createNamespace: false + + # set if operator should be deployed in cluster wide mode. defaults to false + watchAllNamespaces: false + + # rbac: settings for deployer RBAC creation + rbac: + # rbac.create: if false RBAC resources should be in place + create: true + + # serviceAccount: settings for Service Accounts used by the deployer + serviceAccount: + # serviceAccount.create: Whether to create the Service Accounts or not + create: true + # annotations to add to the service account + annotations: {} + + # annotations to add to the operator deployment + annotations: {} + + # labels to add to the operator deployment + labels: {} + + # annotations to add to the operator pod + podAnnotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "8080" + + # labels to the operator pod + podLabels: {} + + podSecurityContext: {} + # runAsNonRoot: true + # runAsUser: 2 + # runAsGroup: 2 + # fsGroup: 2 + # fsGroupChangePolicy: "OnRootMismatch" + + securityContext: {} + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # seccompProfile: + # type: RuntimeDefault + + # set if you want to use a different operator name + # defaults to `percona-server-mongodb-operator` + # operatorName: + + imagePullSecrets: [] + nameOverride: "" + fullnameOverride: "" + + env: + 
resyncPeriod: 5s + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + logStructured: false + logLevel: "INFO" +psmdb-db: + enabled: true + + # Default values for psmdb-cluster. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + # Platform type: kubernetes, openshift + # platform: kubernetes + + # Cluster DNS Suffix + # clusterServiceDNSSuffix: svc.cluster.local + # clusterServiceDNSMode: "Internal" + + finalizers: + ## Set this if you want that operator deletes the primary pod last + - percona.com/delete-psmdb-pods-in-order + ## Set this if you want to delete database persistent volumes on cluster deletion + # - percona.com/delete-psmdb-pvc + ## Set this if you want to delete all pitr chunks on cluster deletion + # - percona.com/delete-pitr-chunks + + nameOverride: "" + fullnameOverride: "" + + crVersion: 1.18.0 + pause: false + unmanaged: false + unsafeFlags: + tls: false + replsetSize: true + mongosSize: false + terminationGracePeriod: false + backupIfUnhealthy: false + + enableVolumeExpansion: false + + annotations: {} + + # ignoreAnnotations: + # - service.beta.kubernetes.io/aws-load-balancer-backend-protocol + # ignoreLabels: + # - rack + multiCluster: + enabled: false + # DNSSuffix: svc.clusterset.local + updateStrategy: SmartUpdate + upgradeOptions: + versionServiceEndpoint: https://check.percona.com + apply: disabled + schedule: "0 2 * * *" + setFCV: false + + image: + repository: percona/percona-server-mongodb + tag: 7.0.14-8-multi + + 
imagePullPolicy: Always + # imagePullSecrets: [] + # initImage: + # repository: percona/percona-server-mongodb-operator + # tag: 1.18.0 + # initContainerSecurityContext: {} + # tls: + # mode: preferTLS + # # 90 days in hours + # certValidityDuration: 2160h + # allowInvalidCertificates: true + # issuerConf: + # name: special-selfsigned-issuer + # kind: ClusterIssuer + # group: cert-manager.io + secrets: {} + # If you set users secret here the operator will use existing one or generate random values + # If not set the operator generates the default secret with name -secrets + # users: my-cluster-name-secrets + # encryptionKey: my-cluster-name-mongodb-encryption-key + # keyFile: my-cluster-name-mongodb-keyfile + # vault: my-cluster-name-vault + # ldapSecret: my-ldap-secret + # sse: my-cluster-name-sse + + pmm: + enabled: false + image: + repository: percona/pmm-client + tag: 2.43.2 + serverHost: monitoring-service + # mongodParams: "" + # mongosParams: "" + # resources: {} + # containerSecurityContext: {} + + replsets: + rs0: + name: rs0 + size: 3 + # terminationGracePeriodSeconds: 300 + # externalNodes: + # - host: 34.124.76.90 + # - host: 34.124.76.91 + # port: 27017 + # votes: 0 + # priority: 0 + # - host: 34.124.76.92 + # configuration: | + # operationProfiling: + # mode: slowOp + # systemLog: + # verbosity: 1 + # serviceAccountName: percona-server-mongodb-operator + # topologySpreadConstraints: + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: percona-server-mongodb + # maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: DoNotSchedule + # replsetOverrides: + # my-cluster-name-rs0-0: + # host: my-cluster-name-rs0-0.example.net:27017 + # tags: + # key: value-0 + # my-cluster-name-rs0-1: + # host: my-cluster-name-rs0-1.example.net:27017 + # tags: + # key: value-1 + # my-cluster-name-rs0-2: + # host: my-cluster-name-rs0-2.example.net:27017 + # tags: + # key: value-2 + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" 
+ # advanced: + # podAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: security + # operator: In + # values: + # - S1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # tolerations: [] + # primaryPreferTagSelector: + # region: us-west-2 + # zone: us-west-2c + # priorityClass: "" + # annotations: {} + # labels: {} + # podSecurityContext: {} + # containerSecurityContext: {} + # nodeSelector: {} + # livenessProbe: + # failureThreshold: 4 + # initialDelaySeconds: 60 + # periodSeconds: 30 + # timeoutSeconds: 10 + # startupDelaySeconds: 7200 + # readinessProbe: + # failureThreshold: 8 + # initialDelaySeconds: 10 + # periodSeconds: 3 + # successThreshold: 1 + # timeoutSeconds: 2 + # runtimeClassName: image-rc + # storage: + # engine: wiredTiger + # wiredTiger: + # engineConfig: + # cacheSizeRatio: 0.5 + # directoryForIndexes: false + # journalCompressor: snappy + # collectionConfig: + # blockCompressor: snappy + # indexConfig: + # prefixCompression: true + # inMemory: + # engineConfig: + # inMemorySizeRatio: 0.5 + # sidecars: + # - image: busybox + # command: ["/bin/sh"] + # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"] + # name: rs-sidecar-1 + # volumeMounts: + # - mountPath: /volume1 + # name: sidecar-volume-claim + # - mountPath: /secret + # name: sidecar-secret + # - mountPath: /configmap + # name: sidecar-config + # sidecarVolumes: + # - name: sidecar-secret + # secret: + # secretName: mysecret + # - name: sidecar-config + # configMap: + # name: myconfigmap + # sidecarPVCs: + # - apiVersion: v1 + # kind: PersistentVolumeClaim + # metadata: + # name: sidecar-volume-claim + # spec: + # resources: + # requests: + # storage: 1Gi + # volumeMode: Filesystem + # accessModes: + # - ReadWriteOnce + podDisruptionBudget: + maxUnavailable: 1 + # splitHorizons: + # my-cluster-name-rs0-0: + # external: rs0-0.mycluster.xyz + # external-2: rs0-0.mycluster2.xyz + # 
my-cluster-name-rs0-1: + # external: rs0-1.mycluster.xyz + # external-2: rs0-1.mycluster2.xyz + # my-cluster-name-rs0-2: + # external: rs0-2.mycluster.xyz + # external-2: rs0-2.mycluster2.xyz + expose: + enabled: false + type: ClusterIP + # loadBalancerIP: 10.0.0.0 + # loadBalancerSourceRanges: + # - 10.0.0.0/8 + # annotations: + # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # labels: + # some-label: some-key + # internalTrafficPolicy: Local + # schedulerName: "" + resources: + limits: + cpu: "300m" + memory: "0.5G" + requests: + cpu: "300m" + memory: "0.5G" + volumeSpec: + # emptyDir: {} + # hostPath: + # path: /data + # type: Directory + pvc: + # annotations: + # volume.beta.kubernetes.io/storage-class: example-hostpath + # labels: + # rack: rack-22 + # storageClassName: standard + # accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 3Gi + # hostAliases: + # - ip: "10.10.0.2" + # hostnames: + # - "host1" + # - "host2" + nonvoting: + enabled: false + # podSecurityContext: {} + # containerSecurityContext: {} + size: 3 + # configuration: | + # operationProfiling: + # mode: slowOp + # systemLog: + # verbosity: 1 + # serviceAccountName: percona-server-mongodb-operator + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + # advanced: + # podAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: security + # operator: In + # values: + # - S1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # tolerations: [] + # priorityClass: "" + # annotations: {} + # labels: {} + # nodeSelector: {} + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "300m" + memory: "0.5G" + requests: + cpu: "300m" + memory: "0.5G" + volumeSpec: + # emptyDir: {} + # hostPath: + # path: /data + # type: Directory + pvc: + # annotations: + # volume.beta.kubernetes.io/storage-class: example-hostpath + # labels: + # rack: rack-22 + # storageClassName: standard + 
# accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 3Gi + arbiter: + enabled: false + size: 1 + # serviceAccountName: percona-server-mongodb-operator + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + # advanced: + # podAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: security + # operator: In + # values: + # - S1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # tolerations: [] + # priorityClass: "" + # annotations: {} + # labels: {} + # nodeSelector: {} + + sharding: + enabled: true + balancer: + enabled: true + + configrs: + size: 3 + # terminationGracePeriodSeconds: 300 + # externalNodes: + # - host: 34.124.76.90 + # - host: 34.124.76.91 + # port: 27017 + # votes: 0 + # priority: 0 + # - host: 34.124.76.92 + # configuration: | + # operationProfiling: + # mode: slowOp + # systemLog: + # verbosity: 1 + # serviceAccountName: percona-server-mongodb-operator + # topologySpreadConstraints: + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: percona-server-mongodb + # maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: DoNotSchedule + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + # advanced: + # podAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: security + # operator: In + # values: + # - S1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # tolerations: [] + # priorityClass: "" + # annotations: {} + # labels: {} + # podSecurityContext: {} + # containerSecurityContext: {} + # nodeSelector: {} + # livenessProbe: {} + # readinessProbe: {} + # runtimeClassName: image-rc + # sidecars: + # - image: busybox + # command: ["/bin/sh"] + # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"] + # name: rs-sidecar-1 + # volumeMounts: + # - mountPath: /volume1 + # name: sidecar-volume-claim + # sidecarPVCs: [] 
+ # sidecarVolumes: [] + podDisruptionBudget: + maxUnavailable: 1 + expose: + enabled: false + type: ClusterIP + # loadBalancerIP: 10.0.0.0 + # loadBalancerSourceRanges: + # - 10.0.0.0/8 + # annotations: + # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # labels: + # some-label: some-key + # internalTrafficPolicy: Local + resources: + limits: + cpu: "300m" + memory: "0.5G" + requests: + cpu: "300m" + memory: "0.5G" + volumeSpec: + # emptyDir: {} + # hostPath: + # path: /data + # type: Directory + pvc: + # annotations: + # volume.beta.kubernetes.io/storage-class: example-hostpath + # labels: + # rack: rack-22 + # storageClassName: standard + # accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 3Gi + # hostAliases: + # - ip: "10.10.0.2" + # hostnames: + # - "host1" + # - "host2" + + mongos: + size: 3 + # terminationGracePeriodSeconds: 300 + # configuration: | + # systemLog: + # verbosity: 1 + # serviceAccountName: percona-server-mongodb-operator + # topologySpreadConstraints: + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: percona-server-mongodb + # maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: DoNotSchedule + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + # advanced: + # podAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: security + # operator: In + # values: + # - S1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # tolerations: [] + # priorityClass: "" + # annotations: {} + # labels: {} + # podSecurityContext: {} + # containerSecurityContext: {} + # nodeSelector: {} + # livenessProbe: {} + # readinessProbe: {} + # runtimeClassName: image-rc + # sidecars: + # - image: busybox + # command: ["/bin/sh"] + # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"] + # name: rs-sidecar-1 + # volumeMounts: + # - mountPath: /volume1 + # name: sidecar-volume-claim + 
# sidecarPVCs: [] + # sidecarVolumes: [] + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "300m" + memory: "0.5G" + requests: + cpu: "300m" + memory: "0.5G" + expose: + enabled: false + type: ClusterIP + # loadBalancerIP: 10.0.0.0 + # loadBalancerSourceRanges: + # - 10.0.0.0/8 + # annotations: + # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # labels: + # some-label: some-key + # internalTrafficPolicy: Local + # nodePort: 32017 + # auditLog: + # destination: file + # format: BSON + # filter: '{}' + # hostAliases: + # - ip: "10.10.0.2" + # hostnames: + # - "host1" + # - "host2" + + # users: + # - name: my-user + # db: admin + # passwordSecretRef: + # name: my-user-password + # key: my-user-password-key + # roles: + # - name: clusterAdmin + # db: admin + # - name: userAdminAnyDatabase + # db: admin + # - name: my-usr + # db: admin + # passwordSecretRef: + # name: my-user-pwd + # key: my-user-pwd-key + # roles: + # - name: dbOwner + # db: sometest + + # roles: + # - role: myClusterwideAdmin + # db: admin + # privileges: + # - resource: + # cluster: true + # actions: + # - addShard + # - resource: + # db: config + # collection: '' + # actions: + # - find + # - update + # - insert + # - remove + # roles: + # - role: read + # db: admin + # - role: my-role + # db: myDb + # privileges: + # - resource: + # db: '' + # collection: '' + # actions: + # - find + # authenticationRestrictions: + # - clientSource: + # - 127.0.0.1 + # serverAddress: + # - 127.0.0.1 + + + backup: + enabled: false + image: + repository: percona/percona-backup-mongodb + tag: 2.7.0-multi + # annotations: + # iam.amazonaws.com/role: role-arn + # podSecurityContext: {} + # containerSecurityContext: {} + # resources: + # limits: + # cpu: "300m" + # memory: "1.2G" + # requests: + # cpu: "300m" + # memory: "1G" + storages: + # s3-us-west: + # type: s3 + # s3: + # bucket: S3-BACKUP-BUCKET-NAME-HERE + # credentialsSecret: my-cluster-name-backup-s3 + # 
serverSideEncryption: + # kmsKeyID: 1234abcd-12ab-34cd-56ef-1234567890ab + # sseAlgorithm: aws:kms + # sseCustomerAlgorithm: AES256 + # sseCustomerKey: Y3VzdG9tZXIta2V5 + # retryer: + # numMaxRetries: 3 + # minRetryDelay: 30ms + # maxRetryDelay: 5m + # region: us-west-2 + # prefix: "" + # uploadPartSize: 10485760 + # maxUploadParts: 10000 + # storageClass: STANDARD + # insecureSkipTLSVerify: false + # minio: + # type: s3 + # s3: + # bucket: MINIO-BACKUP-BUCKET-NAME-HERE + # region: us-east-1 + # credentialsSecret: my-cluster-name-backup-minio + # endpointUrl: http://minio.psmdb.svc.cluster.local:9000/minio/ + # prefix: "" + # azure-blob: + # type: azure + # azure: + # container: percona-container + # prefix: backups + # endpointUrl: https://perconasa.blob.core.windows.net + # credentialsSecret: perconasasecret + pitr: + enabled: false + oplogOnly: false + # oplogSpanMin: 10 + # compressionType: gzip + # compressionLevel: 6 + # configuration: + # backupOptions: + # priority: + # "localhost:28019": 2.5 + # "localhost:27018": 2.5 + # timeouts: + # startingStatus: 33 + # oplogSpanMin: 10 + # restoreOptions: + # batchSize: 500 + # numInsertionWorkers: 10 + # numDownloadWorkers: 4 + # maxDownloadBufferMb: 0 + # downloadChunkMb: 32 + # mongodLocation: /usr/bin/mongo + # mongodLocationMap: + # "node01:27017": /usr/bin/mongo + # "node03:27017": /usr/bin/mongo + tasks: + # - name: daily-s3-us-west + # enabled: true + # schedule: "0 0 * * *" + # keep: 3 + # storageName: s3-us-west + # compressionType: gzip + # - name: weekly-s3-us-west + # enabled: false + # schedule: "0 0 * * 0" + # keep: 5 + # storageName: s3-us-west + # compressionType: gzip + # - name: weekly-s3-us-west-physical + # enabled: false + # schedule: "0 5 * * 0" + # keep: 5 + # type: physical + # storageName: s3-us-west + # compressionType: gzip + # compressionLevel: 6 + + # If you set systemUsers here the secret will be constructed by helm with these values + # systemUsers: + # MONGODB_BACKUP_USER: backup + # 
MONGODB_BACKUP_PASSWORD: backup123456 + # MONGODB_DATABASE_ADMIN_USER: databaseAdmin + # MONGODB_DATABASE_ADMIN_PASSWORD: databaseAdmin123456 + # MONGODB_CLUSTER_ADMIN_USER: clusterAdmin + # MONGODB_CLUSTER_ADMIN_PASSWORD: clusterAdmin123456 + # MONGODB_CLUSTER_MONITOR_USER: clusterMonitor + # MONGODB_CLUSTER_MONITOR_PASSWORD: clusterMonitor123456 + # MONGODB_USER_ADMIN_USER: userAdmin + # MONGODB_USER_ADMIN_PASSWORD: userAdmin123456 + # PMM_SERVER_API_KEY: apikey + # # PMM_SERVER_USER: admin + # # PMM_SERVER_PASSWORD: admin + +backup: + enabled: true + annotations: + description: "test" + name: backup + labels: + app: mongo-backup + environment: testing + clusterName: mdb-db-psmdb-db + storageName: azure-blob + type: logical + +restore: + enabled: true + annotations: + description: "test" + name: restore1 + labels: + app: mongo-restore + environment: testing + clusterName: mdb-db-psmdb-db + backupName: backup