diff --git a/.gitignore b/.gitignore index 5e379323af0..2df3774fd5b 100644 --- a/.gitignore +++ b/.gitignore @@ -79,3 +79,4 @@ product_docs/content/ product_docs/content_build/ static/nginx_redirects.generated temp_kubernetes/ +advocacy_docs/kubernetes/cloud_native_postgresql/*.md.in diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx index e4656eab0c1..d1d62a5efa3 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx @@ -20,365 +20,410 @@ documentation for examples of usage. Below you will find a description of the defined resources: - -* [Backup](#backup) -* [BackupList](#backuplist) -* [BackupSpec](#backupspec) -* [BackupStatus](#backupstatus) -* [AffinityConfiguration](#affinityconfiguration) -* [BackupConfiguration](#backupconfiguration) -* [BarmanObjectStoreConfiguration](#barmanobjectstoreconfiguration) -* [BootstrapConfiguration](#bootstrapconfiguration) -* [BootstrapInitDB](#bootstrapinitdb) -* [BootstrapRecovery](#bootstraprecovery) -* [Cluster](#cluster) -* [ClusterList](#clusterlist) -* [ClusterSpec](#clusterspec) -* [ClusterStatus](#clusterstatus) -* [DataBackupConfiguration](#databackupconfiguration) -* [MonitoringConfiguration](#monitoringconfiguration) -* [NodeMaintenanceWindow](#nodemaintenancewindow) -* [PostgresConfiguration](#postgresconfiguration) -* [RecoveryTarget](#recoverytarget) -* [RollingUpdateStatus](#rollingupdatestatus) -* [S3Credentials](#s3credentials) -* [StorageConfiguration](#storageconfiguration) -* [WalBackupConfiguration](#walbackupconfiguration) -* [ScheduledBackup](#scheduledbackup) -* [ScheduledBackupList](#scheduledbackuplist) -* [ScheduledBackupSpec](#scheduledbackupspec) -* [ScheduledBackupStatus](#scheduledbackupstatus) - -## Backup -Backup is the Schema for the backups API - -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| metadata | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta) | false | -| spec | Specification of the desired behavior of the backup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupSpec](#backupspec) | false | -| status | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupStatus](#backupstatus) | false | +- [AffinityConfiguration](#AffinityConfiguration) +- [Backup](#Backup) +- [BackupConfiguration](#BackupConfiguration) +- [BackupList](#BackupList) +- [BackupSpec](#BackupSpec) +- [BackupStatus](#BackupStatus) +- [BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) +- [BootstrapConfiguration](#BootstrapConfiguration) +- [BootstrapInitDB](#BootstrapInitDB) +- [BootstrapRecovery](#BootstrapRecovery) +- [Cluster](#Cluster) +- [ClusterList](#ClusterList) +- [ClusterSpec](#ClusterSpec) +- [ClusterStatus](#ClusterStatus) +- [DataBackupConfiguration](#DataBackupConfiguration) +- [MonitoringConfiguration](#MonitoringConfiguration) +- [NodeMaintenanceWindow](#NodeMaintenanceWindow) +- [PostgresConfiguration](#PostgresConfiguration) +- [RecoveryTarget](#RecoveryTarget) +- [RollingUpdateStatus](#RollingUpdateStatus) +- [S3Credentials](#S3Credentials) +- [ScheduledBackup](#ScheduledBackup) +- [ScheduledBackupList](#ScheduledBackupList) +- [ScheduledBackupSpec](#ScheduledBackupSpec) +- [ScheduledBackupStatus](#ScheduledBackupStatus) +- [SecretsResourceVersion](#SecretsResourceVersion) +- [StorageConfiguration](#StorageConfiguration) +- [WalBackupConfiguration](#WalBackupConfiguration) + + + + +## `AffinityConfiguration` +AffinityConfiguration contains the info we need to create the affinity rules for Pods -## BackupList +Name | Description | Type +--------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------- +`enablePodAntiAffinity` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | *bool +`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string +`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string -BackupList contains a list of Backup + -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| metadata | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#listmeta-v1-meta) | false | -| items | List of backups | \[][Backup](#backup) | true | +## `Backup` +Backup is the Schema for the backups API -## BackupSpec +Name | Description | Type +-------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ +`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) +`spec ` | Specification of the desired behavior of the backup. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupSpec](#BackupSpec) +`status ` | Most recently observed status of the backup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [BackupStatus](#BackupStatus) -BackupSpec defines the desired state of Backup + -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| cluster | The cluster to backup | [v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#localobjectreference-v1-core) | false | +## `BackupConfiguration` +BackupConfiguration defines how the backup of the cluster are taken. Currently the only supported backup method is barmanObjectStore. For details and examples refer to the Backup and Recovery section of the documentation -## BackupStatus +Name | Description | Type +----------------- | ------------------------------------------------- | ------------------------------------------------------------------ +`barmanObjectStore` | The configuration for the barman-cloud tool suite | [*BarmanObjectStoreConfiguration](#BarmanObjectStoreConfiguration) -BackupStatus defines the observed state of Backup + -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| s3Credentials | The credentials to use to upload data to S3 | [S3Credentials](#s3credentials) | true | -| endpointURL | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string | false | -| destinationPath | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data | string | true | -| serverName | The server name on S3, the cluster name is used if this parameter is omitted | string | false | -| encryption | Encryption method required to S3 API | string | false | -| backupId | The ID of the Barman backup | string | false | -| phase | The last backup status | BackupPhase | false | -| startedAt | When the backup was started | *metav1.Time | false | -| stoppedAt | When the backup was terminated | *metav1.Time | false | -| error | The detected error | string | false | -| commandOutput | The backup command output | string | false | -| commandError | The backup command output | string | false | +## `BackupList` +BackupList contains a list of Backup -## AffinityConfiguration +Name | Description | Type +-------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- +`metadata` | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) +`items ` | List of backups - *mandatory* | [[]Backup](#Backup) -AffinityConfiguration contains the info we need to create the affinity rules for Pods + -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| enablePodAntiAffinity | Activates anti-affinity for the pods. 
The operator will define pods anti-affinity unless this field is explicitly set to false | *bool | false | -| topologyKey | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that | string | true | -| nodeSelector | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string | false | +## `BackupSpec` +BackupSpec defines the desired state of Backup -## BackupConfiguration +Name | Description | Type +------- | --------------------- | ---------------------------------------------------------------------------------------------------------------------------- +`cluster` | The cluster to backup | [v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) -BackupConfiguration defines how the backup of the cluster are taken. Currently the only supported backup method is barmanObjectStore. For details and examples refer to the Backup and Recovery section of the documentation + -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| barmanObjectStore | The configuration for the barman-cloud tool suite | *[BarmanObjectStoreConfiguration](#barmanobjectstoreconfiguration) | false | +## `BackupStatus` +BackupStatus defines the observed state of Backup -## BarmanObjectStoreConfiguration +Name | Description | Type +--------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------- +`s3Credentials ` | The credentials to use to upload data to S3 - *mandatory* | [S3Credentials](#S3Credentials) +`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string +`destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string +`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string +`encryption ` | Encryption method required to S3 API | string +`backupId ` | The ID of the Barman backup | string +`phase ` | The last backup status | BackupPhase +`startedAt ` | When the backup was started | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`stoppedAt ` | When the backup was terminated | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) +`error ` | The detected error | string +`commandOutput ` | The backup command output | string +`commandError ` | The backup command output | string + + + +## `BarmanObjectStoreConfiguration` BarmanObjectStoreConfiguration contains the backup configuration using Barman against an S3-compatible object storage -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| s3Credentials | The credentials to use to upload data to S3 | [S3Credentials](#s3credentials) | true | -| endpointURL | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string | false | -| destinationPath | The path where to store the backup (i.e. 
s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data | string | true | -| serverName | The server name on S3, the cluster name is used if this parameter is omitted | string | false | -| wal | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | *[WalBackupConfiguration](#walbackupconfiguration) | false | -| data | The configuration to be used to backup the data files When not defined, base backups files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | *[DataBackupConfiguration](#databackupconfiguration) | false | +Name | Description | Type +--------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------- +`s3Credentials ` | The credentials to use to upload data to S3 - *mandatory* | [S3Credentials](#S3Credentials) +`endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string +`destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string +`serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string +`wal ` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*WalBackupConfiguration](#WalBackupConfiguration) +`data ` | The configuration to be used to backup the data files When not defined, base backups files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*DataBackupConfiguration](#DataBackupConfiguration) + -## BootstrapConfiguration +## `BootstrapConfiguration` BootstrapConfiguration contains information about how to create the PostgreSQL cluster. Only a single bootstrap method can be defined among the supported ones. `initdb` will be used as the bootstrap method if left unspecified. Refer to the Bootstrap page of the documentation for more information. -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| initdb | Bootstrap the cluster via initdb | *[BootstrapInitDB](#bootstrapinitdb) | false | -| recovery | Bootstrap the cluster from a backup | *[BootstrapRecovery](#bootstraprecovery) | false | +Name | Description | Type +-------- | ----------------------------------- | ---------------------------------------- +`initdb ` | Bootstrap the cluster via initdb | [*BootstrapInitDB](#BootstrapInitDB) +`recovery` | Bootstrap the cluster from a backup | [*BootstrapRecovery](#BootstrapRecovery) + -## BootstrapInitDB +## `BootstrapInitDB` BootstrapInitDB is the configuration of the bootstrap process when initdb is used Refer to the Bootstrap page of the documentation for more information. -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| database | Name of the database used by the application. Default: `app`. 
| string | true |
-| owner | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. | string | true |
-| secret | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | *corev1.LocalObjectReference | false |
-| redwood | If we need to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | *bool | false |
-| options | The list of options that must be passed to initdb when creating the cluster | []string | false |
+Name | Description | Type
+-------- | -------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------
+`database` | Name of the database used by the application. Default: `app`. - *mandatory* | string
+`owner ` | Name of the owner of the database in the instance to be used by applications. Defaults to the value of the `database` key. - *mandatory* | string
+`secret ` | Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch | [*corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core)
+`redwood ` | Whether to enable/disable Redwood compatibility. Requires EPAS and for EPAS defaults to true | *bool
+`options ` | The list of options that must be passed to initdb when creating the cluster | []string
+

-## BootstrapRecovery
+## `BootstrapRecovery`

BootstrapRecovery contains the configuration required to restore the backup with the specified name and, after having changed the password with the one chosen for the superuser, will use it to bootstrap a full cluster cloning all the instances from the restored primary. Refer to the Bootstrap page of the documentation for more information.

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| backup | The backup we need to restore | corev1.LocalObjectReference | true |
-| recoveryTarget | By default the recovery will end as soon as a consistent state is reached: in this case that means at the end of a backup. This option allows to fine tune the recovery process | *[RecoveryTarget](#recoverytarget) | false |
+Name | Description | Type
+-------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------
+`backup ` | The backup we need to restore - *mandatory* | [corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core)
+`recoveryTarget` | By default the recovery will end as soon as a consistent state is reached: in this case that means at the end of a backup. This option allows you to fine-tune the recovery process | [*RecoveryTarget](#RecoveryTarget)
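To make the bootstrap options above more concrete, here is a minimal, illustrative `initdb` bootstrap manifest. It only uses fields documented in the tables above; the cluster name and sizing are hypothetical:

```yaml
# Hypothetical sketch: bootstrap an empty cluster via initdb,
# creating an "app" database owned by the "app" user.
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-example-initdb
spec:
  instances: 3
  bootstrap:
    initdb:
      database: app
      owner: app
  storage:
    size: 1Gi
```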
+

-## Cluster
+## `Cluster`

Cluster is the Schema for the PostgreSQL API

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| metadata | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta) | false |
-| spec | Specification of the desired behavior of the cluster. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterSpec](#clusterspec) | false |
-| status | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterStatus](#clusterstatus) | false |
+Name | Description | Type
+-------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------
+`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta)
+`spec ` | Specification of the desired behavior of the cluster. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterSpec](#ClusterSpec)
+`status ` | Most recently observed status of the cluster. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ClusterStatus](#ClusterStatus)
+

-## ClusterList
+## `ClusterList`

ClusterList contains a list of Cluster

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| metadata | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#listmeta-v1-meta) | false |
-| items | List of clusters | \[][Cluster](#cluster) | true |
+Name | Description | Type
+-------- | ---------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------
+`metadata` | Standard list metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta) +`items ` | List of clusters - *mandatory* | [[]Cluster](#Cluster) + -## ClusterSpec +## `ClusterSpec` ClusterSpec defines the desired state of Cluster -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| description | Description of this PostgreSQL cluster | string | false | -| imageName | Name of the container image | string | false | -| postgresUID | The UID of the `postgres` user inside the image, defaults to `26` | int64 | false | -| postgresGID | The GID of the `postgres` user inside the image, defaults to `26` | int64 | false | -| instances | Number of instances required in the cluster | int32 | true | -| minSyncReplicas | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32 | false | -| maxSyncReplicas | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32 | false | -| postgresql | Configuration of the PostgreSQL server | [PostgresConfiguration](#postgresconfiguration) | false | -| bootstrap | Instructions to bootstrap this cluster | *[BootstrapConfiguration](#bootstrapconfiguration) | false | -| superuserSecret | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | *corev1.LocalObjectReference | false | -| imagePullSecrets | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | []corev1.LocalObjectReference | false | -| storage | Configuration of the storage of the instances | [StorageConfiguration](#storageconfiguration) | false | -| startDelay | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 | false | -| stopDelay | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 | false | -| affinity | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#affinityconfiguration) | false | -| resources | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | corev1.ResourceRequirements | false | -| primaryUpdateStrategy | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy | false | -| backup | The configuration to be used for backups | *[BackupConfiguration](#backupconfiguration) | false | -| nodeMaintenanceWindow | Define a maintenance window for the Kubernetes nodes | *[NodeMaintenanceWindow](#nodemaintenancewindow) | false | -| licenseKey | The license key of the cluster. When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. 
| string | false | -| monitoring | The configuration of the monitoring infrastructure of this cluster | *[MonitoringConfiguration](#monitoringconfiguration) | false | - - -## ClusterStatus +Name | Description | Type +--------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- +`description ` | Description of this PostgreSQL cluster | string +`imageName ` | Name of the container image | string +`postgresUID ` | The UID of the `postgres` user inside the image, defaults to `26` | int64 +`postgresGID ` | The GID of the `postgres` user inside the image, defaults to `26` | int64 +`instances ` | Number of instances required in the cluster - *mandatory* | int32 +`minSyncReplicas ` | Minimum number of instances required in synchronous replication with the primary. Undefined or 0 allow writes to complete when no standby is available. | int32 +`maxSyncReplicas ` | The target value for the synchronous replication quorum, that can be decreased if the number of ready standbys is lower than this. Undefined or 0 disable synchronous replication. | int32 +`postgresql ` | Configuration of the PostgreSQL server | [PostgresConfiguration](#PostgresConfiguration) +`bootstrap ` | Instructions to bootstrap this cluster | [*BootstrapConfiguration](#BootstrapConfiguration) +`superuserSecret ` | The secret containing the superuser password. If not defined a new secret will be created with a randomly generated password | [*corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +`imagePullSecrets ` | The list of pull secrets to be used to pull the images. If the license key contains a pull secret that secret will be automatically included. | [[]corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core) +`storage ` | Configuration of the storage of the instances | [StorageConfiguration](#StorageConfiguration) +`startDelay ` | The time in seconds that is allowed for a PostgreSQL instance to successfully start up (default 30) | int32 +`stopDelay ` | The time in seconds that is allowed for a PostgreSQL instance node to gracefully shutdown (default 30) | int32 +`affinity ` | Affinity/Anti-affinity rules for Pods | [AffinityConfiguration](#AffinityConfiguration) +`resources ` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core) +`primaryUpdateStrategy` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (`unsupervised` - default) or manual (`supervised`) | PrimaryUpdateStrategy +`backup ` | The configuration to be used for backups | [*BackupConfiguration](#BackupConfiguration) +`nodeMaintenanceWindow` | Define a maintenance window for the Kubernetes nodes | [*NodeMaintenanceWindow](#NodeMaintenanceWindow) +`licenseKey ` | The license key of the cluster. 
When empty, the cluster operates in trial mode and after the expiry date (default 30 days) the operator will cease any reconciliation attempt. For details, please refer to the license agreement that comes with the operator. | string +`monitoring ` | The configuration of the monitoring infrastructure of this cluster | [*MonitoringConfiguration](#MonitoringConfiguration) + + + +## `ClusterStatus` ClusterStatus defines the observed state of Cluster -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| instances | Total number of instances in the cluster | int32 | false | -| readyInstances | Total number of ready instances in the cluster | int32 | false | -| instancesStatus | Instances status | map[utils.PodStatus][]string | false | -| latestGeneratedNode | ID of the latest generated node (used to avoid node name clashing) | int32 | false | -| currentPrimary | Current primary instance | string | false | -| targetPrimary | Target primary instance, this is different from the previous one during a switchover or a failover | string | false | -| pvcCount | How many PVCs have been created by this cluster | int32 | false | -| jobCount | How many Jobs have been created by this cluster | int32 | false | -| danglingPVC | List of all the PVCs created by this cluster and still available which are not attached to a Pod | []string | false | -| initializingPVC | List of all the PVCs that are being initialized by this cluster | []string | false | -| licenseStatus | Status of the license | licensekey.Status | false | -| writeService | Current write pod | string | false | -| readService | Current list of read pods | string | false | -| phase | Current phase of the cluster | string | false | -| phaseReason | Reason for the current phase | string | false | - - -## DataBackupConfiguration +Name | Description | Type +---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- +`instances ` | Total number of instances in the cluster | int32 +`readyInstances ` | Total number of ready instances in the cluster | int32 +`instancesStatus ` | Instances status | map[utils.PodStatus][]string +`latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int32 +`currentPrimary ` | Current primary instance | string +`targetPrimary ` | Target primary instance, this is different from the previous one during a switchover or a failover | string +`pvcCount ` | How many PVCs have been created by this cluster | int32 +`jobCount ` | How many Jobs have been created by this cluster | int32 +`danglingPVC ` | List of all the PVCs created by this cluster and still available which are not attached to a Pod | []string +`initializingPVC ` | List of all the PVCs that are being initialized by this cluster | []string +`licenseStatus ` | Status of the license | licensekey.Status +`writeService ` | Current write pod | string +`readService ` | Current list of read pods | string +`phase ` | Current phase of the cluster | string +`phaseReason ` | Reason for the current phase | string +`secretsResourceVersion` | The list of resource versions of the secrets managed by the operator. 
Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion)
+

-## DataBackupConfiguration
+## `DataBackupConfiguration`

DataBackupConfiguration is the configuration of the backup of the data directory

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| compression | Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType | false |
-| encryption | Whenever to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType | false |
-| immediateCheckpoint | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. | bool | false |
-| jobs | The number of parallel jobs to be used to upload the backup, defaults to 2 | *int32 | false |
+Name | Description | Type
+------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------
+`compression ` | Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType
+`encryption ` | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType
+`immediateCheckpoint` | Control whether the I/O workload for the backup initial checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to true, an immediate checkpoint will be used, meaning PostgreSQL will complete the checkpoint as soon as possible. `false` by default. | bool
+`jobs ` | The number of parallel jobs to be used to upload the backup, defaults to 2 | *int32
+

-## MonitoringConfiguration
+## `MonitoringConfiguration`

MonitoringConfiguration is the type containing all the monitoring configuration for a certain cluster

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| customQueriesConfigMap | The list of config maps containing the custom queries | []corev1.ConfigMapKeySelector | false |
-| customQueriesSecret | The list of secrets containing the custom queries | []corev1.SecretKeySelector | false |
+Name | Description | Type
+---------------------- | ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------
+`customQueriesConfigMap` | The list of config maps containing the custom queries | [[]corev1.ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#configmapkeyselector-v1-core)
+`customQueriesSecret ` | The list of secrets containing the custom queries | [[]corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core)
+

-## NodeMaintenanceWindow
+## `NodeMaintenanceWindow`

NodeMaintenanceWindow contains information that the operator will use while upgrading the underlying node. This option is only useful when the chosen storage prevents the Pods from being freely moved across nodes.

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| inProgress | Is there a node maintenance activity in progress? | bool | true |
-| reusePVC | Reuse the existing PVC (wait for the node to come up again) or not (recreate it elsewhere) | *bool | true |
+Name | Description | Type
+---------- | ------------------------------------------------------------------------------------------ | -----
+`inProgress` | Is there a node maintenance activity in progress? - *mandatory* | bool
+`reusePVC ` | Reuse the existing PVC (wait for the node to come up again) or not (recreate it elsewhere) - *mandatory* | *bool
+

-## PostgresConfiguration
+## `PostgresConfiguration`

PostgresConfiguration defines the PostgreSQL configuration

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| parameters | PostgreSQL configuration options (postgresql.conf) | map[string]string | false |
-| pg_hba | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | []string | false |
+Name | Description | Type
+---------- | ----------------------------------------------------------------------------------------- | -----------------
+`parameters` | PostgreSQL configuration options (postgresql.conf) | map[string]string
+`pg_hba ` | PostgreSQL Host Based Authentication rules (lines to be appended to the pg_hba.conf file) | []string
+

-## RecoveryTarget
+## `RecoveryTarget`

RecoveryTarget allows to configure the moment where the recovery process will stop. All the target options except TargetTLI are mutually exclusive.
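As a hedged illustration of how these fields combine with the `recovery` bootstrap method described earlier, a point-in-time restore might look like the sketch below (the backup name and timestamp are hypothetical; only fields documented in the tables here are used):

```yaml
# Hypothetical sketch: restore from a Backup resource and stop
# replaying WAL at a specific point in time.
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: cluster-example-restore
spec:
  instances: 3
  storage:
    size: 1Gi
  bootstrap:
    recovery:
      backup:
        name: backup-example
      recoveryTarget:
        targetTime: "2021-04-21 12:00:00.00000+00"
```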
-| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| targetTLI | The target timeline (\"latest\", \"current\" or a positive integer) | string | false | -| targetXID | The target transaction ID | string | false | -| targetName | The target name (to be previously created with `pg_create_restore_point`) | string | false | -| targetLSN | The target LSN (Log Sequence Number) | string | false | -| targetTime | The target time, in any unambiguous representation allowed by PostgreSQL | string | false | -| targetImmediate | End recovery as soon as a consistent state is reached | *bool | false | -| exclusive | Set the target to be exclusive (defaults to true) | *bool | false | +Name | Description | Type +--------------- | ------------------------------------------------------------------------- | ------ +`targetTLI ` | The target timeline ("latest", "current" or a positive integer) | string +`targetXID ` | The target transaction ID | string +`targetName ` | The target name (to be previously created with `pg_create_restore_point`) | string +`targetLSN ` | The target LSN (Log Sequence Number) | string +`targetTime ` | The target time, in any unambiguous representation allowed by PostgreSQL | string +`targetImmediate` | End recovery as soon as a consistent state is reached | *bool +`exclusive ` | Set the target to be exclusive (defaults to true) | *bool + -## RollingUpdateStatus +## `RollingUpdateStatus` RollingUpdateStatus contains the information about an instance which is being updated -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| imageName | The image which we put into the Pod | string | true | -| startedAt | When the update has been started | metav1.Time | false | +Name | Description | Type +--------- | ----------------------------------- | ------------------------------------------------------------------------------------------------ +`imageName` | The image which we put into the Pod - *mandatory* | string +`startedAt` | When the update has been started | [metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta) + -## S3Credentials +## `S3Credentials` S3Credentials is the type for the credentials to be used to upload files to S3 -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| accessKeyId | The reference to the access key id | corev1.SecretKeySelector | true | -| secretAccessKey | The reference to the secret access key | corev1.SecretKeySelector | true | +Name | Description | Type +--------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- +`accessKeyId ` | The reference to the access key id - *mandatory* | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) +`secretAccessKey` | The reference to the secret access key - *mandatory* | [corev1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core) + -## StorageConfiguration +## `ScheduledBackup` -StorageConfiguration is the configuration of the storage of the PostgreSQL instances +ScheduledBackup is the Schema for the scheduledbackups API -| Field | Description | Scheme | Required | -| 
-------------------- | ------------------------------ | -------------------- | -------- | -| storageClass | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | *string | false | -| size | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. | string | true | -| resizeInUseVolumes | Resize existent PVCs, defaults to true | *bool | false | -| pvcTemplate | Template to be used to generate the Persistent Volume Claim | *corev1.PersistentVolumeClaimSpec | false | +Name | Description | Type +-------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ +`metadata` | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta) +`spec ` | Specification of the desired behavior of the ScheduledBackup. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupSpec](#ScheduledBackupSpec) +`status ` | Most recently observed status of the ScheduledBackup. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | [ScheduledBackupStatus](#ScheduledBackupStatus) + -## WalBackupConfiguration +## `ScheduledBackupList` -WalBackupConfiguration is the configuration of the backup of the WAL stream +ScheduledBackupList contains a list of ScheduledBackup -| Field | Description | Scheme | Required | -| -------------------- | ------------------------------ | -------------------- | -------- | -| compression | Compress a WAL file before sending it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType | false | -| encryption | Whenever to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType | false | +Name | Description | Type +-------- | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- +`metadata` | Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#listmeta-v1-meta)
`items ` | List of scheduled backups - *mandatory* | [[]ScheduledBackup](#ScheduledBackup)
+

## `ScheduledBackupSpec`

ScheduledBackupSpec defines the desired state of ScheduledBackup

Name | Description | Type
-------- | -------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------
`suspend ` | If this backup is suspended or not | *bool
`schedule` | The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - *mandatory* | string
`cluster ` | The cluster to backup | [v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#localobjectreference-v1-core)
+

## `ScheduledBackupStatus`

ScheduledBackupStatus defines the observed state of ScheduledBackup

Name | Description | Type
---------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------
`lastCheckTime ` | The latest time the schedule was checked | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta)
`lastScheduleTime` | The last time that a backup was successfully scheduled. | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta)
`nextScheduleTime` | Next time we will run a backup | [*metav1.Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#time-v1-meta)
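Putting `ScheduledBackupSpec` to work, a minimal sketch of a nightly schedule might look like this (the resource and cluster names are hypothetical, and the expression assumes the standard five-field Cron syntax referenced above):

```yaml
# Hypothetical sketch: back up cluster-example every day at midnight.
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: ScheduledBackup
metadata:
  name: backup-example-nightly
spec:
  schedule: "0 0 * * *"
  cluster:
    name: cluster-example
```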
+

## `SecretsResourceVersion`

SecretsResourceVersion is the resource versions of the secrets managed by the operator

Name | Description | Type
------------------------ | ----------------------------------------------------------------- | ------
`superuserSecretVersion ` | The resource version of the "postgres" user secret - *mandatory* | string
`replicationSecretVersion` | The resource version of the "streaming_replication" user secret - *mandatory* | string
`applicationSecretVersion` | The resource version of the "app" user secret - *mandatory* | string
`caSecretVersion ` | The resource version of the "ca" secret - *mandatory* | string
`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret - *mandatory* | string
+

## `StorageConfiguration`

StorageConfiguration is the configuration of the storage of the PostgreSQL instances

Name | Description | Type
------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------
`storageClass ` | StorageClass to use for database data (`PGDATA`). Applied after evaluating the PVC template, if available. If not specified, generated PVCs will be satisfied by the default storage class | *string
`size ` | Size of the storage. Required if not already specified in the PVC template. Changes to this field are automatically reapplied to the created PVCs. Size cannot be decreased. - *mandatory* | string
`resizeInUseVolumes` | Resize existent PVCs, defaults to true | *bool
`pvcTemplate ` | Template to be used to generate the Persistent Volume Claim | [*corev1.PersistentVolumeClaimSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#persistentvolumeclaim-v1-core)
+

## `WalBackupConfiguration`

WalBackupConfiguration is the configuration of the backup of the WAL stream

-| Field | Description | Scheme | Required |
-| -------------------- | ------------------------------ | -------------------- | -------- |
-| lastCheckTime | The latest time the schedule | *metav1.Time | false |
-| lastScheduleTime | Information when was the last time that backup was successfully scheduled. | *metav1.Time | false |
-| nextScheduleTime | Next time we will run a backup | *metav1.Time | false |
+Name | Description | Type
+----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------
+`compression` | Compress a WAL file before sending it to the object store. Available options are empty string (no compression, default), `gzip` or `bzip2`. | CompressionType
+`encryption ` | Whether to force the encryption of files (if the bucket is not already configured for that). Allowed options are empty string (use the bucket policy, default), `AES256` and `aws:kms` | EncryptionType
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
index 2af304bda15..b96cf1ec629 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx
@@ -112,7 +112,7 @@ kubectl create secret generic minio-creds \
   --from-literal=MINIO_SECRET_KEY=
 ```
 
-!!! NOTE "Note"
+!!! Note
     Cloud Object Storage credentials will be used only by MinIO Gateway in this case.
 
 !!! Important
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
index a111620e663..c0c89663bed 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx
@@ -128,7 +128,7 @@ To get a certificate, you need to provide a name for the secret to store
 the credentials, the cluster name, and a user for this certificate
 
 ```shell
-kubectl cnp certificate cluster-cert --cnp-cluster cluster-example --cnp-user appuser 
+kubectl cnp certificate cluster-cert --cnp-cluster cluster-example --cnp-user appuser
 ```
 
 After the secret is created, you can get it using `kubectl`
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
index 668c943cef4..8d69486b837 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx
@@ -44,6 +44,7 @@ and the following suite of E2E tests are performed on that cluster:
 * Restore from backup;
 * Pod affinity using `NodeSelector`;
 * Metrics collection;
+* Operator pod deletion;
 * Primary endpoint switch in case of failover in less than 10 seconds;
 * Primary endpoint switch in case of switchover in less than 20 seconds;
 * Recover from a degraded state in less than 60 seconds.
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
index 883a34ee88a..befdeef638c 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx
@@ -24,7 +24,9 @@ navigation:
   - rolling_update
   - backup_recovery
   - postgresql_conf
+  - operator_conf
   - storage
+  - labels_annotations
   - samples
   - monitoring
   - expose_pg_services
@@ -36,6 +38,7 @@ navigation:
   - container_images
   - operator_capability_levels
   - api_reference
+  - release_notes
   - credits
 ---
@@ -64,7 +67,7 @@ and is available under the [EnterpriseDB Limited Use License](https://www.enterp
 
 You can [evaluate Cloud Native PostgreSQL for free](evaluation.md).
You need a valid license key to use Cloud Native PostgreSQL in production. -!!! IMPORTANT +!!! Important Currently, based on the [Operator Capability Levels model](operator_capability_levels.md), users can expect a **"Level III - Full Lifecycle"** set of capabilities from the Cloud Native PostgreSQL Operator. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/installation.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/installation.mdx index ef356a15e30..64ce5f8b0a0 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/installation.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/installation.mdx @@ -11,12 +11,12 @@ product: 'Cloud Native Operator' The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.2.1.yaml) +You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.3.0.yaml) as follows: ```sh kubectl apply -f \ - https://get.enterprisedb.io/cnp/postgresql-operator-1.2.1.yaml + https://get.enterprisedb.io/cnp/postgresql-operator-1.3.0.yaml ``` Once you have run the `kubectl` command, Cloud Native PostgreSQL will be installed in your Kubernetes cluster. @@ -92,3 +92,9 @@ the pod will be rescheduled on another node. As far as OpenShift is concerned, details might differ depending on the selected installation method. + +!!! Seealso "Operator configuration" + You can change the default behavior of the operator by overriding + some default options. For more information, please refer to the + ["Operator configuration"](operator_conf.md) section. + diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx index 7bd177d943a..1256c5d2db7 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx @@ -1,5 +1,5 @@ --- -title: "Installation, Configuration and Demployment Demo" +title: "Installation, Configuration and Deployment Demo" description: "Walk through the process of installing, configuring and deploying the Cloud Native PostgreSQL Operator via a browser-hosted Minikube console" navTitle: Install, Configure, Deploy product: 'Cloud Native PostgreSQL Operator' @@ -21,6 +21,7 @@ Want to see what it takes to get the Cloud Native PostgreSQL Operator up and run 1. Installing the Cloud Native PostgreSQL Operator 2. Deploying a three-node PostgreSQL cluster 3. Installing and using the kubectl-cnp plugin +4. Testing failover to verify the resilience of the cluster It will take roughly 5-10 minutes to work through. @@ -64,7 +65,7 @@ You will see one node called `minikube`. 
If the status isn't yet "Ready", wait f Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation.md) section: ```shell -kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.2.0.yaml +kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.3.0.yaml __OUTPUT__ namespace/postgresql-operator-system created customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created @@ -164,13 +165,13 @@ metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"postgresql.k8s.enterprisedb.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"instances":3,"primaryUpdateStrategy":"unsupervised","storage":{"size":"1Gi"}}} - creationTimestamp: "2021-04-07T00:33:43Z" + creationTimestamp: "2021-04-27T15:11:21Z" generation: 1 name: cluster-example namespace: default - resourceVersion: "1806" + resourceVersion: "2572" selfLink: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/clusters/cluster-example - uid: 38ddc347-3f2e-412a-aa14-a26904e1a49e + uid: 6a693046-a9d0-41b0-ac68-7a96d7e2ff07 spec: affinity: topologyKey: "" @@ -196,14 +197,14 @@ status: instances: 3 instancesStatus: healthy: - - cluster-example-3 - cluster-example-1 - cluster-example-2 + - cluster-example-3 latestGeneratedNode: 3 licenseStatus: isImplicit: true isTrial: true - licenseExpiration: "2021-05-07T00:33:43Z" + licenseExpiration: "2021-05-27T15:11:21Z" licenseStatus: Implicit trial license repositoryAccess: false valid: true @@ -211,6 +212,12 @@ status: pvcCount: 3 readService: cluster-example-r readyInstances: 3 + secretsResourceVersion: + applicationSecretVersion: "1479" + caSecretVersion: "1475" + replicationSecretVersion: "1477" + serverSecretVersion: "1476" + superuserSecretVersion: "1478" targetPrimary: cluster-example-1 writeService: cluster-example-rw ``` @@ -238,7 +245,7 @@ curl -sSfL \ sudo sh -s -- -b /usr/local/bin __OUTPUT__ EnterpriseDB/kubectl-cnp info checking GitHub for latest tag -EnterpriseDB/kubectl-cnp info found version: 1.2.1 for v1.2.1/linux/x86_64 +EnterpriseDB/kubectl-cnp info found version: 1.3.0 for v1.3.0/linux/x86_64 EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp ``` @@ -247,7 +254,7 @@ The `cnp` command is now available in kubectl: ```shell kubectl cnp status cluster-example __OUTPUT__ -Cluster in healthy state +Cluster in healthy state Name: cluster-example Namespace: default PostgreSQL Image: quay.io/enterprisedb/postgresql:13.2 @@ -256,23 +263,81 @@ Instances: 3 Ready instances: 3 Instances status -Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart --------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- -cluster-example-1 0/6000060 6941211174657425425 ✓ ✗ ✗ ✗ -cluster-example-2 0/6000060 0/6000060 6941211174657425425 ✗ ✓ ✗ ✗ -cluster-example-3 0/6000060 0/6000060 6941211174657425425 ✗ ✓ ✗ ✗ +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart Status +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- ------ +cluster-example-1 0/5000060 6955855494195015697 ✓ ✗ ✗ ✗ OK +cluster-example-2 0/5000060 0/5000060 6955855494195015697 ✗ ✓ ✗ ✗ OK +cluster-example-3 0/5000060 0/5000060 6955855494195015697 ✗ ✓ ✗ ✗ OK ``` !!! 
Note "There's more" See [the Cloud Native PostgreSQL Plugin page](cnp-plugin/) for more commands and options. +## Testing failover + +As our status checks show, we're running two replicas - if something happens to the primary instance of PostgreSQL, the cluster will fail over to one of them. Let's demonstrate this by killing the primary pod: + +```shell +kubectl delete pod --wait=false cluster-example-1 +__OUTPUT__ +pod "cluster-example-1" deleted +``` + +This simulates a hard shutdown of the server - a scenario where something has gone wrong. + +Now if we check the status... +```shell +kubectl cnp status cluster-example +__OUTPUT__ +Failing over Failing over to cluster-example-2 +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13.2 +Primary instance: cluster-example-2 +Instances: 3 +Ready instances: 2 + +Instances status +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart Status +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- ------ +cluster-example-1 - - - - - - - - unable to upgrade connection: container not found ("postgres") - +cluster-example-2 0/7000230 6955855494195015697 ✓ ✗ ✗ ✗ OK +cluster-example-3 0/70000A0 0/70000A0 6955855494195015697 ✗ ✓ ✗ ✗ OK +``` + +...the failover process has begun, with the second pod promoted to primary. Once the failed pod has restarted, it will become a replica of the new primary: + +```shell +kubectl cnp status cluster-example +__OUTPUT__ +Cluster in healthy state +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13.2 +Primary instance: cluster-example-2 +Instances: 3 +Ready instances: 3 + +Instances status +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart Status +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- ------ +cluster-example-1 0/7004268 0/7004268 6955855494195015697 ✗ ✓ ✗ ✗ OK +cluster-example-2 0/7004268 6955855494195015697 ✓ ✗ ✗ ✗ OK +cluster-example-3 0/7004268 0/7004268 6955855494195015697 ✗ ✓ ✗ ✗ OK +``` + ### Further reading This is all it takes to get a PostgreSQL cluster up and running, but of course there's a lot more possible - and certainly much more that is prudent before you should ever deploy in a production environment! -- For information on using the Cloud Native PostgreSQL Operator to deploy on public cloud platforms, see the [Cloud Setup](cloud_setup/) section. +- Deploying on public cloud platforms: see the [Cloud Setup](cloud_setup/) section. + +- Design goals and possibilities offered by the Cloud Native PostgreSQL Operator: check out the [Architecture](architecture/) and [Use cases](use_cases/) sections. + +- Configuring a secure and reliable system: read through the [Security](security/), [Failure Modes](failure_modes/) and [Backup and Recovery](backup_recovery/) sections. + +- Webinar: [Watch Gabriele Bartolini discuss and demonstrate Cloud Native PostgreSQL lifecycle management](https://www.youtube.com/watch?v=S-I9y-HnAnI) -- For the design goals and possibilities offered by the Cloud Native PostgreSQL Operator, check out the [Architecture](architecture/) and [Use cases](use_cases/) sections. 
+- Development: [Leonardo Cecchi writes about setting up a local environment using Cloud Native PostgreSQL for application development](https://www.enterprisedb.com/blog/cloud-native-postgresql-application-developers) -- And for details on what it takes to configure a secure and reliable system, read through the [Security](security/), [Failure Modes](failure_modes/) and [Backup and Recovery](backup_recovery/) sections. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx new file mode 100644 index 00000000000..a9235008ed6 --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/labels_annotations.mdx @@ -0,0 +1,84 @@ +--- +title: 'Labels and annotations' +originalFilePath: 'src/labels_annotations.md' +product: 'Cloud Native Operator' +--- + +Resources in Kubernetes are organized in a flat structure, with no hierarchical +information or relationship between them. However, such resources and objects +can be linked together and put in relationship through **labels** and +**annotations**. + +!!! info + For more information, please refer to the Kubernetes documentation on + [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) and + [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). + +In short: + +- an annotation is used to assign additional non-identifying information to + resources with the goal of facilitating integration with external tools +- a label is used to group objects and query them through Kubernetes' native + selector capability + +You can select one or more labels and/or annotations that you will use +in your Cloud Native PostgreSQL deployments. Then you need to configure the operator +so that when you define these labels and/or annotations in a cluster's metadata, +they are automatically inherited by all resources created by it (including pods). + +!!! Note + Label and annotation inheritance is the technique adopted by Cloud Native + PostgreSQL in lieu of alternative approaches such as pod templates. + +## Pre-requisites + +By default, no label or annotation defined in the cluster's metadata is +inherited by the associated resources. +In order to enable label/annotation inheritance, you need to follow the +instructions provided in the ["Operator configuration"](operator_conf.md) section. + +Below we will build on that example and limit it to the following: + +- annotations: `categories` +- labels: `app`, `environment`, and `workload` + +!!! Note + Feel free to select the names that most suit your context for both + annotations and labels. Remember that you can also use wildcards + in naming and adopt strategies like `mycompany/*` so that all labels + or annotations starting with `mycompany/` are inherited. + +## Defining cluster's metadata + +When defining the cluster, **before** any resource is deployed, you can +properly set the metadata as follows: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example + annotations: + categories: database + labels: + environment: production + workload: database + app: sso +spec: + # ...
+``` + +Once the cluster is deployed, you can verify, for example, that the labels +have been correctly set in the pods with: + +```shell +kubectl get pods --show-labels +``` + +## Current limitations + +Cloud Native PostgreSQL does not currently support synchronization of labels +or annotations after a resource has been created. For example, suppose you +deploy a cluster. When you add a new annotation to be inherited and define it +in the existing cluster, the operator will not automatically set it +on the associated resources. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx index 827291d6aad..0c7b1e489e0 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx @@ -44,6 +44,9 @@ kubectl rollout restart deployment -n [NAMESPACE_NAME_HERE] \ postgresql-operator-controller-manager ``` +!!! Seealso "Operator configuration" + For more information, please refer to the ["Operator configuration"](operator_conf.md) section. + The validity of the license key can be checked inside the cluster status. ```sh diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx index d6b04e4225c..32d00655b38 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx @@ -5,7 +5,7 @@ product: 'Cloud Native Operator' --- For each PostgreSQL instance, the operator provides an exporter of metrics for -[Prometheus](https://prometheus.io/) via HTTP, on port 8000. +[Prometheus](https://prometheus.io/) via HTTP, on port 9187. The operator comes with a predefined set of metrics, as well as a highly configurable and customizable system to define additional queries via one or more `ConfigMap` objects - and, in future versions, `Secret` too. @@ -13,7 +13,7 @@ more `ConfigMap` objects - and, in future versions, `Secret` too. The exporter can be accessed as follows: ```shell -curl http://:8000/metrics +curl http://:9187/metrics ``` All monitoring queries are: diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx index 79635f67015..b9c9151477f 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx @@ -97,8 +97,8 @@ switchover operations. The operator is designed to manage a PostgreSQL cluster with a single database. The operator transparently manages access to the database through -two Kubernetes services automatically provisioned and managed for read-write -and read-only workloads. +three Kubernetes services automatically provisioned and managed for read-write, +read, and read-only workloads. Using the convention over configuration approach, the operator creates a database called `app`, by default owned by a regular Postgres user with the same name. Both the database name and the user name can be specified if @@ -113,6 +113,13 @@ For InfoSec requirements, the operator does not need privileged mode for the execution of containers and access to volumes both in the operator and in the operand.
+### Affinity + +The operator supports basic pod affinity/anti-affinity rules to deploy PostgreSQL +pods on different nodes, based on the selected `topologyKey` (for example `node` or +`zone`). Additionally, it supports node affinity through the `nodeSelector` +configuration attribute, as [expected by Kubernetes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + ### License keys The operator comes with support for license keys, with the possibility to @@ -121,8 +128,8 @@ Cloud Native PostgreSQL has been programmed to create an implicit 30-day trial license for every deployed cluster. License keys are signed strings that the operator can verify using an asymmetric key technique. The content is a JSON object that includes the -product, the cluster identifiers (namespace and name), the number of -instances, the expiration date, and, if required, the credentials to be used +type, the product, the expiration date, and, if required, the cluster +identifiers (namespace and name), the number of instances, and the credentials to be used as a secret by the operator to pull down an image from a protected container registry. Beyond the expiration date, the operator will stop any reconciliation process until the license key is restored. @@ -137,7 +144,7 @@ controlled PostgreSQL instance to converge to the required status of the cluster (for example: if the cluster status reports that pod `-1` is the primary, pod `-1` needs to promote itself while the other pods need to follow pod `-1`). The same status is used by Kubernetes client applications to -provide details, including the OpenShift dashboard. +provide details, including the `cnp` plugin for `kubectl` and the OpenShift dashboard. ### Operator's certification authority @@ -152,7 +159,8 @@ The operator automatically creates a certification authority for every PostgreSQ cluster, which is used to issue and renew TLS certificates for the authentication of streaming replication standby servers and applications (instead of passwords). The operator will use the Certification Authority to sign every cluster -certification authority. +certification authority. Certificates can be issued with the `cnp` plugin +for `kubectl`. ### TLS connections @@ -180,8 +188,9 @@ such as `max_connections` and `max_wal_senders`. The operator can be installed through a Kubernetes manifest via `kubectl apply`, to be used in a traditional Kubernetes installation in public -and private cloud environments. Additionally, it can be deployed on OpenShift -Container Platform via OperatorHub. +and private cloud environments. Additionally, it can be deployed through +the Operator Lifecycle Manager (OLM) from OperatorHub.io and the OpenShift +Container Platform by Red Hat. ### Convention over configuration @@ -216,7 +225,8 @@ starting from the replicas by dropping the existing pod and creating a new one with the new requested operand image that reuses the underlying storage. Depending on the value of the `primaryUpdateStrategy`, the operator proceeds with a switchover before updating the former primary (`unsupervised`) or waits -for the user to manually issue the switchover procedure (`supervised`). +for the user to manually issue the switchover procedure (`supervised`) via the +`cnp` plugin for `kubectl`. Which setting to use depends on the business requirements as the operation might generate some downtime for the applications, from a few seconds to minutes based on the actual database workload. @@ -363,13 +373,15 @@ alerting, trending, log processing.
This might involve the use of external tools such as Prometheus, Grafana, Fluent Bit, as well as extensions in the PostgreSQL engine for the output of error logs directly in JSON format. -### Prometheus exporter infrastructure +### Prometheus exporter with configurable queries The instance manager provides a pluggable framework and, via its own web server, exposes an endpoint to export metrics for the [Prometheus](https://prometheus.io/) monitoring and alerting tool. -Currently, only basic metrics and the `pg_stat_archiver` system view -for PostgreSQL have been implemented. +The operator supports custom monitoring queries defined as `ConfigMap` +and `Secret` objects using a syntax that is compatible with +[`postgres_exporter` for Prometheus](https://github.com/prometheus-community/postgres_exporter). + ### Kubernetes events diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_conf.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_conf.mdx new file mode 100644 index 00000000000..67809bdff32 --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_conf.mdx @@ -0,0 +1,86 @@ +--- +title: 'Operator configuration' +originalFilePath: 'src/operator_conf.md' +product: 'Cloud Native Operator' +--- + +The operator for Cloud Native PostgreSQL is installed from a standard +deployment manifest and follows the convention over configuration paradigm. +While this is fine in most cases, there are some scenarios where you want +to change the default behavior, such as: + +- setting a company license key that is shared by all deployments managed + by the operator +- defining annotations and labels to be inherited by all resources created + by the operator and that are set in the cluster resource +- defining a different default image for PostgreSQL or an additional pull secret + +By default, the operator is installed in the `postgresql-operator-system` +namespace as a Kubernetes `Deployment` called `postgresql-operator-controller-manager`. + +!!! Note + In the examples below we assume the default name and namespace for the operator deployment. + +The behavior of the operator can be customized through a `ConfigMap` that +is located in the same namespace as the operator deployment and with the +same name as the operator deployment, followed by the `-config` suffix. + +As a result, if you have installed the operator using the standard deployment +manifest, the default `ConfigMap` is `postgresql-operator-controller-manager-config` +in the `postgresql-operator-system` namespace. + +!!! Important + Any change to the config map will not be automatically detected by the operator + and, as such, it needs to be reloaded (see below). Moreover, changes only + apply to the resources created after the configuration is reloaded. + +## Available options + +The operator looks for the following environment variables to be defined in the config map: + +Name | Description +---- | ----------- +`EDB_LICENSE_KEY` | default license key (to be used only if the cluster does not define one) +`INHERITED_ANNOTATIONS` | list of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods +`INHERITED_LABELS` | list of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods +`PULL_SECRET_NAME` | name of an additional pull secret to be defined in the operator's namespace and to be used to download images + +By default, the above variables are not set.
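As a quick check (an addition here, not part of the original text), you can confirm whether such a `ConfigMap` already exists and inspect its current values, assuming the default names and namespace mentioned above:

```shell
# Show the operator configuration ConfigMap, if present
kubectl get configmap postgresql-operator-controller-manager-config \
  -n postgresql-operator-system -o yaml
```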
+ +Values in `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` support path-like wildcards. For example, the value `example.com/*` will match +both `example.com/one` and `example.com/two`. + +## Defining an operator config map + +The example below customizes the behavior of the operator by defining a +default license key (namely a company key) and the label/annotation names to be +inherited by the resources created by any `Cluster` object that is deployed +at a later time. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgresql-operator-controller-manager-config + namespace: postgresql-operator-system +data: + INHERITED_ANNOTATIONS: categories + INHERITED_LABELS: environment, workload, app + EDB_LICENSE_KEY: +``` + +For the change to be effective, you need to reload the config map by issuing: + +```shell +kubectl rollout restart deployment \ + -n postgresql-operator-system \ + postgresql-operator-controller-manager +``` + +!!! Warning + Customizations will be applied only to `Cluster` resources created + after the reload of the operator deployment. + +Following the above example, if the `Cluster` definition contains a `categories` +annotation and any of the `environment`, `workload`, or `app` labels, these will +be inherited by all the resources generated by the deployment. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx index f10003e32ce..6036c14988b 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/quickstart.mdx @@ -9,11 +9,14 @@ using Cloud Native PostgreSQL on a local Kubernetes cluster in [Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) or [Kind](https://kind.sigs.k8s.io/). + + !!! Tip "Live demonstration" Don't want to install anything locally just yet? Try a demonstration directly in your browser: [Cloud Native PostgreSQL Operator Interactive Quickstart](interactive_demo) + RedHat OpenShift Container Platform users can test the certified operator for Cloud Native PostgreSQL on the [Red Hat CodeReady Containers (CRC)](https://developers.redhat.com/products/codeready-containers/overview) for OpenShift. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx new file mode 100644 index 00000000000..e344ecc40d7 --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx @@ -0,0 +1,102 @@ +--- +title: 'Release notes' +originalFilePath: 'src/release_notes.md' +product: 'Cloud Native Operator' +--- + +History of user-visible changes for Cloud Native PostgreSQL.
+ +## Version 1.3.0 + +**Release date:** 23 Apr 2021 + +Features: + +- Inheritance of labels and annotations +- Set resource limits for every container + +Security Enhancements: + +- Support for restricted security context constraint on RedHat OpenShift to + limit pod execution to a namespace allocated UID and SELinux context +- Pod security contexts explicitly defined by the operator to run as + non-root, non-privileged and without privilege escalation + +Changes: + +- Prometheus exporter endpoint listening on port 9187 (port 8000 is now + reserved to instance coordination with API server) +- Documentation improvements +- E2E tests enhancements, including GKE environment +- Minor bug fixes + +## Version 1.2.1 + +**Release date:** 6 Apr 2021 + +- ScheduledBackup objects are no longer owners of the Backups, meaning that backups + are not removed when ScheduledBackup objects are deleted +- Update the ubi8-minimal image to solve RHSA-2021:1024 (Security Advisory: Important) + +## Version 1.2.0 + +**Release date:** 31 Mar 2021 + +- Introduce experimental support for custom monitoring queries as ConfigMap and + Secret objects using a syntax compatible with `postgres_exporter` for Prometheus +- Support Operator Lifecycle Manager (OLM) deployments, with the subsequent + presence on OperatorHub.io +- Expand license key support for company-wide usage (previously restricted to a single cluster namespace) +- Enhance container security by applying guidelines from the US Department of + Defense (DoD)'s Defense Information Systems Agency (DISA) and the Center for + Internet Security (CIS) and verifying them directly in the pipeline with + Dockle +- Improve E2E tests on AKS +- Minor bug fixes + +## Version 1.1.0 + +**Release date:** 3 Mar 2021 + +- Add `kubectl cnp status` to pretty-print the status of a cluster, including + JSON and YAML output +- Add `kubectl cnp certificate` to enable TLS authentication for client applications +- Add the `-ro` service to route connections to the available hot + standby replicas only, enabling offload of read-only queries from + the cluster's primary instance +- Rollback scaling down a cluster to a value lower than `maxSyncReplicas` +- Request a checkpoint before demoting a former primary +- Send `SIGINT` signal (fast shutdown) to PostgreSQL process on `SIGTERM` +- Minor bug fixes + +## Version 1.0.0 + +**Release date:** 4 Feb 2021 + +The first major stable release of Cloud Native PostgreSQL implements `Cluster`, +`Backup` and `ScheduledBackup` in the API group `postgresql.k8s.enterprisedb.io/v1`.
+It uses these resources to create and manage PostgreSQL clusters inside +Kubernetes with the following main capabilities: + +- Direct integration with Kubernetes API server for High Availability, without + requiring an external tool +- Self-Healing capability, through: + - failover of the primary instance by promoting the most aligned replica + - automated recreation of a replica +- Planned switchover of the primary instance by promoting a selected replica +- Scale up/down capabilities +- Definition of an arbitrary number of instances (minimum 1 - one primary server) +- Definition of the *read-write* service to connect your applications to the + only primary server of the cluster +- Definition of the *read* service to connect your applications to any of the + instances for reading workloads +- Support for Local Persistent Volumes with PVC templates +- Reuse of Persistent Volumes storage in Pods +- Rolling updates for PostgreSQL minor versions and operator upgrades +- TLS connections and client certificate authentication +- Continuous backup to an S3 compatible object store +- Full recovery and point-in-time recovery from an S3 compatible object store backup +- Support for synchronous replicas +- Support for node affinity via `nodeSelector` property +- Standard output logging of PostgreSQL error messages + diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx index a42e034ee3b..0be2c0e122f 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/security.mdx @@ -4,8 +4,8 @@ originalFilePath: 'src/security.md' product: 'Cloud Native Operator' --- -This section contains information about security for Cloud Native PostgreSQL -analyzed at 3 different layers: Code, Container and Cluster. +This section contains information about security for Cloud Native PostgreSQL, +analyzed at 3 different layers: Code, Container and Cluster. !!! Warning The information contained in this page must not exonerate you from @@ -85,12 +85,29 @@ PostgreSQL servers run as `postgres` system user. No component whatsoever requir Likewise, volume access does not require *privileged* mode or `root` privileges either. Permissions must be properly assigned by the Kubernetes platform and/or administrators. +The operator explicitly sets the required security contexts. + +On RedHat OpenShift, Cloud Native PostgreSQL runs in the `restricted` security context constraint, +the most restrictive one. The goal is to limit the execution of a pod to a namespace allocated UID +and SELinux context. + +!!! Seealso "Security Context Constraints in OpenShift" + For further information on Security Context Constraints (SCC) in + OpenShift, please refer to the + ["Managing SCC in OpenShift"](https://www.openshift.com/blog/managing-sccs-in-openshift) + article. + ### Network Policies The pods created by the `Cluster` resource can be controlled by Kubernetes [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to enable/disable inbound and outbound network access at IP and TCP level. +!!! Important + The operator needs to communicate to each instance on TCP port 8000 + to get information about the status of the PostgreSQL server. Make sure + you keep this in mind in case you add any network policy. + Network policies are beyond the scope of this document.
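For illustration only, a minimal `NetworkPolicy` sketch that keeps this port reachable; the selectors below are assumptions (a `postgresql: cluster-example` pod label and a label identifying the operator namespace) and must be adapted to the labels actually present in your environment:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-operator-to-instances
spec:
  # Assumption: the instance pods carry a postgresql: <cluster-name> label
  podSelector:
    matchLabels:
      postgresql: cluster-example
  policyTypes:
    - Ingress
  ingress:
    - from:
        # Assumption: the operator namespace is labelled this way in your
        # cluster; use whatever label actually identifies it
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: postgresql-operator-system
      ports:
        - protocol: TCP
          port: 8000
```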
Please refer to the ["Network policies"](https://kubernetes.io/docs/concepts/services-networking/network-policies/) section of the Kubernetes documentation for further information. @@ -105,8 +122,10 @@ You can use those files to configure application access to the database. By default, every replica is automatically configured to connect in **physical async streaming replication** with the current primary instance, with a special -user called `streaming_replica`. The connection between nodes is **encrypted** -and authentication is via **TLS client certificates**. +user called `streaming_replica`. The connection between nodes is **encrypted** +and authentication is via **TLS client certificates** (please refer to the +["Client TLS/SSL Connections"](ssl_connections.md#Client TLS/SSL Connections) page +for details). Currently, the operator allows administrators to add `pg_hba.conf` lines directly in the manifest as part of the `pg_hba` section of the `postgresql` configuration. The lines defined in the diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx index f88d4305455..bda8a53b6bb 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx @@ -1,20 +1,169 @@ --- -title: 'Client SSL Connections' +title: 'Client TLS/SSL Connections' originalFilePath: 'src/ssl_connections.md' product: 'Cloud Native Operator' --- -Cloud Native PostgreSQL currently creates a Certification Authority (CA) for -every cluster. This CA is used to sign the certificates to offer to clients -and create a secure connection with them. +The Cloud Native PostgreSQL operator has been designed to work with TLS/SSL for both encryption in transit and +authentication, on server and client sides. Clusters created using the CNP operator come with a Certification +Authority (CA) to create and sign TLS client certificates. Through the `cnp` plugin for `kubectl` you can +issue a new TLS client certificate which can be used to authenticate a user in lieu of passwords. -## Using SSL to connect to pods +Please refer to the following steps to authenticate via TLS/SSL certificates, which assume you have +installed a cluster using the [cluster-example.yaml](../samples/cluster-example.yaml) deployment manifest. +According to the convention over configuration paradigm, that file automatically creates a `app` database +which is owned by a user called `app` (you can change this convention through the `initdb` configuration +in the `bootstrap` section). -Using SSL to connect to the cluster -```sh -psql postgresql://cluster-example-rw:5432/app?sslmode=require +## Issuing a new certificate + +!!! See also "About CNP plugin for kubectl" + Please refer to the ["Certificates" section in the "Cloud Native PostgreSQL Plugin"](cnp-plugin.md#certificates) + page for details on how to use the plugin for `kubectl`. 
+ +You can create a certificate for the `app` user in the `cluster-example` PostgreSQL cluster as follows: + +```shell +kubectl cnp certificate cluster-app \ + --cnp-cluster cluster-example \ + --cnp-user app +``` + +You can now verify the certificate with: + +```shell +kubectl get secret cluster-app \ + -o jsonpath="{.data['tls\.crt']}" \ + | base64 -d | openssl x509 -text -noout \ + | head -n 11 +``` + +Output: + +```console + +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 5d:e1:72:8a:39:9f:ce:51:19:9d:21:ff:1e:4b:24:5d + Signature Algorithm: ecdsa-with-SHA256 + Issuer: OU = default, CN = cluster-example + Validity + Not Before: Mar 22 10:22:14 2021 GMT + Not After : Mar 22 10:22:14 2022 GMT + Subject: CN = app +``` + +As you can see, TLS client certificates by default are created with one year of validity, and with a simple CN that +corresponds to the username in PostgreSQL. This is necessary to leverage the `cert` authentication method for `hostssl` +entries in `pg_hba.conf`. + +## Testing the connection via a TLS certificate + +Now we will test this client certificate by configuring a demo client application that connects to our Cloud Native +PostgreSQL cluster. + +The following manifest called `cert-test.yaml` creates a demo Pod with a test application +in the same namespace where your database cluster is running: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-test +spec: + replicas: 1 + selector: + matchLabels: + app: webtest + template: + metadata: + labels: + app: webtest + spec: + containers: + - image: leonardoce/webtest:1.0.0 + name: cert-test + volumeMounts: + - name: secret-volume-root-ca + mountPath: /etc/secrets/ca + - name: secret-volume-app + mountPath: /etc/secrets/app + ports: + - containerPort: 8080 + env: + - name: DATABASE_URL + value: > + sslkey=/etc/secrets/app/tls.key + sslcert=/etc/secrets/app/tls.crt + sslrootcert=/etc/secrets/ca/ca.crt + host=cluster-example-rw.default.svc + dbname=app + user=app + sslmode=verify-full + - name: SQL_QUERY + value: SELECT 1 + readinessProbe: + httpGet: + port: 8080 + path: /tx + volumes: + - name: secret-volume-root-ca + secret: + secretName: cluster-example-ca + defaultMode: 0600 + - name: secret-volume-app + secret: + secretName: cluster-app + defaultMode: 0600 +``` + +This Pod will mount secrets managed by the Cloud Native PostgreSQL operator, including: + +* TLS client certificate +* TLS client certificate private key +* TLS Certification Authority certificate + +They will be used to create the default resources that `psql` (and other libpq based applications like `pgbench`) +requires to establish a TLS encrypted connection to the Postgres database. + +By default `psql` searches for certificates inside the `~/.postgresql` directory of the current user, but we can use +the sslkey, sslcert, sslrootcert options to point libpq to the actual location of the cryptographic material. +The content of the above files is gathered from the secrets that were previously created by using the `cnp` plugin for +kubectl. + +Now deploy the application: + +```shell +kubectl create -f cert-test.yaml ``` -This will generate a secure connection with the `rw` service of the cluster -`cluster-example`. +Then we will use created Pod as PostgreSQL client to validate SSL connection and +authentication using TLS certificates we just created. + +A readiness probe has been configured to ensure that the application is ready when the database server can be +reached. 
+ +You can verify that the connection works by executing an interactive `bash` inside the Pod's container to run `psql` using the necessary +options. The PostgreSQL server is exposed through the read-write Kubernetes service. We will point the `psql` +command to connect to this service: + +```shell +kubectl exec -it cert-test -- bash -c "psql +'sslkey=/etc/secrets/app/tls.key sslcert=/etc/secrets/app/tls.crt +sslrootcert=/etc/secrets/ca/ca.crt host=cluster-example-rw.default.svc dbname=app +user=app sslmode=verify-full' -c 'select version();'" +``` + +Output: + +```console + version +-------------------------------------------------------------------------------------- +------------------ +PostgreSQL 13.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +8.3.1-5), 64-bit +(1 row) +``` diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx index 346bf8911e9..c3028831f95 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx @@ -4,8 +4,48 @@ originalFilePath: 'src/storage.md' product: 'Cloud Native Operator' --- -Storage is a critical component in a database workload. The operator will create -a Persistent Volume Claims for each PostgreSQL instance and mount then into the Pods. +**Storage is the most critical component in a database workload**. +Expectations are for storage to always be available, scale, perform well, +and guarantee consistency and durability. The same expectations and +requirements that apply to traditional environments, such as virtual machines +and bare metal, are also valid in container contexts managed by Kubernetes. + +!!! Important + Kubernetes has its own specificities when it comes to dynamically + provisioned storage. These include *storage classes*, *persistent + volumes*, and *persistent volume claims*. You need to own these + concepts, on top of all the valuable knowledge you have built over + the years in terms of storage for database workloads on VMs and + physical servers. + +There are two primary methods of access to storage: + +- **network**: either directly or indirectly (think of an NFS volume locally mounted on a host running Kubernetes) +- **local**: directly attached to the node where a Pod is running (this also includes directly attached disks on bare metal installations of Kubernetes) + +Network storage, which is the most common usage pattern in Kubernetes, +presents the same issues of throughput and latency that you can +experience in a traditional environment. These can be accentuated in +a shared environment, where I/O contention with several applications +increases the variability of performance results. + +Local storage enables shared-nothing architectures, which are more suitable +for highly transactional and Very Large DataBase (VLDB) workloads, as they +guarantee higher and more predictable performance. + +!!! Warning + Before you deploy a PostgreSQL cluster with Cloud Native PostgreSQL, + make sure that the storage you are using is recommended for database + workloads. Our advice is to clearly set performance expectations by + first benchmarking the storage using tools such as [fio](https://fio.readthedocs.io/en/latest/fio_doc.html), + and then the database using [pgbench](https://www.postgresql.org/docs/current/pgbench.html).
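As a rough illustration of that advice (the mount point, scale factor, client counts, and durations below are placeholders, not values from these docs), a first pass with `fio` and `pgbench` might look like:

```shell
# Raw storage: sequential 1 MiB writes with direct I/O on the candidate volume
fio --name=seqwrite --directory=/mnt/test-volume \
    --rw=write --bs=1M --size=1G --ioengine=libaio --direct=1

# Database layer: initialize pgbench tables (scale 50), then run a
# 60-second benchmark with 10 clients against the app database
pgbench -i -s 50 app
pgbench -c 10 -j 2 -T 60 app
```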
+ +## Persistent Volume Claim + +The operator creates a persistent volume claim (PVC) for each PostgreSQL +instance, with the goal of storing the `PGDATA`, and then mounts it into each Pod. + +## Configuration via a storage class The easiest way to configure the storage for a PostgreSQL cluster is to just request storage of a certain size, like in the following example: @@ -37,7 +77,7 @@ spec: size: 1Gi ``` -## Using a custom PVC template +## Configuration via a PVC template To further customize the generated PVCs, you can provide a PVC template inside the Custom Resource, like in the following example: @@ -61,12 +101,12 @@ spec: volumeMode: Filesystem ``` -## Expanding the storage size used for the instances +## Volume expansion -Kubernetes has an API allowing [expanding PVCs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims) +Kubernetes exposes an API allowing [expanding PVCs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims) that is enabled by default but needs to be supported by the underlying `StorageClass`. -To check if a certain `StorageClass` supports volume expansion you can read the `allowVolumeExpansion` +To check if a certain `StorageClass` supports volume expansion, you can read the `allowVolumeExpansion` field for your storage class: ``` @@ -81,14 +121,14 @@ of the `Cluster`, and the operator will apply the change to every PVC. If the `StorageClass` supports [online volume resizing](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim) the change is immediately applied to the Pods. If the underlying Storage Class doesn't support -that, you'll need to delete the Pod to trigger the resize. +that, you will need to delete the Pod to trigger the resize. The best way to proceed is to delete one Pod at a time, starting from replicas and waiting for each Pod to be back up. ### Recreating storage -Suppose the storage class doesn't support volume expansion. In that case, you can still regenerate your cluster +Suppose the storage class does not support volume expansion. In that case, you can still regenerate your cluster on different PVCs by allocating new PVCs with increased storage and then move the database there. This operation is feasible only when the cluster contains more than one node. diff --git a/advocacy_docs/supported-open-source/postgresql/installer/02_installing_postgresql_with_the_graphical_installation_wizard/index.mdx b/advocacy_docs/supported-open-source/postgresql/installer/02_installing_postgresql_with_the_graphical_installation_wizard/index.mdx index c57b55df2c6..4f309f702d5 100644 --- a/advocacy_docs/supported-open-source/postgresql/installer/02_installing_postgresql_with_the_graphical_installation_wizard/index.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installer/02_installing_postgresql_with_the_graphical_installation_wizard/index.mdx @@ -4,21 +4,10 @@ legacyRedirects: - "/edb-docs/d/postgresql/installation-getting-started/installation-guide/13.0/installing_postgresql_with_the_graphical_installation_wizard.html" --- - - The graphical installation wizard provides a quick and easy way to install PostgreSQL on a Mac or Windows system. As the installation wizard’s easy-to-follow dialogs lead you through the installation process, specify information about your system. When the dialogs are complete, the setup wizard will perform an installation based on the selections made during the setup process.
- -
- -**Note** - -
- -If you are invoking the graphical installer to perform a system upgrade, the installer will preserve the configuration options specified during the previous installation. - -
+!!! Note + If you are invoking the graphical installer to perform a system upgrade, the installer will preserve the configuration options specified during the previous installation. When the PostgreSQL installation finishes, you will be offered the option to invoke the Stack Builder package manager. Stack Builder provides an easy-to-use graphical interface that downloads and installs applications, drivers and utilities and their dependencies. See [Using Stack Builder](../03_using_stackbuilder) for more information. @@ -28,6 +17,6 @@ The PostgreSQL installer is available on the [EDB website](https://www.enterpris
-invoking\_the\_graphical\_installer +invoking_the_graphical_installer
diff --git a/advocacy_docs/supported-open-source/postgresql/installer/03_using_stackbuilder.mdx b/advocacy_docs/supported-open-source/postgresql/installer/03_using_stackbuilder.mdx index 5f9474b83aa..4d937889198 100644 --- a/advocacy_docs/supported-open-source/postgresql/installer/03_using_stackbuilder.mdx +++ b/advocacy_docs/supported-open-source/postgresql/installer/03_using_stackbuilder.mdx @@ -6,9 +6,6 @@ legacyRedirects: - - - The Stack Builder utility provides a graphical interface that simplifies the process of downloading and installing modules that complement your PostgreSQL installation. When you install a module with Stack Builder, Stack Builder automatically resolves any software dependencies. Stack Builder requires Internet access; if your installation of PostgreSQL resides behind a firewall (with restricted Internet access), Stack Builder can download program installers through a proxy server. The module provider determines if the module can be accessed through an HTTP proxy or an FTP proxy; currently, all updates are transferred via an HTTP proxy and the FTP proxy information is not used. @@ -71,14 +68,5 @@ You may occasionally encounter packages that don’t install successfully. If a When the installation is complete, the installer will alert you to the success or failure of the installations of the requested packages. If you were prompted by an installer to restart your computer, re-boot now. -
- -
- -**Note** - -
- -The modules supported by Stack Builder are subject to change and vary by platform. - -
+!!! Note + The modules supported by Stack Builder are subject to change and vary by platform. diff --git a/merge_sources/kubernetes/cloud_native_postgresql/interactive/index.mdx b/merge_sources/kubernetes/cloud_native_postgresql/interactive/index.mdx deleted file mode 100644 index 2fae1ae73a4..00000000000 --- a/merge_sources/kubernetes/cloud_native_postgresql/interactive/index.mdx +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: 'Cloud Native PostgreSQL Interactive Demonstrations' -navTitle: 'Interactive Demos' -product: 'Cloud Native Operator' -indexCards: full -showInteractiveBadge: true ---- - diff --git a/merge_sources/kubernetes/cloud_native_postgresql/interactive/installation_and_deployment.mdx b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx similarity index 64% rename from merge_sources/kubernetes/cloud_native_postgresql/interactive/installation_and_deployment.mdx rename to merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx index 5d437ad9aad..1256c5d2db7 100644 --- a/merge_sources/kubernetes/cloud_native_postgresql/interactive/installation_and_deployment.mdx +++ b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx @@ -1,5 +1,5 @@ --- -title: "Installation, Configuration and Demployment Demo" +title: "Installation, Configuration and Deployment Demo" description: "Walk through the process of installing, configuring and deploying the Cloud Native PostgreSQL Operator via a browser-hosted Minikube console" navTitle: Install, Configure, Deploy product: 'Cloud Native PostgreSQL Operator' @@ -21,10 +21,11 @@ Want to see what it takes to get the Cloud Native PostgreSQL Operator up and run 1. Installing the Cloud Native PostgreSQL Operator 2. Deploying a three-node PostgreSQL cluster 3. Installing and using the kubectl-cnp plugin +4. Testing failover to verify the resilience of the cluster It will take roughly 5-10 minutes to work through. -!!! Note This demo is interactive. +!!!interactive This demo is interactive You can follow along right in your browser by clicking the button below. Once the environment initializes, you'll see a terminal open at the bottom of the screen. @@ -40,7 +41,7 @@ __OUTPUT__ * OS release is Ubuntu 18.04.4 LTS * Preparing Kubernetes v1.17.3 on Docker 19.03.6 ... - kubelet.resolv-conf=/run/systemd/resolve/resolv.conf -* Launching Kubernetes ... +* Launching Kubernetes ... * Enabling addons: default-storageclass, storage-provisioner * Configuring local host environment ... * Waiting for cluster to come online ... @@ -61,10 +62,10 @@ You will see one node called `minikube`. If the status isn't yet "Ready", wait f ## Install Cloud Native PostgreSQL -Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation.md) section: +Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation.md) section: ```shell -kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.2.0.yaml +kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.3.0.yaml __OUTPUT__ namespace/postgresql-operator-system created customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created @@ -132,7 +133,7 @@ __OUTPUT__ cluster.postgresql.k8s.enterprisedb.io/cluster-example created ``` -You can check that the pods are being created with the `get pods` command. 
It'll take a bit to initialize, so if you run that +You can check that the pods are being created with the `get pods` command. It'll take a bit to initialize, so if you run that immediately after applying the cluster configuration you'll see the status as `Init:` or `PodInitializing`: ```shell @@ -164,13 +165,13 @@ metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"postgresql.k8s.enterprisedb.io/v1","kind":"Cluster","metadata":{"annotations":{},"name":"cluster-example","namespace":"default"},"spec":{"instances":3,"primaryUpdateStrategy":"unsupervised","storage":{"size":"1Gi"}}} - creationTimestamp: "2021-04-07T00:33:43Z" + creationTimestamp: "2021-04-27T15:11:21Z" generation: 1 name: cluster-example namespace: default - resourceVersion: "1806" + resourceVersion: "2572" selfLink: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/clusters/cluster-example - uid: 38ddc347-3f2e-412a-aa14-a26904e1a49e + uid: 6a693046-a9d0-41b0-ac68-7a96d7e2ff07 spec: affinity: topologyKey: "" @@ -196,14 +197,14 @@ status: instances: 3 instancesStatus: healthy: - - cluster-example-3 - cluster-example-1 - cluster-example-2 + - cluster-example-3 latestGeneratedNode: 3 licenseStatus: isImplicit: true isTrial: true - licenseExpiration: "2021-05-07T00:33:43Z" + licenseExpiration: "2021-05-27T15:11:21Z" licenseStatus: Implicit trial license repositoryAccess: false valid: true @@ -211,6 +212,12 @@ status: pvcCount: 3 readService: cluster-example-r readyInstances: 3 + secretsResourceVersion: + applicationSecretVersion: "1479" + caSecretVersion: "1475" + replicationSecretVersion: "1477" + serverSecretVersion: "1476" + superuserSecretVersion: "1478" targetPrimary: cluster-example-1 writeService: cluster-example-rw ``` @@ -219,7 +226,7 @@ status: By default, the operator will install the latest available minor version of the latest major version of PostgreSQL when the operator was released. You can override this by setting [the `imageName` key in the `spec` section of - the `Cluster` definition](../api_reference/#clusterspec). + the `Cluster` definition](api_reference/#clusterspec). !!! Important The immutable infrastructure paradigm requires that you always @@ -238,7 +245,7 @@ curl -sSfL \ sudo sh -s -- -b /usr/local/bin __OUTPUT__ EnterpriseDB/kubectl-cnp info checking GitHub for latest tag -EnterpriseDB/kubectl-cnp info found version: 1.2.1 for v1.2.1/linux/x86_64 +EnterpriseDB/kubectl-cnp info found version: 1.3.0 for v1.3.0/linux/x86_64 EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp ``` @@ -256,23 +263,81 @@ Instances: 3 Ready instances: 3 Instances status -Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart --------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- -cluster-example-1 0/6000060 6941211174657425425 ✓ ✗ ✗ ✗ -cluster-example-2 0/6000060 0/6000060 6941211174657425425 ✗ ✓ ✗ ✗ -cluster-example-3 0/6000060 0/6000060 6941211174657425425 ✗ ✓ ✗ ✗ +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart Status +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- ------ +cluster-example-1 0/5000060 6955855494195015697 ✓ ✗ ✗ ✗ OK +cluster-example-2 0/5000060 0/5000060 6955855494195015697 ✗ ✓ ✗ ✗ OK +cluster-example-3 0/5000060 0/5000060 6955855494195015697 ✗ ✓ ✗ ✗ OK ``` !!! 
Note "There's more" - See [the Cloud Native PostgreSQL Plugin page](../cnp-plugin/) for more commands and options. + See [the Cloud Native PostgreSQL Plugin page](cnp-plugin/) for more commands and options. + +## Testing failover + +As our status checks show, we're running two replicas - if something happens to the primary instance of PostgreSQL, the cluster will fail over to one of them. Let's demonstrate this by killing the primary pod: + +```shell +kubectl delete pod --wait=false cluster-example-1 +__OUTPUT__ +pod "cluster-example-1" deleted +``` + +This simulates a hard shutdown of the server - a scenario where something has gone wrong. + +Now if we check the status... +```shell +kubectl cnp status cluster-example +__OUTPUT__ +Failing over Failing over to cluster-example-2 +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13.2 +Primary instance: cluster-example-2 +Instances: 3 +Ready instances: 2 + +Instances status +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart Status +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- ------ +cluster-example-1 - - - - - - - - unable to upgrade connection: container not found ("postgres") - +cluster-example-2 0/7000230 6955855494195015697 ✓ ✗ ✗ ✗ OK +cluster-example-3 0/70000A0 0/70000A0 6955855494195015697 ✗ ✓ ✗ ✗ OK +``` + +...the failover process has begun, with the second pod promoted to primary. Once the failed pod has restarted, it will become a replica of the new primary: + +```shell +kubectl cnp status cluster-example +__OUTPUT__ +Cluster in healthy state +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13.2 +Primary instance: cluster-example-2 +Instances: 3 +Ready instances: 3 + +Instances status +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart Status +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- ------ +cluster-example-1 0/7004268 0/7004268 6955855494195015697 ✗ ✓ ✗ ✗ OK +cluster-example-2 0/7004268 6955855494195015697 ✓ ✗ ✗ ✗ OK +cluster-example-3 0/7004268 0/7004268 6955855494195015697 ✗ ✓ ✗ ✗ OK +``` ### Further reading -This is all it takes to get a PostgreSQL cluster up and running, but of course there's a lot more possible - and certainly much more that is prudent before you should ever deploy in a production environment! +This is all it takes to get a PostgreSQL cluster up and running, but of course there's a lot more possible - and certainly much more that is prudent before you should ever deploy in a production environment! + +- Deploying on public cloud platforms: see the [Cloud Setup](cloud_setup/) section. + +- Design goals and possibilities offered by the Cloud Native PostgreSQL Operator: check out the [Architecture](architecture/) and [Use cases](use_cases/) sections. + +- Configuring a secure and reliable system: read through the [Security](security/), [Failure Modes](failure_modes/) and [Backup and Recovery](backup_recovery/) sections. -- For information on using the Cloud Native PostgreSQL Operator to deploy on public cloud platforms, see the [Cloud Setup](.../cloud_setup/) section. 
+- Webinar: [Watch Gabriele Bartolini discuss and demonstrate Cloud Native PostgreSQL lifecycle management](https://www.youtube.com/watch?v=S-I9y-HnAnI) -- For the design goals and possibilities offered by the Cloud Native PostgreSQL Operator, check out the [Architecture](../architecture/) and [Use cases](../use_cases/) sections. +- Development: [Leonardo Cecchi writes about setting up a local environment using Cloud Native PostgreSQL for application development](https://www.enterprisedb.com/blog/cloud-native-postgresql-application-developers) -- And for details on what it takes to configure a secure and reliable system, read through the [Security](../security/), [Failure Modes](../failure_modes/) and [Backup and Recovery](../backup_recovery/) sections. diff --git a/product_docs/docs/bart/2.5/bart_inst/02_installing_bart.mdx b/product_docs/docs/bart/2.5/bart_inst/02_installing_bart.mdx index f4e2ef84cbd..ae888987e6e 100644 --- a/product_docs/docs/bart/2.5/bart_inst/02_installing_bart.mdx +++ b/product_docs/docs/bart/2.5/bart_inst/02_installing_bart.mdx @@ -110,7 +110,7 @@ The following section demonstrates installing BART on a CentOS host using an RPM The `bart --version` command should return the current BART version. If the `bart --version` command returns an error stating the PATH is not available after switching from the root user to another BART user account, adjust the setting of the `PATH` environment variable to include the directory location of the BART `bin` subdirectory in the `~/.bashrc` or `~/.bash_profile` files of the following user accounts: - The BART user account on the BART host. See [Configuring BART](03_configuring_bart/#path) for details. - - The remote user account on the remote host to which incremental backups are to be restored. For details, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/latest/bart_user/). + - The remote user account on the remote host to which incremental backups are to be restored. For details, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). Upon successful installation, BART is installed in the `BART_HOME` directory: @@ -231,7 +231,7 @@ The following section demonstrates installing BART on a RHEL host using an RPM p The `bart --version` command should return the current BART version. If the `bart --version` command returns an error stating the PATH is not available after switching from the root user to another BART user account, adjust the setting of the `PATH` environment variable to include the directory location of the BART `bin` subdirectory in the `~/.bashrc` or `~/.bash_profile` files of the following user accounts: - The BART user account on the BART host. See [Configuring BART](03_configuring_bart/#path) for details. - - The remote user account on the remote host to which incremental backups are to be restored. For details, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/latest/bart_user/). + - The remote user account on the remote host to which incremental backups are to be restored. For details, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). 
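As a sketch of that adjustment (the `/usr/edb/bart` location below is an assumption based on a default `BART_HOME`; adjust it to your actual installation path):

```sh
# Hypothetical example: make the BART bin directory available on the PATH
# for the BART user account (the path is an assumption, adjust as needed)
echo 'export PATH=/usr/edb/bart/bin:$PATH' >> ~/.bashrc
source ~/.bashrc
bart --version
```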
Upon successful installation, BART is installed in the `BART_HOME` directory: diff --git a/product_docs/docs/bart/2.5/bart_inst/03_configuring_bart.mdx b/product_docs/docs/bart/2.5/bart_inst/03_configuring_bart.mdx index 0201f230692..95395d5cef9 100644 --- a/product_docs/docs/bart/2.5/bart_inst/03_configuring_bart.mdx +++ b/product_docs/docs/bart/2.5/bart_inst/03_configuring_bart.mdx @@ -124,11 +124,11 @@ The following table describes the `[BART]` host parameters. | `[BART]` | Mandatory | Identifies the global section of the configuration file. It must be named BART. | | `bart_host` | Mandatory | Specify the BART user name and the IP address of the BART host on which the BART utility resides. You must specify it in the form of <bart_user>@<bart_host_address>. | | `backup_path` | Mandatory | Specify the path to the file system parent directory where all BART backups are stored. | -| `pg_basebackup_path` | Mandatory | Specify the path to the `pg_basebackup` program that you installed on the BART host. For information about `pg_basebackup` version-specific restrictions, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). | +| `pg_basebackup_path` | Mandatory | Specify the path to the `pg_basebackup` program that you installed on the BART host. For information about `pg_basebackup` version-specific restrictions, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). | | `wal_compression` | Optional | Set this parameter to `enabled` to compress the archived WAL files in gzip format in the BART backup catalog when the `MANAGE` subcommand is invoked. By default it is set to `disabled`. The gzip compression program must be in the BART user account’s `PATH` and the WAL compression setting must not be enabled for those database servers where you need to take incremental backups. | | `copy_wals_during_restore` | Optional | Set this parameter to `enabled` to copy the archived WAL files from the BART backup catalog to the `restore_path/archived_wals` directory prior to the database server archive recovery. Enabling this option helps you save time during the restore operation. Set this parameter to `disabled` (default) to retrieve the archived WAL files directly from the BART backup catalog during the database server archive recovery. During the restore operation, recovery settings will be saved in the `postgresql.auto.conf` file. The `restore_command` in the `postgresql.auto.conf` file will be determined by the value specified in the `copy_wals_during_restore` parameter. If the `RESTORE` subcommand is invoked with the `-c` option, the archived WAL files are copied from the BART backup catalog to the `restore_path/archived_wals` directory, thus overriding any setting of the `copy_wals_during_restore` parameter. If the `RESTORE` subcommand is invoked without the `-c` option, the value specified by the `copy_wals_during_restore` parameter is used. | | `xlog_method` | Optional | Specify how the transaction log is collected during the execution of `pg_basebackup` through the `BACKUP` subcommand. Set `xlog_method` to `fetch` (default) to collect the transaction log files after the backup is completed. Set to `stream` to stream the transaction log in parallel with the full backup creation. | -| `retention_policy` | Optional | Set this parameter to determine when an active backup should be marked as `obsolete` when the `MANAGE` subcommand is used. 
You can specify the retention policy either in terms of number of backups or duration (days, weeks, or months). `<max_number> BACKUPS` (default), `<max_number> DAYS`, `<max_number> WEEKS`, or `<max_number> MONTHS` where `<max_number>` is a positive integer. For information about managing backups using a retention policy, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). | +| `retention_policy` | Optional | Set this parameter to determine when an active backup should be marked as `obsolete` when the `MANAGE` subcommand is used. You can specify the retention policy either in terms of number of backups or duration (days, weeks, or months). `<max_number> BACKUPS` (default), `<max_number> DAYS`, `<max_number> WEEKS`, or `<max_number> MONTHS` where `<max_number>` is a positive integer. For information about managing backups using a retention policy, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). | | `logfile` | Optional | Use this parameter to specify the path to the BART log file. The default log file location is `/tmp/bart.log`. The log file will be created the first time you invoke the `bart` command line program using the sample configuration file value. To change the default setting, you must delete the `bart.log` file from the `/tmp` directory and create a new log file in another directory so that a new log file will be created and owned by the new BART user account. If no path to a log file is specified, BART does not create a log file. | | `scanner_logfile` | Optional | Use this parameter to specify the path to the XLOG/WAL scanner log file. The default location is `/tmp/bart_scanner.log`. The scanner log file will be created the first time you invoke the `bart_scanner` program using the sample configuration file value. To change the default setting, you must delete the `bart_scanner.log` file from the `/tmp` directory and create a new log file in another directory so that a new log file will be created and owned by the new BART user account. If no path to a log file is specified, BART does not create a WAL scanner log file. | | `bart_socket_directory` | Optional | Specify the socket directory path where all BART sockets will be stored. The default directory is `/tmp`. While specifying the `bart_socket_directory` path, you must ensure that the directory exists and the BART user has the required access permissions to the directory. | @@ -199,7 +199,7 @@ For BART usage, there are two scenarios that require a passwordless SSH/SCP conn - The public key file name should be appended to the `~/.ssh/authorized_keys` file on the database server host. The `authorized_keys` file is in the home directory of the user account that owns the directory where the database backup is to be restored. - If backups are to be taken from a given database server host, but restored to a different database server host, the passwordless SSH/SCP connections must be configured from the BART host to the database server host from which the backup is to be taken as well as from the BART host to the database server host to which the backup is to be restored. -See the EDB Backup and Recovery Reference Guide available at the [EDB website](/bart/latest/bart_ref/) to view examples of creating a passwordless connection. +See the EDB Backup and Recovery Reference Guide available at the [EDB website](/bart/2.5/bart_ref/) to view examples of creating a passwordless connection. **Enabling Public Key Authentication** @@ -362,7 +362,7 @@ The following table describes the database server parameters. 
| `cluster_owner` | Mandatory | Specify the Linux operating system user account that owns the database cluster. This is typically `enterprisedb` for Advanced Server database clusters installed in the Oracle compatible mode, or `postgres` for Advanced Server database clusters installed in the PostgreSQL compatible mode and PostgreSQL database clusters. | | `remote_host` | Optional | Specify the IP address of the remote server to which a backup is to be restored. Specify this parameter in the form of `<remote_user>@<remote_host_address>`. `<remote_user>` is the user account on the target database server host that accepts a passwordless SSH/SCP login connection and owns the directory where the backup is to be restored. `<remote_host_address>` is the IP address of the remote host. For restoring a backup to a remote host or for restoring any backup where `<remote_user>` and the BART user account are not the same users, either this parameter must be set or it may be specified with the `-r` option with the BART `RESTORE` subcommand. | | `tablespace_path` | Optional | Specify the path to which tablespaces are to be restored in the format `OID = <tablespace_path>`; If the backup is to be restored to a remote host specified by the `remote_host` parameter, then the tablespace paths must exist on the remote host. | -| `allow_incremental_backups` | Optional | Set this parameter to `enabled` to enable use of the WAL scanner and permit taking incremental backups when the `BACKUP` subcommand is invoked with the `--parent` option. Set it to `disabled` (default) to disallow incremental backups and thus permit only full backups. For information about using the `BACKUP` subcommand and running the WAL scanner, please see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). | +| `allow_incremental_backups` | Optional | Set this parameter to `enabled` to enable use of the WAL scanner and permit taking incremental backups when the `BACKUP` subcommand is invoked with the `--parent` option. Set it to `disabled` (default) to disallow incremental backups and thus permit only full backups. For information about using the `BACKUP` subcommand and running the WAL scanner, please see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). | | `Description` | Optional | Specify the description that will be used to identify the database server. | For information regarding the following parameters, see [configuring the BART host](#configuring-the-bart-host). @@ -447,7 +447,7 @@ Set the following configuration parameters in the `postgresql.conf` file to enab - Set the PostgreSQL `archive_command` parameter to copy the WAL files to the `archive_path`. The `archive_command` configuration parameter mentioned here is located in the `postgresql.conf` file; the PostgreSQL `archive_command` parameter is used in a different manner than the BART [archive_command](#archive_command). - Set `max_wal_senders` to a value high enough to leave at least one session available for the backup. If the `xlog_method=stream` parameter setting is to be used by this database server, the `max_wal_senders` setting must account for an additional session for the transaction log streaming (the setting must be a minimum of 2). See [Configuring the BART host](#configuring-the-bart-host) for information about the `xlog_method` parameter. -For detailed information about WAL archiving, see the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/current/static/continuous-archiving.html). 
+For detailed information about WAL archiving, see the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/12/static/continuous-archiving.html). The `ARCHIVE PATH` field displayed by the BART `SHOW-SERVERS` subcommand displays the full directory path where the WAL files should be copied as specified in the `archive_command` configuration parameter in the `postgresql.conf` file: @@ -501,7 +501,7 @@ To enable WAL archiving: - In the `postgresql.conf` file, set the `wal_level` to `replica` or higher, `archive_mode` to `on`, and `max_wal_senders` to a value high enough to leave at least one session available for the backup. If the `xlog_method=stream` parameter setting is to be used by this database server as determined in the BART configuration file, the `max_wal_senders` setting must account for an additional session for the transaction log streaming (that is, the setting must be a minimum of `2`). See [Configuring the BART host](#configuring-the-bart-host) for information on the `xlog_method` parameter. -- Configure the Postgres `archive_command` parameter automatically with the `INIT` subcommand and restart the database server when you are ready to initiate WAL archiving. The `INIT` subcommand invokes the Postgres `ALTER SYSTEM` command to set the Postgres `archive_command` configuration parameter in the `postgresql.auto.conf` file located in the managed database server’s `POSTGRES_INSTALL_HOME` data directory. For additional information about the `INIT` subcommand, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +- Configure the Postgres `archive_command` parameter automatically with the `INIT` subcommand and restart the database server when you are ready to initiate WAL archiving. The `INIT` subcommand invokes the Postgres `ALTER SYSTEM` command to set the Postgres `archive_command` configuration parameter in the `postgresql.auto.conf` file located in the managed database server’s `POSTGRES_INSTALL_HOME` data directory. For additional information about the `INIT` subcommand, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). The archive command string that the `INIT` subcommand generates into the `postgresql.auto.conf` file is determined by the parameter setting of the BART `archive_command` parameter in the server section of the BART configuration file. If the BART `archive_command` parameter is not set in the server section for a given database server, the command string that is configured uses the following default format: @@ -640,4 +640,4 @@ The `CHECK-CONFIG` subcommand confirms the following: - Archiving of WAL files to the `archive_path` is in process. - The WAL scanner program is running. -After configuring the BART host and the database server(s), you can start using BART. For information about using BART, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +After configuring the BART host and the database server(s), you can start using BART. For information about using BART, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). 
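To tie the parameter tables above together, here is a minimal illustrative `bart.cfg` — a sketch only: the server name `ACCTG`, the addresses, and the paths are hypothetical, only a handful of the parameters described above appear, and the shipped `bart.cfg.sample` should be consulted for exact spelling and comment syntax:

```ini
[BART]
bart_host = bart@192.168.2.22
backup_path = /opt/backup
pg_basebackup_path = /usr/edb/as12/bin/pg_basebackup
retention_policy = 3 BACKUPS

[ACCTG]
host = 127.0.0.1
port = 5444
user = repuser
cluster_owner = enterprisedb
allow_incremental_backups = enabled
description = "Accounting database server"
```

After editing the file, `bart CHECK-CONFIG` (optionally with `-s ACCTG`) can be used to confirm the configuration, as described above.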
diff --git a/product_docs/docs/bart/2.5/bart_inst/04_upgrading_bart.mdx b/product_docs/docs/bart/2.5/bart_inst/04_upgrading_bart.mdx index 4ea97b87ab6..4115c975dcf 100644 --- a/product_docs/docs/bart/2.5/bart_inst/04_upgrading_bart.mdx +++ b/product_docs/docs/bart/2.5/bart_inst/04_upgrading_bart.mdx @@ -62,12 +62,12 @@ bart-scanner STOP **Step 3:** Repeat the process described in this section to upgrade to the latest BART version on each remote host where an incremental backup will be restored. -For additional information about restoration of incremental backups on remote hosts, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +For additional information about restoration of incremental backups on remote hosts, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). **Step 4:** If the `bart --version` command returns an error stating the `PATH` is not available after switching from `root` user to another BART user account, adjust the setting of the `PATH` environment variable to include the location of the BART x.y.z executable (the `bin` subdirectory) in the `~/.bashrc` or `~/.bash_profile` files of the following user accounts: - The BART user account on the BART host. -- The remote user account on the remote host to which incremental backups are to be restored. For details, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +- The remote user account on the remote host to which incremental backups are to be restored. For details, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). The `PATH` setting should be the same as set for BART x.y.z since all versions use `/usr/edb/bart/bin`. diff --git a/product_docs/docs/bart/2.5/bart_inst/05_uninstalling_bart.mdx b/product_docs/docs/bart/2.5/bart_inst/05_uninstalling_bart.mdx index 8374221ef88..de8c1c0d00b 100644 --- a/product_docs/docs/bart/2.5/bart_inst/05_uninstalling_bart.mdx +++ b/product_docs/docs/bart/2.5/bart_inst/05_uninstalling_bart.mdx @@ -31,7 +31,7 @@ Uninstalling BART does not delete the backup files and archived WAL files that r - `rm -rf /opt/backup` - BART `DELETE` subcommand -For information about the BART `DELETE` subcommand, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/latest/bart_user/). +For information about the BART `DELETE` subcommand, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). ## Uninstalling BART on an SLES 12 Host diff --git a/product_docs/docs/bart/2.5/bart_qs_7/index.mdx b/product_docs/docs/bart/2.5/bart_qs_7/index.mdx index 1d54bf90879..bf35c868fc3 100644 --- a/product_docs/docs/bart/2.5/bart_qs_7/index.mdx +++ b/product_docs/docs/bart/2.5/bart_qs_7/index.mdx @@ -9,7 +9,7 @@ legacyRedirectsGenerated: This tutorial demonstrates using `yum` to [install](#installing) and [configure](../bart_qs_8/#configuring) Backup and Recovery Tool (BART) 2.5 on a CentOS 7 host with minimal configuration settings. The tutorial assumes that the user has some knowledge of installation and system administration procedures, and has administrative privileges on the host. -For detailed information about BART installation and configuration, see the *BART Installation and Upgrade Guide*, available at the [EDB website](/bart/latest/bart_inst/). 
+For detailed information about BART installation and configuration, see the *BART Installation and Upgrade Guide*, available at the [EDB website](/bart/2.5/bart_inst/). - BART is tested with the following database versions: @@ -113,7 +113,7 @@ Before configuring BART, establish the BART user account (the operating system u cp bart.cfg.sample bart.cfg ``` -3. Open the BART configuration file (`bart.cfg`) using an editor of your choice and scroll through the BART configuration file to edit the file as required; sample settings are included for your reference. You must add the mandatory parameters to the `[BART]` and `[ServerName]` sections. Default values may be used for optional parameters. For detailed information about parameter settings, see the *BART Installation and Upgrade Guide*, available at the [EDB website](/bart/latest/bart_inst/). +3. Open the BART configuration file (`bart.cfg`) using an editor of your choice and scroll through the BART configuration file to edit the file as required; sample settings are included for your reference. You must add the mandatory parameters to the `[BART]` and `[ServerName]` sections. Default values may be used for optional parameters. For detailed information about parameter settings, see the *BART Installation and Upgrade Guide*, available at the [EDB website](/bart/2.5/bart_inst/). Parameters set in the `[BART]` section are applicable to all BART managed database servers, while parameters set in the `[ServerName]` section are applicable only to the specific server; `[ServerName]` settings override `[BART]` section settings. @@ -188,7 +188,7 @@ The following table describes only mandatory parameters: bart CHECK-CONFIG [ -s <server_name> ] ``` - BART is now configured successfully. For detailed information about using BART, see the *EDB Backup and Recovery Tool User Guide*, available at the [EDB website](/bart/latest/bart_user/). + BART is now configured successfully. For detailed information about using BART, see the *EDB Backup and Recovery Tool User Guide*, available at the [EDB website](/bart/2.5/bart_user/). @@ -254,7 +254,7 @@ The following example enables SSH/SCP access on a CentOS 7.x host; similar (plat If backups are to be taken from a given database server host, but restored to a different database server host, the passwordless SSH/SCP connections must be configured from the BART host to the database server host from which the backup is to be taken as well as from the BART host to the database server host to which the backup is to be restored. -An example of how to create a passwordless connection is documented in the *EDB Backup and Recovery Reference Guide*, available at the [EDB website](/bart/latest/bart_ref/). +An example of how to create a passwordless connection is documented in the *EDB Backup and Recovery Reference Guide*, available at the [EDB website](/bart/2.5/bart_ref/). Even when the Advanced Server database is on the same host as BART, and the Advanced Server database cluster owner is also the BART user account, a passwordless SSH/SCP connection must be established from the same user account to itself. @@ -316,6 +316,6 @@ If an `authorized_keys` file does not exist, create a new file, but do not compl 2. Specify this replication database user in the `user` parameter of the `bart.cfg` file. -3. The [pg_hba.conf](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html) file must minimally permit the replication database user to have access to the database. 
The IP address from which the replication database user has access to the database is the BART host location. The replication database user must also be included in the `pg_hba.conf` file as a replication database connection if `pg_basebackup` is to be used for taking any backups. +3. The [pg_hba.conf](https://www.postgresql.org/docs/12/auth-pg-hba-conf.html) file must minimally permit the replication database user to have access to the database. The IP address from which the replication database user has access to the database is the BART host location. The replication database user must also be included in the `pg_hba.conf` file as a replication database connection if `pg_basebackup` is to be used for taking any backups. 4. To ensure there is no password prompt when connecting to the database server with the replication database user, a recommended method is to use the `.pgpass` file located in the BART user account’s home directory (if it does not exist, you need to create the `.pgpass` file with the required privileges). The `.pgpass` file must contain an entry for each BART managed database server, and its corresponding replication database user and password. diff --git a/product_docs/docs/bart/2.5/bart_qs_8/index.mdx b/product_docs/docs/bart/2.5/bart_qs_8/index.mdx index 7dc447abfb2..008295fcc1d 100644 --- a/product_docs/docs/bart/2.5/bart_qs_8/index.mdx +++ b/product_docs/docs/bart/2.5/bart_qs_8/index.mdx @@ -9,7 +9,7 @@ legacyRedirectsGenerated: This tutorial demonstrates using the `dnf` command to install and configure the EDB Backup and Recovery Tool (BART) 2.5 on a CentOS 8 host with minimal configuration settings. The tutorial assumes that the user has some knowledge of installation and system administration procedures and has administrative privileges on the host. -For detailed information about BART installation and configuration, see the *BART Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/). +For detailed information about BART installation and configuration, see the *BART Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/). - BART is tested with the following database versions: @@ -109,7 +109,7 @@ Before configuring BART, establish the BART user account (the operating system u cp bart.cfg.sample bart.cfg ``` -3. Open the BART configuration file (`bart.cfg`) using an editor of your choice and scroll through the BART configuration file to edit the file as required; sample settings are included for your reference. You must add the mandatory parameters to the `[BART]` and `[ServerName]` sections. Default values may be used for optional parameters. For detailed information about parameter settings, see the *BART Installation and Upgrade Guide*, available at the [EDB website](/bart/latest/bart_inst/). +3. Open the BART configuration file (`bart.cfg`) using an editor of your choice and scroll through the BART configuration file to edit the file as required; sample settings are included for your reference. You must add the mandatory parameters to the `[BART]` and `[ServerName]` sections. Default values may be used for optional parameters. For detailed information about parameter settings, see the *BART Installation and Upgrade Guide*, available at the [EDB website](/bart/2.5/bart_inst/). 
Parameters set in the `[BART]` section are applicable to all BART managed database servers, while parameters set in the `[ServerName]` section are applicable only to the specific server; `[ServerName]` settings override `[BART]` section settings. @@ -184,7 +184,7 @@ The following table describes only mandatory parameters: bart CHECK-CONFIG [ -s <server_name> ] ``` - BART is now configured successfully. For detailed information about using BART, see the *EDB Backup and Recovery Tool User Guide* available at the [EDB website](/bart/latest/bart_user/). + BART is now configured successfully. For detailed information about using BART, see the *EDB Backup and Recovery Tool User Guide* available at the [EDB website](/bart/2.5/bart_user/). @@ -250,7 +250,7 @@ The following example enables SSH/SCP access on a CentOS 8.x host; similar (plat If backups are to be taken from a given database server host, but restored to a different database server host, the passwordless SSH/SCP connections must be configured from the BART host to the database server host from which the backup is to be taken as well as from the BART host to the database server host to which the backup is to be restored. -An example of how to create a passwordless connection is documented in the *EDB Backup and Recovery Reference Guide*, available at the [EDB website](/bart/latest/bart_ref/). +An example of how to create a passwordless connection is documented in the *EDB Backup and Recovery Reference Guide*, available at the [EDB website](/bart/2.5/bart_ref/). Even when the Advanced Server database is on the same host as BART, and the Advanced Server database cluster owner is also the BART user account, a passwordless SSH/SCP connection must be established from the same user account to itself. @@ -312,6 +312,6 @@ If the `authorized_keys` file does not exist, create a new file, but do not comp 2. Specify this replication database user in the `user` parameter of the `bart.cfg` file. -3. The [pg_hba.conf](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html) file must minimally permit the replication database user to have access to the database. The IP address from which the replication database user has access to the database is the BART host location. The replication database user must also be included in the `pg_hba.conf` file as a replication database connection if `pg_basebackup` is to be used for taking any backups. +3. The [pg_hba.conf](https://www.postgresql.org/docs/12/auth-pg-hba-conf.html) file must minimally permit the replication database user to have access to the database. The IP address from which the replication database user has access to the database is the BART host location. The replication database user must also be included in the `pg_hba.conf` file as a replication database connection if `pg_basebackup` is to be used for taking any backups. 4. To ensure there is no password prompt when connecting to the database server with the replication database user, a recommended method is to use the `.pgpass` file located in the BART user account’s home directory (if it does not exist, you need to create the `.pgpass` file with the required privileges). The `.pgpass` file must contain an entry for each BART managed database server, and its corresponding replication database user and password. 
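To make steps 3 and 4 concrete — a sketch, with a hypothetical BART host (192.168.2.22), database server (192.168.2.24, port 5444), and replication database user `repuser`:

```shell
# Step 3 (on the database server): pg_hba.conf entries permitting repuser
# to connect from the BART host, including as a replication connection
# for pg_basebackup:
#   host  all          repuser  192.168.2.22/32  md5
#   host  replication  repuser  192.168.2.22/32  md5

# Step 4 (on the BART host): add one ~/.pgpass line per managed server,
# in the form hostname:port:database:username:password, and restrict
# the file's permissions so libpq will use it:
echo '192.168.2.24:5444:*:repuser:password' >> ~/.pgpass
chmod 600 ~/.pgpass
```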
diff --git a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/01_backup.mdx b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/01_backup.mdx index 2649c676929..97172dd9b88 100644 --- a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/01_backup.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/01_backup.mdx @@ -35,7 +35,7 @@ bart BACKUP -s <server_name> [-Fp] ``` -Before performing an incremental backup, you must take a full backup. For more details about incremental backup, refer to *Block-Level Incremental Backup* in the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/latest/bart_user/). +Before performing an incremental backup, you must take a full backup. For more details about incremental backup, refer to *Block-Level Incremental Backup* in the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). The following table describes the `BACKUP` options: @@ -45,8 +45,8 @@ The following table describes the `BACKUP` options: | `-F { p \| t }`
`--format { p \| t }` | Use this option to specify the backup file format.
Specify the `p` option to take a backup in plain text format and the `t` option to take a backup in tar format. If the `p` or `t` option is omitted, the default is tar format.
Use the `p` option with the `BACKUP` subcommand when streaming is used as a backup method.
An incremental backup can only be taken in plain text format (`p`). | | `-z`
`(--gzip)` | This option is applicable only for full backup and `tar` format. Use this option to enable gzip compression of tar files using the default compression level (typically 6). | | `-c <compression_level>`
`--compress-level <compression_level>` | This is applicable only for full backup and tar format. Use this option to specify the gzip compression level on the tar file output. `<compression_level>` is a digit from 1 through 9, with 9 being the best compression. | -| `--backup-name <backup_name>` | Use this option to assign a user-defined, alphanumeric friendly name to the backup. The maximum permitted length of the backup name is 49 characters.
For detailed information about this parameter, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/).
If the option `--backup-name` is not specified and the `backup_name` parameter is not set for this database server in the BART configuration file, then the backup can only be referenced in other BART subcommands by the BART assigned backup identifier. | -| `--thread-count <number_of_threads>` | Use this option to specify the number of worker threads to run in parallel to copy blocks for a backup.
For detailed information about the `--thread-count` parameter, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/). | +| `--backup-name <backup_name>` | Use this option to assign a user-defined, alphanumeric friendly name to the backup. The maximum permitted length of the backup name is 49 characters.
For detailed information about this parameter, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/).
If the option `--backup-name` is not specified and the `backup_name` parameter is not set for this database server in the BART configuration file, then the backup can only be referenced in other BART subcommands by the BART assigned backup identifier. | +| `--thread-count <number_of_threads>` | Use this option to specify the number of worker threads to run in parallel to copy blocks for a backup.
For detailed information about the `--thread-count` parameter, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/). | | `--with-pg_basebackup` | This is applicable only for full backup. Use this option to specify the use of `pg_basebackup` to take a full backup. Any thread count in effect, as given by the `thread_count` parameter in the BART configuration file, is ignored.
When taking a full backup, if the thread count in effect is greater than `1`, then the `pg_basebackup` utility is not used to take the full backup (parallel worker threads are used) unless the `--with-pg_basebackup` option is specified with the `BACKUP` subcommand. | | `--no-pg_basebackup` | This is applicable only for full backup. Use this option to specify that `pg_basebackup` is not to be used to take a full backup.
When taking a full backup, if the thread count in effect is only `1`, then the `pg_basebackup` utility is used to take the full backup unless the `--no-pg_basebackup` option is specified with the `BACKUP` subcommand. | | `--parent { <backup_id> \| <backup_name> }` | Use this option to take an incremental backup. The parent backup is a backup taken prior to the incremental backup; it can be either a full backup or an incremental backup. `<backup_id>` is the backup identifier of a parent backup and `<backup_name>` is the user-defined alphanumeric name of a parent backup. | diff --git a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/03_delete.mdx b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/03_delete.mdx index a148c571880..14940d188ca 100644 --- a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/03_delete.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/03_delete.mdx @@ -18,7 +18,7 @@ bart DELETE -s <server_name> Note that when invoking the `DELETE` subcommand, you must specify a database server. -For database servers under a retention policy, there are conditions where certain backups may not be deleted. For more information, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +For database servers under a retention policy, there are conditions where certain backups may not be deleted. For more information, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). The following table describes the `DELETE` options: diff --git a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/05_manage.mdx b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/05_manage.mdx index 75e63130a8e..467ff8b393c 100644 --- a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/05_manage.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/05_manage.mdx @@ -21,7 +21,7 @@ bart MANAGE [ -s { <server_name> | all } ] [ -n ] ``` -To view detailed information about the `MANAGE` subcommand and retention policy management, see the *EDB Backup and Recovery User Guide*. For information about setting the `wal_compression` parameter, see the *EDB Backup and Recovery Installation and Upgrade Guide*. These guides are available at the [EDB website](/bart/latest/bart_user/). +To view detailed information about the `MANAGE` subcommand and retention policy management, see the *EDB Backup and Recovery User Guide*. For information about setting the `wal_compression` parameter, see the *EDB Backup and Recovery Installation and Upgrade Guide*. These guides are available at the [EDB website](/bart/2.5/bart_user/). The following table describes the `MANAGE` options: diff --git a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/06_restore.mdx b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/06_restore.mdx index b0399e9d3e4..e93ca1fb605 100644 --- a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/06_restore.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/06_restore.mdx @@ -20,11 +20,11 @@ bart RESTORE -s <server_name> -p <restore_path> [ -c ] ``` -To view detailed information about the `RESTORE` subcommand, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/latest/bart_user/). +To view detailed information about the `RESTORE` subcommand, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). 
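Before turning to the `RESTORE` options, a quick sketch of how the `BACKUP` format and compression options described above combine in practice (the server name `ACCTG` and the backup names are hypothetical):

```shell
# Full backup in tar format, gzip-compressed at the highest level:
bart BACKUP -s ACCTG -Ft -c 9 --backup-name acctg_full_tar

# Full backup in plain text format, as required when streaming is used:
bart BACKUP -s ACCTG -Fp --backup-name acctg_full_plain
```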
If the backup is restored to a different database cluster directory than where the original database cluster resided, then some operations dependent upon the database cluster location may fail. This happens if the supporting service scripts are not updated to reflect the new directory location of the restored backup. -For information about the use and modification of service scripts, see the EDB Advanced Server Installation Guide available at the [EDB website](/epas/latest/). +For information about the use and modification of service scripts, see the EDB Advanced Server Installation Guide. The following table describes the `RESTORE` options: @@ -33,12 +33,12 @@ | `-s <server_name>`
`--server <server_name>` | `<server_name>` is the name of the database server to be restored. | | `-p <restore_path>`
`--restore-path <restore_path>` | `<restore_path>` is the directory path where the backup of the database server is to be restored. The directory must be empty and have the proper ownership and privileges assigned to it. | | `-i { <backup_id> \| <backup_name> }`

`--backupid { <backup_id> \| <backup_name> }` | `<backup_id>` is the backup identifier of the backup to be used for the restoration and `<backup_name>` is the user-defined alphanumeric name for the backup.
If the option is omitted, the latest backup is restored by default. | -| `-r <remote_user>@<remote_host_address>`

`--remote-host <remote_user>@<remote_host_address>` | `<remote_user>` is the user account on the remote database server host that accepts a passwordless SSH/SCP login connection and is the owner of the directory where the backup is to be restored.
`<remote_host_address>` is the IP address of the remote host to which the backup is to be restored. This option must be specified if the `remote_host` parameter for this database server is not set in the BART configuration file.
For information about the `remote_host` parameter, see the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/). | +| `-r <remote_user>@<remote_host_address>`

`--remote-host <remote_user>@<remote_host_address>` | `<remote_user>` is the user account on the remote database server host that accepts a passwordless SSH/SCP login connection and is the owner of the directory where the backup is to be restored.
`<remote_host_address>` is the IP address of the remote host to which the backup is to be restored. This option must be specified if the `remote_host` parameter for this database server is not set in the BART configuration file.
For information about the `remote_host` parameter, see the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/). | | `-w <number_of_workers>`
`--workers <number_of_workers>` | `<number_of_workers>` is the number of worker processes to run in parallel to stream the modified blocks of an incremental backup to the restore location. If the `-w` option is omitted, the default is `1` worker process.
For example, if four worker processes are specified, four receiver processes on the restore host and four streamer processes on the BART host are used. The output of each streamer process is connected to the input of a receiver process.
When the receiver gets to the point where it needs a modified block file, it obtains those modified blocks from its input. With this method, the modified block files are never written to the restore host disk. | | `-t <target_tli>`
`--target-tli <target_tli>` | `<target_tli>` is the integer identifier of the timeline to be used for replaying the archived WAL files for point-in-time recovery. | | `-x <target_xid>`
`--target-xid <target_xid>` | `<target_xid>` is the integer identifier of the transaction ID that determines the transaction up to, and including, which point-in-time recovery encompasses. | | `-g <timestamp>`

`--target-timestamp <timestamp>` | `<timestamp>` is the timestamp that determines the point in time up to, and including, which point-in-time recovery encompasses. | -| `-c`

`--copy-wals` | Specify this option to copy archived WAL files from the BART backup catalog to the `<restore_path>/archived_wals` directory.
The `restore_command` retrieves the WAL files from `<restore_path>/archived_wals` for the database server archive recovery.
If the `-c` option is omitted and the `copy_wals_during_restore` parameter in the BART configuration file is not enabled in a manner applicable to this database server, then the `restore_command` in the `postgresql.conf` retrieves the archived WAL files directly from the BART backup catalog.
For information about the `copy_wals_during_restore` parameter, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/). | +| `-c`

`--copy-wals` | Specify this option to copy archived WAL files from the BART backup catalog to the `<restore_path>/archived_wals` directory.
The `restore_command` retrieves the WAL files from `<restore_path>/archived_wals` for the database server archive recovery.
If the `-c` option is omitted and the `copy_wals_during_restore` parameter in the BART configuration file is not enabled in a manner applicable to this database server, then the `restore_command` in the `postgresql.conf` retrieves the archived WAL files directly from the BART backup catalog.
For information about the `copy_wals_during_restore` parameter, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/). | **Examples** diff --git a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/10_running_the_bart_wal_scanner.mdx b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/10_running_the_bart_wal_scanner.mdx index e28e5954347..cda813f49a8 100644 --- a/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/10_running_the_bart_wal_scanner.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/01_bart_subcommands_examples/10_running_the_bart_wal_scanner.mdx @@ -8,7 +8,7 @@ legacyRedirectsGenerated: The BART WAL scanner is used to process each WAL file to find and record modified blocks in a corresponding MBM file. As a BART account user, use the BART WAL scanner to invoke the `bart-scanner` program located in the `<BART_HOME>/bin` directory. -For detailed information about the WAL scanner and its usage, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +For detailed information about the WAL scanner and its usage, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). **Syntax:** diff --git a/product_docs/docs/bart/2.5/bart_ref/02_additional_examples.mdx b/product_docs/docs/bart/2.5/bart_ref/02_additional_examples.mdx index 38a519f727d..faea13bbb52 100644 --- a/product_docs/docs/bart/2.5/bart_ref/02_additional_examples.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/02_additional_examples.mdx @@ -17,7 +17,7 @@ This section lists examples of the following BART operations. ## Restoring a Database Cluster with Tablespaces -The following code sample illustrates taking a backup and restoring a database cluster on a remote host containing tablespaces. For detailed information regarding using tablespaces, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +The following code sample illustrates taking a backup and restoring a database cluster on a remote host containing tablespaces. For detailed information regarding using tablespaces, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). On an Advanced Server database running on a remote host, the following tablespaces are created for use by two tables: @@ -264,7 +264,7 @@ tblspc_2 | enterprisedb | /opt/restore_tblspc_2 ## Restoring an Incremental Backup -Restoring an incremental backup may require additional setup steps depending upon the host on which the incremental backup is to be restored. For more information, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +Restoring an incremental backup may require additional setup steps depending upon the host on which the incremental backup is to be restored. For more information, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). This section provides an example of creating backup chains and then restoring an incremental backup. @@ -453,7 +453,7 @@ Restoring incremental backup `incr_1-b` as shown by the preceding example result ## Managing Backups -This section illustrates evaluating, marking, and deleting backups using the `MANAGE` subcommand using a redundancy retention policy and a recovery window retention policy. 
For detailed information about the `MANAGE` subcommand, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/latest/bart_user/). +This section illustrates evaluating, marking, and deleting backups with the `MANAGE` subcommand under a redundancy retention policy and a recovery window retention policy. For detailed information about the `MANAGE` subcommand, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). @@ -1065,7 +1065,7 @@ dev 1428502171990 2015-04-08 10:09:34 EDT 5.65 MB 80.00 MB ## Managing Incremental Backups -This section illustrates evaluating, marking, and deleting incremental backups using the `MANAGE` and `DELETE` subcommands utilizing redundancy retention policy and recovery window retention policy. For detailed information about the `MANAGE` and `DELETE` subcommands, as well as the redundancy retention and recovery window retention policy, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/latest/bart_user/). +This section illustrates evaluating, marking, and deleting incremental backups using the `MANAGE` and `DELETE` subcommands under a redundancy retention policy and a recovery window retention policy. For detailed information about the `MANAGE` and `DELETE` subcommands, as well as the redundancy retention and recovery window retention policy, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). - [Using a Redundancy Retention Policy](#redundancy_retention_policy) provides an example of using the `MANAGE` and `DELETE` subcommands when a 3-backup redundancy retention policy is in effect. - [Using a Recovery Window Retention Policy](#recovery_window_retention_policy) provides an example of using the `MANAGE` and `DELETE` subcommands when a 1-day recovery window retention policy is in effect. diff --git a/product_docs/docs/bart/2.5/bart_ref/03_sample_bart_system_with_local_and_remote_database_servers.mdx b/product_docs/docs/bart/2.5/bart_ref/03_sample_bart_system_with_local_and_remote_database_servers.mdx index b1527216806..a383fec90bf 100644 --- a/product_docs/docs/bart/2.5/bart_ref/03_sample_bart_system_with_local_and_remote_database_servers.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/03_sample_bart_system_with_local_and_remote_database_servers.mdx @@ -10,7 +10,7 @@ legacyRedirectsGenerated: This section describes a sample BART managed backup and recovery system consisting of both local and remote database servers. The complete steps to configure and operate the system are provided. -For detailed information about configuring a BART system, see the *EDB Backup and Recovery Installation and Upgrade Guide*. For detailed information about the operational procedures and BART subcommands, see the *EDB Backup and Recovery User Guide*. These guides are available at the [EDB website](/bart/latest/bart_inst/). +For detailed information about configuring a BART system, see the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/). For detailed information about the operational procedures and BART subcommands, see the *EDB Backup and Recovery User Guide* available at the [EDB website](/bart/2.5/bart_user/). 
The environment for this sample system is as follows: @@ -591,7 +591,7 @@ Add entries to the `.pgpass` file on each server to allow the BART user account 192.168.2.24:5432:*:postgres:password ``` -For more information about using a `.pgpass` file, please see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/libpq-pgpass.html). +For more information about using a `.pgpass` file, please see the [PostgreSQL documentation](https://www.postgresql.org/docs/12/libpq-pgpass.html). While connected to `MKTG` on 192.168.2.24, execute the following `CREATE ROLE` command to create the replication database superuser: @@ -848,7 +848,7 @@ drwx------ 2 enterprisedb enterprisedb 4096 Apr 23 15:36 backup Use the BART `INIT` subcommand to complete the directory structure and set the Postgres `archive_command` configuration parameter. -Before invoking any BART subcommands, set up a profile under the BART user account’s home directory to set the `LD_LIBRARY_PATH` and `PATH` environment variables. For more information regarding setting this variable, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/). +Before invoking any BART subcommands, set up a profile under the BART user account’s home directory to set the `LD_LIBRARY_PATH` and `PATH` environment variables. For more information regarding setting this variable, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/). The `-o` option is specified with the `INIT` subcommand to force the setting of the Postgres `archive_command` configuration parameter when `archive_mode` is `off` or if the Postgres `archive_command` parameter is already set and needs to be overridden. diff --git a/product_docs/docs/bart/2.5/bart_ref/index.mdx b/product_docs/docs/bart/2.5/bart_ref/index.mdx index 6a8817bd039..2f5c5504dcf 100644 --- a/product_docs/docs/bart/2.5/bart_ref/index.mdx +++ b/product_docs/docs/bart/2.5/bart_ref/index.mdx @@ -18,7 +18,7 @@ This guide acts as a quick reference for BART subcommands and provides comprehen - Evaluating, marking, and deleting backups and incremental backups - Configuring and operating local and remote database servers -For detailed information about BART subcommands and operations, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/latest/bart_user/). +For detailed information about BART subcommands and operations, see the EDB Backup and Recovery User Guide available at the [EDB website](/bart/2.5/bart_user/). The document is organized as follows: diff --git a/product_docs/docs/bart/2.5/bart_user/01_introduction.mdx b/product_docs/docs/bart/2.5/bart_user/01_introduction.mdx index 1212b2847bf..4e6b92da76a 100644 --- a/product_docs/docs/bart/2.5/bart_user/01_introduction.mdx +++ b/product_docs/docs/bart/2.5/bart_user/01_introduction.mdx @@ -26,7 +26,7 @@ This guide provides the following information about using BART: - [backup and recovery management process](03_using_bart/#using_bart). - [using tablespaces](04_using_tablespaces/#using_tablespaces). -For information about installing BART, see the *EDB Backup and Recovery Installation and Upgrade Guide*; for examples of BART operations and subcommand usage, see the *EDB Backup and Recovery Reference Guide*. These guides are available at the [EDB website](/bart/latest/bart_inst/). 
+For information about installing BART, see the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/). For examples of BART operations and subcommand usage, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/). @@ -50,6 +50,6 @@ BART takes full backups using the `pg_basebackup` utility program under the foll - The thread count in effect is 1, and the `--no-pg_basebackup` option is not specified with the `BACKUP` subcommand. - Database servers can only be backed up using a `pg_basebackup` utility program of the same or later version than the database server version. -In the global section of the BART configuration file, the `pg_basebackup_path` parameter specifies the complete directory path to the `pg_basebackup` program. For information about the `pg_basebackup_path` parameter and the `thread_count`, see the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/). +In the global section of the BART configuration file, the `pg_basebackup_path` parameter specifies the complete directory path to the `pg_basebackup` program. For information about the `pg_basebackup_path` parameter and the `thread_count` parameter, see the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/). -For information about `pg_basebackup`, see the [PostgreSQL Core Documentation](https://postgresql.org/docs/current/static/app-pgbasebackup.html). +For information about `pg_basebackup`, see the [PostgreSQL Core Documentation](https://postgresql.org/docs/12/static/app-pgbasebackup.html). diff --git a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/01_incremental_backup_limitations_and_requirements.mdx b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/01_incremental_backup_limitations_and_requirements.mdx index a3fe8894d09..54629dd07b9 100644 --- a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/01_incremental_backup_limitations_and_requirements.mdx +++ b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/01_incremental_backup_limitations_and_requirements.mdx @@ -37,6 +37,6 @@ You must meet the following requirements before implementing incremental backup: - The incremental backup must be on the same timeline as the parent backup. The timeline changes after each recovery operation so an incremental backup cannot use a parent backup from an earlier timeline. -For information about configuring these requirements, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/). +For information about configuring these requirements, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/). The following section provides an overview of the basic incremental backup concepts. 
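As a sketch of the workflow these requirements enable — assuming a hypothetical server `ACCTG` with `allow_incremental_backups = enabled` and the WAL scanner running:

```shell
bart-scanner                                        # launch the WAL scanner
bart BACKUP -s ACCTG --backup-name full_1           # full backup: the parent
bart BACKUP -s ACCTG --parent full_1 --backup-name incr_1   # incremental
```

Each later incremental can name either the full backup or a previous incremental as its `--parent`, forming the backup chain described next.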
diff --git a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/02_concept_overview.mdx b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/02_concept_overview.mdx index 8183d4802bd..b433ff411af 100644 --- a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/02_concept_overview.mdx +++ b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/02_concept_overview.mdx @@ -14,7 +14,7 @@ Using incremental backups involves the following sequence of steps: The default `archive_path` is the BART backup catalog (`<backup_path>/<server_name>/archived_wals`). Using the `archive_path` parameter in the server section of the BART configuration file, you can specify the location where WAL files will be archived. - For more information about the `archive_path` parameter and configuring BART, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/). + For more information about the `archive_path` parameter and configuring BART, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/). 2. Take an initial full backup with the `BACKUP` subcommand. This full backup establishes the parent of the first incremental backup. 5. The incremental backup process identifies which WAL files may contain changes from when the parent backup was taken to the starting point of the incremental backup. The corresponding MBM files are used to locate and copy the modified blocks to the incremental backup directory along with other database cluster directories and files. Instead of backing up all full relation files, only the modified blocks are copied and saved. In addition, the relevant MBM files are condensed into one consolidated block map (CBM) file that is stored with the incremental backup. - Multiple block copier threads can be used to copy the modified blocks to the incremental backup directory. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for information about setting the `thread_count` parameter in the BART configuration file. See [Backup](../../03_using_bart/03_basic_bart_subcommand_usage/03_backup/#backup) for information about using the `--thread-count` option with the `BACKUP` subcommand. + Multiple block copier threads can be used to copy the modified blocks to the incremental backup directory. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for information about setting the `thread_count` parameter in the BART configuration file. See [Backup](../../03_using_bart/03_basic_bart_subcommand_usage/03_backup/#backup) for information about using the `--thread-count` option with the `BACKUP` subcommand. 6. Invoke the restore process for an incremental backup using the `RESTORE` subcommand in the same manner as restoring a full backup. The `-i` option specifies the backup identifier or name of the incremental backup to restore. The restore process begins by going back through the chain of past, parent incremental backups until the initial full backup starting the chain is identified. This full backup provides the initial set of directories and files to be restored to the location specified with the `-p` option. Each subsequent incremental backup in the chain is then restored. 
Restoration of an incremental backup uses its CBM file to restore the modified blocks from the incremental backup. diff --git a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx index 5264499e13e..b62fd717336 100644 --- a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx +++ b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx @@ -49,6 +49,6 @@ MBM files have the suffix `.mbm`. In preparation for any incremental backup, the WAL files should be scanned as soon as they are copied to the `archive_path`. Thus, the WAL scanner should be running as soon as the WAL files from the database cluster are archived to the `archive_path`. If the `archive_path` contains WAL files that have not yet been scanned, starting the WAL scanner begins scanning these files. If a WAL file fails to be scanned (resulting in a missing MBM file), you can use the WAL scanner to specify an individual WAL file. -Under certain conditions such as when the Network File System (NFS) is used to copy WAL files to the `archive_path`, the WAL files may have been missed by the WAL scanner program for scanning and creation of MBM files. Use the `scan_interval` parameter in the BART configuration file to initiate force scanning of WAL files in the `archive_path` to ensure MBM files are generated. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for more information about the `scan_interval` parameter. +Under certain conditions, such as when the Network File System (NFS) is used to copy WAL files to the `archive_path`, the WAL files may have been missed by the WAL scanner program for scanning and creation of MBM files. Use the `scan_interval` parameter in the BART configuration file to initiate force scanning of WAL files in the `archive_path` to ensure MBM files are generated. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for more information about the `scan_interval` parameter. See [Running the BART WAL Scanner](../../03_using_bart/04_running_the_bart_wal_scanner/#running_the_bart_wal_scanner) for information about using the WAL scanner. diff --git a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/05_restoring_an_incremental_backup.mdx b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/05_restoring_an_incremental_backup.mdx index c86914aa329..a511a1938fe 100644 --- a/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/05_restoring_an_incremental_backup.mdx +++ b/product_docs/docs/bart/2.5/bart_user/02_overview/01_block-level_incremental_backup/05_restoring_an_incremental_backup.mdx @@ -37,6 +37,6 @@ No editing is needed in the `bart.cfg` file installed on the remote host. **Step 2:** Determine the Linux operating system user account on the remote host to be used as the remote user. This user is specified by the `remote_host` parameter in the BART configuration file or by the `-r` option when using the `RESTORE` subcommand to restore the incremental backup. 
The remote user must be the owner of the directory where the incremental backup is to be restored on the remote host. By default, the user account is `enterprisedb` for Advanced Server or `postgres` for PostgreSQL. -**Step 3:** Ensure a passwordless SSH/SCP connection is established from the BART user on the BART host to the remote user on the remote host. For information about creating a passwordless SSH/SCP connection, see the *EDB Backup and Recovery Installation and Upgrade Guide*, available at the [EDB website](/bart/latest/bart_inst/). +**Step 3:** Ensure a passwordless SSH/SCP connection is established from the BART user on the BART host to the remote user on the remote host. For information about creating a passwordless SSH/SCP connection, see the *EDB Backup and Recovery Installation and Upgrade Guide*, available at the [EDB website](/bart/2.5/bart_inst/). -When restoring an incremental backup, specify the backup identifier or name of the incremental backup that will be restored. See the [RESTORE](../../03_using_bart/03_basic_bart_subcommand_usage/08_restore/#restore) documentation for more details. To view an example of restoring an incremental backup, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/). +When restoring an incremental backup, specify the backup identifier or name of the incremental backup that will be restored. See the [RESTORE](../../03_using_bart/03_basic_bart_subcommand_usage/08_restore/#restore) documentation for more details. To view an example of restoring an incremental backup, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/). diff --git a/product_docs/docs/bart/2.5/bart_user/02_overview/02_creating_a_backup_chain.mdx b/product_docs/docs/bart/2.5/bart_user/02_overview/02_creating_a_backup_chain.mdx index 644040d87f1..d45de3e33ab 100644 --- a/product_docs/docs/bart/2.5/bart_user/02_overview/02_creating_a_backup_chain.mdx +++ b/product_docs/docs/bart/2.5/bart_user/02_overview/02_creating_a_backup_chain.mdx @@ -19,4 +19,4 @@ Since restoration of an incremental backup is dependent upon first restoring the The actions of retention policy management are applied to the full backup and all of its successive incremental backups within the chain in an identical manner as if they were one backup. Thus, use of retention policy management does not result in the breakup of a backup chain. -See the *EDB Backup and Recovery Reference Guide*, available at the [EDB website](/bart/latest/bart_ref/) for examples of creating a backup chain and restoring an incremental backup. +See the *EDB Backup and Recovery Reference Guide*, available at the [EDB website](/bart/2.5/bart_ref/) for examples of creating a backup chain and restoring an incremental backup. diff --git a/product_docs/docs/bart/2.5/bart_user/02_overview/index.mdx b/product_docs/docs/bart/2.5/bart_user/02_overview/index.mdx index 7b5b7e92fb3..8a5e6c69b66 100644 --- a/product_docs/docs/bart/2.5/bart_user/02_overview/index.mdx +++ b/product_docs/docs/bart/2.5/bart_user/02_overview/index.mdx @@ -15,11 +15,11 @@ BART provides a simplified interface for the continuous archiving and point-in-t - Archiving the `Write-Ahead Log segments` (WAL files), which continuously record changes to be made to the database files. 
- Performing *Point-In-Time Recovery* (PITR) to a specified transaction ID or timestamp with respect to a timeline using a full backup along with successive [block-level incremental backups](01_block-level_incremental_backup/#block-level_incremental_backup) that reside in the same backup chain, and the WAL files.

-Detailed information regarding WAL files and point-in-time recovery is documented in the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/current/static/continuous-archiving.html).
+Detailed information regarding WAL files and point-in-time recovery is documented in the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/12/static/continuous-archiving.html).

The general term *backup* refers to both full backups and incremental backups.

-When taking a full backup of a standby server, BART uses the PostgreSQL `pg_basebackup` utility program. However, it must be noted that for standby servers, you can only take a full backup, but cannot take an incremental or parallel backups. For information about standby servers, see the [PostgreSQL Documentation](https://www.postgresql.org/docs/current/static/high-availability.html).
+When taking a full backup of a standby server, BART uses the PostgreSQL `pg_basebackup` utility program. However, note that for standby servers you can take only a full backup; you cannot take incremental or parallel backups. For information about standby servers, see the [PostgreSQL Documentation](https://www.postgresql.org/docs/12/static/high-availability.html).

BART uses a centralized backup catalog, a single configuration file, and a command line interface controlling the necessary operations to simplify the management process. Reasonable defaults are automatically used for various backup and restore options. BART also performs the necessary recovery file configuration required for point-in-time recovery using its command line interface.

@@ -59,7 +59,7 @@ Other concepts and terms referred to in this document include the following:

- **Secure Shell (SSH)/Secure Copy (SCP).** Linux utility programs used to log into hosts (SSH) and copy files (SCP) between hosts. A valid user account must be specified that exists on the target host and in fact is the user account under which the SSH or SCP operations occur.

-For information on how all of these components are configured and used with BART, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/).
+For information on how all of these components are configured and used with BART, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/).

**Supported BART Operations**

diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/01_performing_a_restore_operation.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/01_performing_a_restore_operation.mdx index 8b5fdec0c09..f6daf08042c 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/01_performing_a_restore_operation.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/01_performing_a_restore_operation.mdx @@ -18,7 +18,7 @@ The following steps describe the process of restoring a backup:

If you want to restore to a new, empty directory, create the directory in which you want to restore the backed up database cluster.
Ensure the data directory can be written to by the BART user account or by the user account specified by the `remote_host` configuration parameter, or by the `--remote-host` option of the `RESTORE` subcommand (if these are to be used). -**Step 4:** Perform the same process for tablespaces as described in Step 3. The `tablespace_path` parameter in the BART configuration file must contain the tablespace directory paths to which the tablespace data files are to be restored. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for more information about this parameter. +**Step 4:** Perform the same process for tablespaces as described in Step 3. The `tablespace_path` parameter in the BART configuration file must contain the tablespace directory paths to which the tablespace data files are to be restored. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for more information about this parameter. **Step 5:** Identify the backup to use for the restore operation and obtain the backup ID or backup name. @@ -50,8 +50,8 @@ All files and directories must be owned by the user account that you intend to u **Step 11:** Start the database server to initiate recovery. After completion, check the database server log file to ensure the recovery was successful. -If the backup is restored to a different location than where the original database cluster resided, operations dependent upon the database cluster location may fail if supporting service scripts are not updated to reflect the location where the backup has been restored. For information about the use and modification of service scripts, see the EDB Advanced Server Installation Guide available at the [EDB website](/epas/latest/). +If the backup is restored to a different location than where the original database cluster resided, operations dependent upon the database cluster location may fail if supporting service scripts are not updated to reflect the location where the backup has been restored. For information about the use and modification of service scripts, see the EDB Advanced Server Installation Guide. See [Restore](../03_basic_bart_subcommand_usage/08_restore/#restore) for more information about using the BART `Restore` subcommand. -An example of a restore operation is documented in the EDB Backup and Recovery Reference Guide available at the [EDB website](/bart/latest/bart_ref/). +An example of a restore operation is documented in the EDB Backup and Recovery Reference Guide available at the [EDB website](/bart/2.5/bart_ref/). diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/02_point_in_time_recovery_operation.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/02_point_in_time_recovery_operation.mdx index caaaa89e7c0..ffd0aa13bf7 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/02_point_in_time_recovery_operation.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/01_bart_management_overview/02_point_in_time_recovery_operation.mdx @@ -48,4 +48,4 @@ The following steps outline how to perform a point-in-time recovery operation fo 9. Start the database server, which will then perform the point-in-time recovery operation if recovery settings are saved in the `postgresql.auto.conf` file. 
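For illustration only, the recovery settings that BART saves into the `postgresql.auto.conf` file take roughly the following form; the backup catalog path and target timestamp shown here are hypothetical placeholders, not output from an actual run:

```text
restore_command = 'cp /opt/backup/acctg/archived_wals/%f %p'
recovery_target_time = '2021-03-30 11:30:00 EDT'
```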
-For a detailed description of the `RESTORE` subcommand, see [Basic BART Subcommand Usage](../03_basic_bart_subcommand_usage/#basic_bart_subcommand_usage). An example of a Point-in-Time Recovery operation is documented in the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/). See [Restore](../03_basic_bart_subcommand_usage/08_restore/#restore) for more information about using the `Restore` subcommand.
+For a detailed description of the `RESTORE` subcommand, see [Basic BART Subcommand Usage](../03_basic_bart_subcommand_usage/#basic_bart_subcommand_usage). An example of a Point-in-Time Recovery operation is documented in the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/). See [Restore](../03_basic_bart_subcommand_usage/08_restore/#restore) for more information about using the `Restore` subcommand.

diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/03_setting_the_retention_policy.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/03_setting_the_retention_policy.mdx index a4396239bfe..f30524d0e69 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/03_setting_the_retention_policy.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/03_setting_the_retention_policy.mdx @@ -8,7 +8,7 @@

-The retention policy is determined by the `retention_policy` parameter in the BART configuration file. It can be applied globally to all servers, but each server can override the global retention policy with its own. For information about creating a global retention policy and an individual database server retention policy, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/).
+The retention policy is determined by the `retention_policy` parameter in the BART configuration file. It can be applied globally to all servers, but each server can override the global retention policy with its own. For information about creating a global retention policy and an individual database server retention policy, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/).

There are two types of retention policies: the redundancy retention policy and the recovery window retention policy, as described in the following sections.

diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/04_managing_the_backups_based_on_the_retention_policy.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/04_managing_the_backups_based_on_the_retention_policy.mdx index def779a4026..2a744e416e8 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/04_managing_the_backups_based_on_the_retention_policy.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/04_managing_the_backups_based_on_the_retention_policy.mdx @@ -144,4 +144,4 @@ When the `MANAGE` subcommand is invoked, BART evaluates active backups:

!!! Note The status of backups currently marked as `obsolete` or `keep` is not changed.
To re-evaluate such backups and then classify them, their status must first be reset to `active` with the `MANAGE -c nokeep` option. See [Marking the Backup Status](02_marking_the_backup_status/#marking_the_backup_status) for more information.

-See the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/) to review examples of how to evaluate, mark, and delete backups using a redundancy retention policy and recovery window retention policy, as well as examples of `MANAGE` subcommand.
+See the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/) to review examples of how to evaluate, mark, and delete backups using a redundancy retention policy and recovery window retention policy, as well as examples of the `MANAGE` subcommand.

diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/05_managing_incremental_backups.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/05_managing_incremental_backups.mdx index c69f6b09086..7b25ea009fe 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/05_managing_incremental_backups.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/02_managing_backups_using_a_retention_policy/05_managing_incremental_backups.mdx @@ -37,7 +37,7 @@ When a [redundancy retention policy](03_setting_the_retention_policy/#redundancy

When determining the number of backups that exceeds the number specified by the `retention_policy` parameter, only full backups are counted for the comparison. Incremental backups are not included in the count for the comparison against the `retention_policy` parameter setting.

-See the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/) for examples demonstrating use of the `MANAGE` and `DELETE` subcommands when a redundancy retention policy is in effect.
+See the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/) for examples demonstrating use of the `MANAGE` and `DELETE` subcommands when a redundancy retention policy is in effect.

## Using a Recovery Window Retention Policy with Incremental Backups

@@ -48,4 +48,4 @@ If the `MANAGE` command is invoked when BART is configured to use a [recovery wi

The status of an incremental backup is changed to `obsolete` regardless of whether the date/time when the incremental backup was taken still lies within the recovery window.

-See the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/) for examples demonstrating use of the `MANAGE` and `DELETE` subcommands when a recovery window retention policy is in effect.
+See the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/) for examples demonstrating use of the `MANAGE` and `DELETE` subcommands when a recovery window retention policy is in effect.
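To make the retention workflow concrete, a typical sequence might look like the following sketch; the server name `acctg` is a placeholder, and the exact options are described in the `MANAGE` documentation:

```text
bart MANAGE -s acctg       # evaluate active backups and mark them per the retention policy
bart MANAGE -s acctg -d    # delete the backups marked obsolete
```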
diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/03_backup.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/03_backup.mdx index aa12cd957e2..6bb4215ed62 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/03_backup.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/03_backup.mdx @@ -48,9 +48,9 @@ bart BACKUP -s { | all } [ -F p]

- Before performing the backup, BART checks to ensure there is enough disk space to completely store the backup in the BART backup catalog.

-- In the `postgresql.conf` file, ensure the `wal_keep_segments` configuration parameter is set to a sufficiently large value. A low setting of the `wal_keep_segments` configuration parameter may result in the deletion of some WAL files before the BART `BACKUP` subcommand saves them to the `archive_path`. For information about the `wal_keep_segments` parameter, see the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/current/static/runtime-config-replication.html).
+- In the `postgresql.conf` file, ensure the `wal_keep_segments` configuration parameter is set to a sufficiently large value. A low setting of the `wal_keep_segments` configuration parameter may result in the deletion of some WAL files before the BART `BACKUP` subcommand saves them to the `archive_path`. For information about the `wal_keep_segments` parameter, see the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/12/static/runtime-config-replication.html).

-- In the BART configuration file, setting `xlog_method=stream` will instruct the server to stream the transaction log in parallel with creation of the backup for a specific database server; otherwise the transaction log files are collected upon completion of the backup. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for details about database server setting.
+- In the BART configuration file, setting `xlog_method=stream` will instruct the server to stream the transaction log in parallel with creation of the backup for a specific database server; otherwise the transaction log files are collected upon completion of the backup. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for details about database server settings.

!!! Note If the transaction log streaming method is used, the `-Fp` option for a plain text backup format must be specified with the `BACKUP` subcommand.

@@ -74,7 +74,7 @@ Specify the following options as required. If you do not specify any of the foll

| `-c `
`--compress-level ` | This is applicable only for full backup. Specify this option to apply gzip compression to the tar file output at the given compression level. `compression_level` is a digit from 1 through 9, with 9 being the best compression. This option is applicable only for the tar format. | | `--parent { backup_id \| backup_name }` | Specify this option to take an incremental backup. `` is the backup identifier of a parent backup. `` is the user-defined alphanumeric name of a parent backup.
The parent is a backup taken prior to the incremental backup. The parent backup can be either a full backup or an incremental backup.
The option `-Fp` must be specified since an incremental backup can only be taken in plain text format.
An incremental backup cannot be taken on a standby database server. See [Block-Level Incremental Backup](../../02_overview/01_block-level_incremental_backup/#block-level_incremental_backup) for additional information on incremental backups. | | `--backup-name ` | Specify this option to assign a user-defined, alphanumeric friendly name to the backup. The maximum permitted length of backup name is 49 characters.
The backup name may include the following variables to be substituted by the timestamp values when the backup is taken: 1) `%year` – 4-digit year, 2) `%month` – 2-digit month, 3) `%day` – 2-digit day, 4) `%hour` – 2-digit hour, 5) `%minute` – 2-digit minute, and 6) `%second` – 2-digit second.
To include the percent sign (`%`) as a character in the backup name, specify `%%` in the alphanumeric string.
If the backup name contains space characters (that is, more than one word) or is referenced with the option `-i` by other subcommands (such as `restore`), enclose the string in single quotes or double quotes. See [backup name examples](#backup_name_examples).
If the `--backup-name` option is not specified, and the `backup_name` parameter is not set for this database server in the BART configuration file, then the backup can only be referenced in other BART subcommands by the BART assigned backup identifier. | -| `--thread-count ` | Use this option to use the number of worker threads to run in parallel to copy blocks for a backup.
If the option `--thread-count` is omitted, then the `thread_count` parameter in the BART configuration file applicable to this database server is used.
If the option `--thread-count` is not enabled for this database server, then the `thread_count` setting in the global section of the BART configuration file is used.
If the option `--thread-count` is not set in the global section as well, the default number of threads is 1.
If parallel backup is run with N number of worker threads, then it will initiate N+ 1 concurrent connections with the server.
Thread count will not be effective if backup is taken on a standby server.
For more information about the `--thread-count` parameter, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/latest/bart_inst/) | +| `--thread-count ` | Use this option to specify the number of worker threads to run in parallel to copy blocks for a backup.
If the option `--thread-count` is omitted, then the `thread_count` parameter in the BART configuration file applicable to this database server is used.
If the `thread_count` parameter is not set for this database server, then the `thread_count` setting in the global section of the BART configuration file is used.
If the `thread_count` parameter is not set in the global section either, the default number of threads is 1.
If a parallel backup is run with N worker threads, then it will initiate N+1 concurrent connections with the server.
The thread count is not effective if the backup is taken on a standby server.
For more information about the `--thread-count` parameter, see the EDB Backup and Recovery Installation and Upgrade Guide available at the [EDB website](/bart/2.5/bart_inst/). | | `--with-pg_basebackup` | This is applicable only for full backup. Specify this option to use `pg_basebackup` to take a full backup. The thread count in effect (as given by the `thread_count` parameter in the BART configuration file) is ignored.
When taking a full backup, if the thread count in effect is greater than `1`, then the `pg_basebackup` utility is not used to take the full backup (parallel worker threads are used) unless the option `--with-pg_basebackup` is specified with the `BACKUP` subcommand. | | `--no-pg_basebackup` | This is applicable only for full backup. Specify this option if you do not want `pg_basebackup` to be used to take a full backup.
When taking a full backup, if the thread count in effect is only `1`, then the `pg_basebackup` utility is used to take the full backup unless the option `--no-pg_basebackup` is specified with the `BACKUP` subcommand. | | `--check` | This is applicable only for incremental backup. Specify this option to verify that the required MBM files are present in the `archived_wals` directory as specified in the `archive_path` parameter in the `bart.cfg` file before taking an incremental backup. The option `--parent` must be specified when the option `--check` is used. An actual incremental backup is not taken when the option `--check` is specified. |

diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/07_manage.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/07_manage.mdx index 52a74badaa7..17424fed04b 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/07_manage.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/07_manage.mdx @@ -12,7 +12,7 @@ The `MANAGE` subcommand can be invoked to:

- Evaluate backups, mark their status, and delete obsolete backups based on the `retention_policy` parameter in the BART configuration file (See [Managing Backups Using a Retention Policy](../02_managing_backups_using_a_retention_policy/#managing_backups_using_a_retention_policy) for information about retention policy management).

-- Compress the archived WAL files based on the `wal_compression` parameter in the BART configuration file (See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for information about setting this parameter).
+- Compress the archived WAL files based on the `wal_compression` parameter in the BART configuration file (See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for information about setting this parameter).

**Syntax:**

diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/08_restore.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/08_restore.mdx index d74a4d1f5d6..0d9b25e4fbd 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/08_restore.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/08_restore.mdx @@ -22,14 +22,14 @@ bart RESTORE -s -p [ -c ]
```

-For information about using a continuous archive backup for recovery, see the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/13/static/continuous-archiving.html). This reference material provides detailed information about the underlying point-in-time recovery process and the meaning and usage of the restore options that are generated into the `postgresql.auto.conf` file by BART.
+For information about using a continuous archive backup for recovery, see the [PostgreSQL Core Documentation](https://www.postgresql.org/docs/12/static/continuous-archiving.html). This reference material provides detailed information about the underlying point-in-time recovery process and the meaning and usage of the restore options that are generated into the `postgresql.auto.conf` file by BART.
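As a minimal illustrative sketch (the server name, backup name, restore path, and recovery timestamp below are placeholders), a point-in-time restore might be invoked as:

```text
bart RESTORE -s acctg -i acctg_full_1 -p /opt/restore_pgdata -g '2021-03-30 11:30:00 EDT'
```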
**Please note**:

- For special requirements when restoring an incremental backup to a remote database server, see [Restoring an Incremental Backup on a Remote Host](../../02_overview/01_block-level_incremental_backup/05_restoring_an_incremental_backup/#restoring_an_incremental_backup_on_a_remote_host).
- Check to ensure that the host where the backup is to be restored contains enough disk space for the backup and its archived WAL files. The `RESTORE` subcommand may result in an error while copying files if there is not enough disk space available.
- See [Performing a Restore Operation](../01_bart_management_overview/01_performing_a_restore_operation/#performing_a_restore_operation) to view steps on how to perform a restore operation and see [Point-In-Time Recovery Operation](../01_bart_management_overview/02_point_in_time_recovery_operation/#point_in_time_recovery_operation) to view steps on how to perform a point-in-time recovery operation.
-- If the backup is restored to a different database cluster directory than where the original database cluster resided, certain operations dependent upon the database cluster location may fail. This happens if their supporting service scripts are not updated to reflect the new directory location of restored backup. For information about the usage and modification of service scripts, see the *EDB Advanced Server Installation Guide* available at the [EDB website](/epas/latest/).
+- If the backup is restored to a different database cluster directory than where the original database cluster resided, certain operations dependent upon the database cluster location may fail. This happens if their supporting service scripts are not updated to reflect the new directory location of the restored backup. For information about the usage and modification of service scripts, see the *EDB Advanced Server Installation Guide*.

The following table describes the command options:

@@ -38,9 +38,9 @@

| `-s `
`--server ` | `` is the name of the database server to be restored. | | `-p `
`--restore-path ` | `` is the directory path where the backup of the database server is to be restored. The directory must be empty and have the proper ownership and privileges assigned to it. | | `-i { \| }`

`--backupid { \| }` | `` is the backup identifier of the backup to be used for the restoration and `` is the user-defined alphanumeric name for the backup.
If the option is omitted, the default is to use the latest backup. | -| `-r or --remote-host ` | `` is the user account on the remote database server host that accepts a passwordless SSH/SCP login connection and is the owner of the directory where the backup is to be restored and `` is the IP address of the remote host to which the backup is to be restored. This option must be specified if the `` parameter for this database server is not set in the BART configuration file.
If the BART user account is not the same as the operating system account owning the `` directory given with the `-p` option, use the `` BART configuration parameter or the `RESTORE` subcommand `-r` option to specify the `` directory owner even when restoring to a directory on the same host as the BART host.
See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for information about the `` parameter. | +| `-r or --remote-host ` | `` is the user account on the remote database server host that accepts a passwordless SSH/SCP login connection and is the owner of the directory where the backup is to be restored and `` is the IP address of the remote host to which the backup is to be restored. This option must be specified if the `` parameter for this database server is not set in the BART configuration file.
If the BART user account is not the same as the operating system account owning the `` directory given with the `-p` option, use the `` BART configuration parameter or the `RESTORE` subcommand `-r` option to specify the `` directory owner even when restoring to a directory on the same host as the BART host.
See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for information about the `` parameter. | | `-w `
`--workers ` | `` is the specification of the number of worker processes to run in parallel to stream the modified blocks of an incremental backup to the restore location.
For example, if 4 worker processes are specified, 4 receiver processes on the restore host and 4 streamer processes on the BART host are used. The output of each streamer process is connected to the input of a receiver process. When the receiver gets to the point where it needs a modified block file, it obtains those modified blocks from its input. With this method, the modified block files are never written to the restore host disk. If the `-w` option is omitted, the default is `1` worker process. |
`--target-tli ` | `` is the integer identifier of the timeline to be used for replaying the archived WAL files for point-in-time recovery. | | `-x `
`--target-xid ` | `` is the integer identifier of the transaction ID that determines the transaction up to and including, which point-in-time recovery encompasses. Include either the `-x ` or the `--target-xid ` option if point-in-time recovery is desired. | | `-g `

`--target-timestamp ` | `` is the timestamp that determines the point in time up to and including, which point-in-time recovery encompasses. Include either the `--target-timestamp ` or the `-g ` option if point-in-time recovery is desired. | -| `-c`
`--copy-wals` | Specify this option to copy archived WAL files from the BART backup catalog to `/archived_wals` directory.
If recovery settings are saved in the `postgresql.auto.conf` file for point-in-time recovery, the `restore_command` retrieves the WAL files from `/archived_wals` for the database server archive recovery.
If the `-c` option is omitted and the `copy_wals_during_restore` parameter in the BART configuration file is not enabled in a manner applicable to this database server, the `restore_command` in the `postgresql.auto.conf` file is generated by default to retrieve the archived WAL files directly from the BART backup catalog. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for information about the `copy_wals_during_restore` parameter. | \ No newline at end of file +| `-c`
`--copy-wals` | Specify this option to copy archived WAL files from the BART backup catalog to `/archived_wals` directory.
If recovery settings are saved in the `postgresql.auto.conf` file for point-in-time recovery, the `restore_command` retrieves the WAL files from `/archived_wals` for the database server archive recovery.
If the `-c` option is omitted and the `copy_wals_during_restore` parameter in the BART configuration file is not enabled in a manner applicable to this database server, the `restore_command` in the `postgresql.auto.conf` file is generated by default to retrieve the archived WAL files directly from the BART backup catalog. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for information about the `copy_wals_during_restore` parameter. | \ No newline at end of file diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/index.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/index.mdx index 379dd137082..8dc5080fcef 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/index.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/03_basic_bart_subcommand_usage/index.mdx @@ -10,7 +10,7 @@ legacyRedirectsGenerated: This section briefly describes the BART subcommands and options. You can invoke the `bart` program (located in the `/bin` directory) with the desired options and subcommands to manage your BART installation. -To view examples of BART subcommands, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/). +To view examples of BART subcommands, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/). **Syntax for invoking BART**: @@ -39,7 +39,7 @@ If execution of BART subcommands fails with the following error message, then yo **Workaround:** Set the `LD_LIBRARY_PATH` environment variable for the BART user account to include the directory containing the `libpq` library. This directory is `POSTGRES_INSTALL_HOME/lib`. -It is suggested that the `PATH` and the `LD_LIBRARY_PATH` environment variable settings be placed in the BART user account’s profile. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for details. +It is suggested that the `PATH` and the `LD_LIBRARY_PATH` environment variable settings be placed in the BART user account’s profile. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for details. In the following sections, the `help` option is omitted from the syntax diagrams for the purpose of providing readability for the subcommand options. diff --git a/product_docs/docs/bart/2.5/bart_user/03_using_bart/04_running_the_bart_wal_scanner.mdx b/product_docs/docs/bart/2.5/bart_user/03_using_bart/04_running_the_bart_wal_scanner.mdx index b047505556e..9b111c3ba0e 100644 --- a/product_docs/docs/bart/2.5/bart_user/03_using_bart/04_running_the_bart_wal_scanner.mdx +++ b/product_docs/docs/bart/2.5/bart_user/03_using_bart/04_running_the_bart_wal_scanner.mdx @@ -30,13 +30,13 @@ bart-scanner The WAL scanner processes each WAL file to find and record modified blocks in a corresponding modified block map (MBM) file. The default approach is that the WAL scanner gets notified whenever a new WAL file is added to the `archived_wals` directory specified in the `archive_path` parameter of the configuration file. It then scans the WAL file and produces the MBM file. -The default approach does not work in some cases; for example when the WAL files are shipped to the `archive_path` using the Network File System (NFS) and also in case of some specific platforms. 
This results in the WAL files being copied to the `archived_wals` directory, but the WAL scanner does not scan them (as WAL scanner is not aware of WAL file) and produce the MBM files. This results in the failure of an incremental backup. This can be avoided by using the timer-based WAL scanning approach, which is done by using the `scan_interval` parameter in the BART configuration file. The value for `scan_interval` is the number of seconds after which the WAL scanner will initiate force scanning of the new WAL files. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for more information about `scan_interval` parameter.
+The default approach does not work in some cases; for example, when the WAL files are shipped to the `archive_path` using the Network File System (NFS), and also on some specific platforms. This results in the WAL files being copied to the `archived_wals` directory without the WAL scanner scanning them (as the WAL scanner is not aware of the WAL files) and producing the MBM files. This results in the failure of an incremental backup. This can be avoided by using the timer-based WAL scanning approach, which is enabled with the `scan_interval` parameter in the BART configuration file. The value for `scan_interval` is the number of seconds after which the WAL scanner will initiate forced scanning of the new WAL files. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for more information about the `scan_interval` parameter.

When the `bart-scanner` program is invoked, it forks a separate process for each database server enabled with the `allow_incremental_backups` parameter.

The WAL scanner processes can run in either the foreground or background depending upon usage of the `--daemon` option. Use the `--daemon` option to run the WAL scanner process in the background so that all output messages can be viewed in the BART log file.

If the `--daemon` option is omitted, the WAL scanner process runs in the foreground and all output messages can be viewed from the terminal running the program as well as in the BART log file.

-See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for additional information about WAL scanning, `allow_incremental_backups`, and `logfile` parameters.
+See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.5/bart_inst/) for additional information about WAL scanning, `allow_incremental_backups`, and `logfile` parameters.

!!! Note The BART user account’s `LD_LIBRARY_PATH` environment variable may need to be set to include the directory containing the `libpq` library if invocation of the WAL scanner program fails. See [Basic BART Subcommand Usage](03_basic_bart_subcommand_usage/#basic_bart_subcommand_usage) for information about setting the `LD_LIBRARY_PATH` environment variable.
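For example, the two illustrative invocations below run the scanner in the background and in the foreground, respectively (assuming the BART `bin` directory is in the `PATH`):

```text
bart-scanner --daemon    # run in the background; output messages go to the BART log file
bart-scanner             # run in the foreground; output also appears on the terminal
```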
diff --git a/product_docs/docs/bart/2.5/bart_user/04_using_tablespaces.mdx b/product_docs/docs/bart/2.5/bart_user/04_using_tablespaces.mdx index 94d5450778a..1615d140aef 100644 --- a/product_docs/docs/bart/2.5/bart_user/04_using_tablespaces.mdx +++ b/product_docs/docs/bart/2.5/bart_user/04_using_tablespaces.mdx @@ -52,8 +52,8 @@ In either case, the directories specified in the `tablespace_path` parameter mus

If the database server is running on a remote host (in other words you are also using the `remote_host` configuration parameter or will specify the `--remote-host` option with the `RESTORE` subcommand), the specified tablespace directories must exist on the specified remote host.

-To view example of backing up and restoring a database cluster on a remote host containing tablespaces, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/).
+To view an example of backing up and restoring a database cluster on a remote host containing tablespaces, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/).

The directories must be owned by the user account with which you intend to start the database server (typically the Postgres user account) with no access by other users or groups as is required for the directory path to which the main full backup is to be restored.

-To view a sample BART managed backup and recovery system consisting of both local and remote database servers, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/latest/bart_ref/).
+To view a sample BART-managed backup and recovery system consisting of both local and remote database servers, see the *EDB Backup and Recovery Reference Guide* available at the [EDB website](/bart/2.5/bart_ref/).

diff --git a/product_docs/docs/bart/2.6/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx b/product_docs/docs/bart/2.6/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx index 5264499e13e..b62fd717336 100644 --- a/product_docs/docs/bart/2.6/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx +++ b/product_docs/docs/bart/2.6/bart_user/02_overview/01_block-level_incremental_backup/03_wal_scanning_preparation_for_an_incremental_backup.mdx @@ -49,6 +49,6 @@ MBM files have the suffix, `.mbm`.

In preparation for any incremental backup, the WAL files should be scanned as soon as they are copied to the `archive_path`. Thus, the WAL scanner should be running as soon as the WAL files from the database cluster are archived to the `archive_path`. If the `archive_path` contains WAL files that have not yet been scanned, starting the WAL scanner begins scanning these files. If a WAL file fails to be scanned (resulting in a missing MBM file), you can use the WAL scanner to specify an individual WAL file.

-Under certain conditions such as when the Network File System (NFS) is used to copy WAL files to the `archive_path`, the WAL files may have been missed by the WAL scanner program for scanning and creation of MBM files. Use the `scan_interval` parameter in the BART configuration file to initiate force scanning of WAL files in the `archive_path` to ensure MBM files are generated.
See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/latest/bart_inst/) for more information about the `scan_interval` parameter.
+Under certain conditions such as when the Network File System (NFS) is used to copy WAL files to the `archive_path`, the WAL files may have been missed by the WAL scanner program for scanning and creation of MBM files. Use the `scan_interval` parameter in the BART configuration file to initiate force scanning of WAL files in the `archive_path` to ensure MBM files are generated. See the *EDB Backup and Recovery Installation and Upgrade Guide* available at the [EDB website](/bart/2.6/bart_inst/) for more information about the `scan_interval` parameter.

See [Running the BART WAL Scanner](../../03_using_bart/04_running_the_bart_wal_scanner/#running_the_bart_wal_scanner) for information about using the WAL scanner.

diff --git a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/edb_pgadmin4_first_look.png b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/edb_pgadmin4_first_look.png old mode 100644 new mode 100755 index f1bfa944568..8aad2a70e56 --- a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/edb_pgadmin4_first_look.png +++ b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/edb_pgadmin4_first_look.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1
-oid sha256:e65b894e34e6e94a7b4bbeecd392d3b5abeb2d6b5196ea4305a7d120147946f9
-size 251034
+oid sha256:9201126dc356e396eb07869e4e30d441d81e3e81c9176117fbcef61eab057863
+size 235286
diff --git a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/pgadmin4_from_applications_menu.png b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/pgadmin4_from_applications_menu.png index c6436c1947c..0aa5f50f062 100644 --- a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/pgadmin4_from_applications_menu.png +++ b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/pgadmin4_from_applications_menu.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1
-oid sha256:20efca56bdb4d58c12edfd14c972233e189cc6989d8a4d762736de4b294832aa
-size 60486
+oid sha256:69006ea7125cfe9f27ed9774e5aa7965394bfb4d657b97a97ac890a14e30e695
+size 63593
diff --git a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/server_general.png b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/server_general.png old mode 100644 new mode 100755 index 1270787fbd5..ffaf97e7c8f --- a/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/server_general.png +++ b/product_docs/docs/epas/13/edb_pgadmin_linux_qs/images/server_general.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1
-oid sha256:c8bafab3b789370c9c32496438081679c361f07897652453a6638402b8c4fc20
-size 60984
+oid sha256:c44b464e549592db99d1faf3775af79da683ae6ade216d5f40da26ac21dfbadf
+size 54040
diff --git a/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx b/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx index 29073ab373e..60b55ef0a3a 100644 --- a/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx +++ b/product_docs/docs/jdbc_connector/42.2.19.1/02_requirements_overview.mdx @@ -37,3 +37,7 @@ The EDB JDBC Connector graphical installers are supported on the following Windo

- Windows 10
- Windows 8.1
+
+## Supported JDK Distribution
+
+Java Virtual Machine (JVM): Java SE 8 or higher (LTS version), including Oracle JDK, OpenJDK, and IBM SDK (Java) distributions.
diff --git a/product_docs/docs/net_connector/4.0.10.2/01_whats_new.mdx b/product_docs/docs/net_connector/4.0.10.2/01_whats_new.mdx new file mode 100644 index 00000000000..f597c6fe257 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/01_whats_new.mdx @@ -0,0 +1,13 @@
+---
+title: "What’s New"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.0.10.2/whats_new.html"
+---
+
+
+
+The following features have been added in EDB .NET Connector `4.0.10.2`:
+
+- This version introduces a new connection parameter, Load Role Based Tables. For more information, see Connection String Parameters.
diff --git a/product_docs/docs/net_connector/4.0.10.2/02_requirements_overview.mdx b/product_docs/docs/net_connector/4.0.10.2/02_requirements_overview.mdx new file mode 100644 index 00000000000..aee97439cbf --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/02_requirements_overview.mdx @@ -0,0 +1,30 @@
+---
+title: "Requirements Overview"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/requirements_overview.html"
+---
+
+
+
+The following section details the supported platforms for the EDB .NET Connector.
+
+## Supported Server Versions and Platforms
+
+The EDB .NET Connector is certified with Advanced Server version 9.5 and above.
+
+The EDB .NET Connector graphical installers are supported on the following Windows platforms:
+
+64-bit Windows:
+
+- Windows Server 2019
+- Windows Server 2016
+- Windows Server 2012 R2
+
+32-bit Windows:
+
+- Windows 10
+- Windows 8
+- Windows 7
+
diff --git a/product_docs/docs/net_connector/4.0.10.2/03_the_advanced_server_net_connector_overview.mdx b/product_docs/docs/net_connector/4.0.10.2/03_the_advanced_server_net_connector_overview.mdx new file mode 100644 index 00000000000..2ddef51af91 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/03_the_advanced_server_net_connector_overview.mdx @@ -0,0 +1,38 @@
+---
+title: "The EDB .NET Connector - Overview"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/the_advanced_server_net_connector_overview.html"
+---
+
+
+
+The EDB .NET Connector is a .NET data provider that allows a client application to connect to a database stored on an Advanced Server host. The .NET Connector accesses the data directly, allowing the client application optimal performance, a broad spectrum of functionality, and access to Advanced Server features.
+
+The .NET Connector supports the following frameworks:
+
+- `.NET Framework versions 4.0 and 4.5.1`
+- `.NET Standard 2.0`
+- `Entity Framework 5/6`
+
+
+## The .NET Class Hierarchy
+
+The .NET Class Hierarchy contains a number of classes that you can use to create objects that control a connection to the Advanced Server database and manipulate the data stored on the server. The following are just a few of the most commonly used object classes:
+
+`EDBConnection`
+
+  The `EDBConnection` class represents a connection to Advanced Server. An `EDBConnection` object contains a `ConnectionString` that instructs the .NET client how to connect to an Advanced Server database.
+ +`EDBCommand` + + An `EDBCommand` object contains an SQL command that the client will execute against Advanced Server. Before you can execute an `EDBCommand` object, you must link it to an `EDBConnection` object. + +`EDBDataReader` + + An `EDBDataReader` object provides a way to read an Advanced Server result set. You can use an `EDBDataReader` object to step through one row at a time, forward-only. + +`EDBDataAdapter` + + An `EDBDataAdapter` object links a result set to the Advanced Server database. You can modify values and use the `EDBDataAdapter` class to update the data stored in an Advanced Server database. diff --git a/product_docs/docs/net_connector/4.0.10.2/04_installing_and_configuring_the_net_connector.mdx b/product_docs/docs/net_connector/4.0.10.2/04_installing_and_configuring_the_net_connector.mdx new file mode 100644 index 00000000000..9055b93cf91 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/04_installing_and_configuring_the_net_connector.mdx @@ -0,0 +1,414 @@ +--- +title: "Installing and Configuring the .NET Connector" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/installing_and_configuring_the_net_connector.html" +--- + + + +This chapter describes how to install and configure the EDB .NET Connector. + +## Installing the .NET Connector + +You can use the EDB .NET Connector Installer (available [from the EDB website](https://www.enterprisedb.com/software-downloads-postgres)) to add the .NET Connector to your system. After downloading the installer, right-click on the installer icon, and select `Run As Administrator` from the context menu. When prompted, select an installation language and click `OK` to continue to the `Setup` window. + +![The .NET Connector Installation wizard](images/dotnet_installation_wizard.png) + +The .NET Connector Installation wizard + +Click `Next` to continue. + +![The Installation dialog](images/dotnet_installation_dialog.png) + +The Installation dialog + +Use the `Installation Directory` dialog to specify the directory in which the connector will be installed, and click `Next` to continue. + +![The Ready to Install dialog](images/ready_to_install.png) + +The Ready to Install dialog + +Click `Next` on the `Ready to Install` dialog to start the installation; popup dialogs confirm the progress of the installation wizard. + +![The installation is complete](images/dotnet_installation_complete.png) + +The installation is complete + +When the wizard informs you that it has completed the setup, click the `Finish` button to exit the dialog. + +You can also use StackBuilder Plus to add or update the connector on an existing Advanced Server installation; to open StackBuilder Plus, select `StackBuilder Plus` from the Windows `Apps` menu. + +![Starting StackBuilder Plus](images/starting_stackbuilder_plus.png) + +Starting StackBuilder Plus + +When StackBuilder Plus opens, follow the onscreen instructions. + +Select the `EnterpriseDB.Net Connector` option from the `Database Drivers` node of the tree control. + +![Selecting the Connectors installer](images/selecting_the_connectors_installer.png) + +Selecting the Connectors installer + +Follow the directions of the onscreen wizard to add or update an installation of an EDB Connector. 
+
+## Configuring the .NET Connector
+
+Please see the following environment-specific sections for information about configuring the .NET Connector:
+
+- **Referencing the Library Files.** [General configuration information](#referencing_the_library_files) applicable to all components.
+- **.NET Framework 4.0.** Instructions for configuring for use with [.NET Framework 4.0](#framework_setup_4_6_1).
+- **.NET Framework 4.5.** Instructions for configuring for use with [.NET Framework 4.5](#framework_setup_4_7_2).
+- **.NET Framework 4.5.1.** Instructions for configuring for use with [.NET Framework 4.5.1](#framework_setup_4_8).
+- **.NET Standard 2.0.** Instructions for configuring for use with [.NET Standard 2.0](#standard_setup_2).
+- **Entity Framework 5/6.** Instructions for configuring for use with [Entity Framework](#entity_setup_5_6).
+- **EDB VSIX.** Instructions for configuring for use with [EDB VSIX](#vsix_setup).
+
+### Referencing the Library Files
+
+
+
+To reference library files with Microsoft Visual Studio:
+
+1. Select the project in the `Solution Explorer`.
+2. Select `Add Reference` from the `Project` menu.
+3. When the `Add Reference` dialog box opens, browse to select the appropriate library files.
+
+Optionally, the library files can be copied to the specified location.
+
+Before you can use an EDB .NET class, you must import the namespace into your program. Importing a namespace makes the compiler aware of the classes available within the namespace. The namespace is:
+
+ `EnterpriseDB.EDBClient`
+
+If you are using Entity Framework 6, the following additional namespace is required:
+
+ `EntityFramework6.EnterpriseDB.EDBClient`
+
+The method you use to include the namespace varies by the type of application you are writing. For example, the following command imports a namespace into an `ASP.NET` page:
+
+ `<% import namespace="EnterpriseDB.EDBClient" %>`
+
+To import a namespace into a C# application, write:
+
+ `using EnterpriseDB.EDBClient;`
+
+### .NET Framework Setup
+
+The following sections describe the setup for various .NET versions.
+
+
+
+#### .NET Framework 4.0
+
+If you are using .NET Framework version 4.0, the data provider installation path is:
+
+ `C:\Program Files\edb\dotnet\net40\`
+
+The following shared library files are required:
+
+ `EDBDataProvider.2.0.2.dll`
+
+ `Mono.Security.dll`
+
+Depending upon the type of application you use, you may be required to import the namespace into the source code. See [Referencing the Library Files](#referencing_the_library_files) for this and other information about referencing library files.
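Regardless of the target framework, the basic usage pattern once the library files are referenced and the namespace is imported looks like the following minimal C# sketch; the connection string values are placeholders to adjust for your environment:

```csharp
using System;
using EnterpriseDB.EDBClient;

class ConnectTest
{
    static void Main()
    {
        // Placeholder connection values; 5444 is a common Advanced Server port.
        string connString = "Server=localhost;Port=5444;Database=edb;User ID=enterprisedb;Password=password;";

        using (EDBConnection conn = new EDBConnection(connString))
        {
            conn.Open();

            // EDBCommand executes SQL text against the open connection.
            using (EDBCommand cmd = new EDBCommand("SELECT version()", conn))
            {
                Console.WriteLine(cmd.ExecuteScalar());
            }
        }
    }
}
```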

+
+
+#### .NET Framework 4.5
+
+If you are using .NET Framework version 4.5, the data provider installation path is:
+
+   `C:\Program Files\edb\dotnet\net45\`
+
+You must add the following dependencies to your project:
+
+   `EnterpriseDB.EDBClient.dll`
+
+   `System.Threading.Tasks.Extensions.dll`
+
+   `System.Runtime.CompilerServices.Unsafe.dll`
+
+   `System.ValueTuple.dll`
+
+   `System.Memory.dll`
+
+Depending upon the type of application you use, you may be required to import the namespace into the source code. See [Referencing the Library Files](#referencing_the_library_files) for this and other information about referencing library files.
+
+
+
+#### .NET Framework 4.5.1
+
+If you are using .NET Framework version 4.5.1, the data provider installation path is:
+
+   `C:\Program Files\edb\dotnet\net451\`
+
+You must add the following dependencies to your project:
+
+   `EnterpriseDB.EDBClient.dll`
+
+   `System.Threading.Tasks.Extensions.dll`
+
+   `System.Runtime.CompilerServices.Unsafe.dll`
+
+   `System.ValueTuple.dll`
+
+   `System.Memory.dll`
+
+Depending upon the type of application you use, you may be required to import the namespace into the source code. See [Referencing the Library Files](#referencing_the_library_files) for this and other information about referencing library files.
+
+
+
+#### .NET Standard 2.0
+
+For .NET Standard 2.0, the data provider installation path is:
+
+   `C:\Program Files\edb\dotnet\netstandard2.0\`
+
+The following shared library files are required:
+
+   `EnterpriseDB.EDBClient.dll`
+
+   `System.Threading.Tasks.Extensions.dll`
+
+   `System.Runtime.CompilerServices.Unsafe.dll`
+
+   `System.ValueTuple.dll`
+
+!!! Note
+    If your target framework is .NET Core 2.0, include the following file in your project:
+
+`System.Threading.Tasks.Extensions.dll`
+
+Depending upon the type of application you use, you may be required to import the namespace into the source code. See [Referencing the Library Files](#referencing_the_library_files) for this and other information about referencing library files.
+
+
+
+### Entity Framework 5/6
+
+To set up the .NET Connector for use with Entity Framework, the data provider installation path is:
+
+   `C:\Program Files\edb\dotnet\EF\`
+
+The following shared library files are required:
+
+   `EntityFramework5.EnterpriseDB.EDBClient.dll`
+
+   `EntityFramework6.EnterpriseDB.EDBClient.dll`
+
+!!! Note
+    Entity Framework can be used with the `EnterpriseDB.EDBClient.dll` library available in the `net45` and `net451` subdirectories.
+
+See [Referencing the Library Files](#referencing_the_library_files) for information about referencing library files.
+
+Add the `DbProviderFactories` entries for the ADO.NET driver for Postgres to the `app.config` file.
 Add the following entries; the `name` and factory `type` values shown here are representative, so confirm them against your installation:
+
+```text
+<system.data>
+  <DbProviderFactories>
+    <add name="EnterpriseDB Data Provider"
+         invariant="EnterpriseDB.EDBClient"
+         description=".NET Data Provider for EnterpriseDB PostgreSQL"
+         type="EnterpriseDB.EDBClient.EDBClientFactory, EnterpriseDB.EDBClient" />
+  </DbProviderFactories>
+</system.data>
+```
+
+In the project’s `app.config` file, add the following entry for provider services under the `entityFramework/providers` tag; the provider services `type` value is representative, so use the type supplied with your driver:
+
+```text
+<provider invariantName="EnterpriseDB.EDBClient"
+          type="EnterpriseDB.EDBClient.EDBProviderServices, EntityFramework6.EnterpriseDB.EDBClient" />
+```
+
+The following is a representative example of the `app.config` file; confirm the exact assembly and type names against your installation:
+
+```text
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <configSections>
+    <section name="entityFramework"
+             type="System.Data.Entity.Internal.ConfigFile.EntityFrameworkSection, EntityFramework"
+             requirePermission="false" />
+  </configSections>
+  <system.data>
+    <DbProviderFactories>
+      <add name="EnterpriseDB Data Provider"
+           invariant="EnterpriseDB.EDBClient"
+           description=".NET Data Provider for EnterpriseDB PostgreSQL"
+           type="EnterpriseDB.EDBClient.EDBClientFactory, EnterpriseDB.EDBClient" />
+    </DbProviderFactories>
+  </system.data>
+  <entityFramework>
+    <providers>
+      <provider invariantName="EnterpriseDB.EDBClient"
+                type="EnterpriseDB.EDBClient.EDBProviderServices, EntityFramework6.EnterpriseDB.EDBClient" />
+    </providers>
+  </entityFramework>
+</configuration>

+```
+
+!!! Note
+    The same `DbProviderFactories` and `provider` entries are valid for the `web.config` file and the `app.config` file.
+
+Depending upon the type of application you are using, you may be required to import the namespace into the source code (see [Referencing the Library Files](#referencing_the_library_files)).
+
+For usage information about Entity Framework, refer to the Microsoft documentation.
+
+
+
+### EDB VSIX for Visual Studio 2015/2017/2019
+
+The EDB Data Designer Extensibility Provider (EDB VSIX) is a component that integrates Advanced Server database access into Visual Studio, providing integrated Visual Studio features.
+
+EDB VSIX allows you to connect to Advanced Server from within Visual Studio's Server Explorer and create a model from an existing database. If you want this Visual Studio integration, you must use EDB VSIX.
+
+EDB VSIX files are located in the following directory:
+
+   `C:\Program Files\edb\dotnet\vsix`
+
+The files available at the above location are:
+
+   `EnterpriseDB.vsix`
+   `SSDLToPgSQL.tt`
+
+#### Installation and Configuration for Visual Studio 2015/2017/2019
+
+Use the following steps to install and configure EDB VSIX.
+
+**Step 1:** Install EDB VSIX to the desired version of Visual Studio with the `EnterpriseDB.vsix` installer.
+
+If you already have an earlier version of the VSIX installed, we highly recommend that you uninstall it to avoid conflicts.
+
+It is no longer necessary or recommended to have `EnterpriseDB.EDBClient` in your global assembly cache (GAC).
+
+**Step 2:** Relaunch Visual Studio and verify from the `Tools > Extensions and Updates…` menu that the EDB extension is installed.
+
+**Step 3:** Use the gacutil utility at the Visual Studio Developer Command Prompt to add the `System.ValueTuple.dll` library, located at the following path, to the global assembly cache (GAC):
+
+`C:\Program Files\edb\dotnet\vsix\System.ValueTuple.dll`
+
+For example:
+
+`gacutil.exe /i System.ValueTuple.dll`
+
+**Step 4:** From the Server Explorer, right-click on `Data Connections`, click `Add Connection`, and verify that the `Enterprisedb Postgres Database` data source is available.
+
+#### Model First and Database First Usage
+
+**Step 1:** Use the gacutil utility at the Visual Studio Developer Command Prompt to add the `EntityFramework5.EnterpriseDB.EDBClient.dll` library to the global assembly cache (GAC). For example:
+
+`gacutil.exe /i EntityFramework5.EnterpriseDB.EDBClient.dll`
+
+**Step 2:** Add the `DbProviderFactories` entries for the ADO.NET driver to the `machine.config` file. Include the following entries; the `name` and `type` values are representative, so confirm them against your installation:
+
+```text
+<system.data>
+  <DbProviderFactories>
+    <add name="EnterpriseDB Data Provider"
+         invariant="EnterpriseDB.EDBClient"
+         description=".NET Data Provider for EnterpriseDB PostgreSQL"
+         type="EnterpriseDB.EDBClient.EDBClientFactory, EnterpriseDB.EDBClient" />
+  </DbProviderFactories>
+</system.data>
+```
+
+For the attribute-value pairs, the double-quoted strings must not contain excess white space characters; configure each attribute on a single line.
 The examples shown in this section may be split across multiple lines for clarity, but each must actually be configured on a single line, such as the following:
+
+`description=".NET Data Provider for EnterpriseDB PostgreSQL"`
+
+For 64-bit Windows, the `machine.config` file is in the following location:
+
+`C:\Windows\Microsoft.NET\Framework64\v4.0.30319\Config\machine.config`
+
+For 32-bit Windows, the `machine.config` file is in the following location:
+
+`C:\Windows\Microsoft.NET\Framework\v4.0.30319\Config\machine.config`
+
+**Step 3:** Place the DDL generation template `SSDLToPgSQL.tt` in the Visual Studio `EntityFramework Tools\DBGen\` folder as shown in the following example:
+
+```text
+C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\IDE\Extensions\Microsoft\EntityFramework Tools\DBGen\
+```
+
+!!! Note
+    Select the `SSDLToPgSQL.tt` template in your EDMX file properties.
+
+**Step 4:** Add the `EnterpriseDB.EDBClient.dll` and `EntityFramework6.EnterpriseDB.EDBClient.dll` files to project references. See [Referencing the Library Files](#referencing_the_library_files) for information about referencing library files.
+
+**Step 5:** In the project’s `app.config` file, add the following entry for provider services under the `entityFramework/providers` tag; the provider services `type` value is representative, so use the type supplied with your driver:
+
+```text
+<provider invariantName="EnterpriseDB.EDBClient"
+          type="EnterpriseDB.EDBClient.EDBProviderServices, EntityFramework6.EnterpriseDB.EDBClient" />
+```
+
+The following is a representative example of the `app.config` file; it mirrors the example shown earlier in this section, and the assembly and type names should be confirmed against your installation:
+
+```text
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <configSections>
+    <section name="entityFramework"
+             type="System.Data.Entity.Internal.ConfigFile.EntityFrameworkSection, EntityFramework"
+             requirePermission="false" />
+  </configSections>
+  <system.data>
+    <DbProviderFactories>
+      <add name="EnterpriseDB Data Provider"
+           invariant="EnterpriseDB.EDBClient"
+           description=".NET Data Provider for EnterpriseDB PostgreSQL"
+           type="EnterpriseDB.EDBClient.EDBClientFactory, EnterpriseDB.EDBClient" />
+    </DbProviderFactories>
+  </system.data>
+  <entityFramework>
+    <providers>
+      <provider invariantName="EnterpriseDB.EDBClient"
+                type="EnterpriseDB.EDBClient.EDBProviderServices, EntityFramework6.EnterpriseDB.EDBClient" />
+    </providers>
+  </entityFramework>
+</configuration>

+```
diff --git a/product_docs/docs/net_connector/4.0.10.2/05_using_the_net_connector.mdx b/product_docs/docs/net_connector/4.0.10.2/05_using_the_net_connector.mdx
new file mode 100644
index 00000000000..d5ffd64eb51
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/05_using_the_net_connector.mdx
@@ -0,0 +1,27 @@
+---
+title: "Using the .NET Connector"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/using_the_net_connector.html"
+---
+
+
+
+The sections that follow provide examples that demonstrate how to use the EDB object classes provided by the EDB .NET Connector, which allow a .NET application to connect to and interact with an Advanced Server database.
+
+To use the examples in this guide, place the .NET library files in the same directory as the compiled form of your application. All of the examples are written in C# and each is embedded in an ASP.NET page; the same logic and code would be applicable with other .NET applications (WinForm or console applications, for example).
+
+Please create and save the following `web.config` file in the same directory as the sample code. The examples make use of the `DB_CONN_STRING` key from this configuration file to return a connection string from the Advanced Server host.
+
+```text
+<configuration>
+  <appSettings>
+    <!-- Adjust the connection string values for your own host, port, and credentials. -->
+    <add key="DB_CONN_STRING"
+         value="Server=127.0.0.1;Port=5444;User Id=enterprisedb;Password=enterprisedb;Database=edb" />
+  </appSettings>
+</configuration>
+```
+
+An Advanced Server connection string for an ASP.NET web application is stored in the `web.config` file. If you are writing an application that does not use ASP.NET, provide the connection information in an application configuration file (such as `app.config`).
diff --git a/product_docs/docs/net_connector/4.0.10.2/06_opening_a_database_connection.mdx b/product_docs/docs/net_connector/4.0.10.2/06_opening_a_database_connection.mdx
new file mode 100644
index 00000000000..f1c1a1e4b2c
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/06_opening_a_database_connection.mdx
@@ -0,0 +1,288 @@
+---
+title: "Opening a Database Connection"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/opening_a_database_connection.html"
+---
+
+
+
+An `EDBConnection` object is responsible for handling the communication between an instance of Advanced Server and a .NET application. Before you can access data stored in an Advanced Server database, you must create and open an `EDBConnection`.
+
+The examples that follow demonstrate the basic steps for connecting to an instance of Advanced Server. You must:
+
+1. Import the namespace `EnterpriseDB.EDBClient`.
+2. Create an instance of `EDBConnection`.
+3. Initialize the `EDBConnection` object by passing a connection string as a parameter to the constructor for the `EDBConnection` class.
+4. Call the `Open` method of the `EDBConnection` object to open the connection.
+
+
+
+## Connection String Parameters
+
+A valid connection string specifies location and authentication information for an Advanced Server instance. You must provide the connection string before opening the connection.
 A connection string must contain:
+
+- The name or IP address of the server
+- The name of the Advanced Server database
+- The name of an Advanced Server user
+- The password associated with that user
+
+The following parameters may be included in the connection string:
+
+`CommandTimeout`
+
+   `CommandTimeout` specifies the length of time (in seconds) to wait for a command to finish execution before throwing an exception. The default value is `20`.
+
+`ConnectionLifeTime`
+
+   Use `ConnectionLifeTime` to specify the length of time (in seconds) to wait before closing unused connections in the pool. The default value is `15`.
+
+`Database`
+
+   Use the `Database` parameter to specify the name of the database to which the application should connect. If a database name is not specified, the database name will default to the name of the connecting user.
+
+`Encoding`
+
+   The `Encoding` parameter is obsolete; the parameter always returns the string `unicode`, and silently ignores attempts to set it.
+
+`Integrated Security`
+
+   By default, `Integrated Security` is set to `false`, and Windows Integrated Security is disabled. Specify a value of `true` to use Windows Integrated Security.
+
+`Load Role Based Tables`
+
+   Use `Load Role Based Tables` to load table OIDs based on role. This setting only affects the loading of table type OIDs, not composite type OIDs. The default value is `false`; set this parameter to `true` to enable the role-based loading of table OIDs.
+
+`MaxPoolSize`
+
+   `MaxPoolSize` instructs `EDBConnection` to dispose of pooled connections when the pool exceeds the specified number of connections. The default value is `20`.
+
+`MinPoolSize`
+
+   `MinPoolSize` instructs `EDBConnection` to pre-allocate the specified number of connections with the server. The default value is `1`.
+
+`Password`
+
+   When using clear text authentication, specify the password that will be used to establish a connection with the server.
+
+`Pooling`
+
+   By default, `Pooling` is set to `true` to enable connection pooling. Specify a value of `false` to disable connection pooling.
+
+`Port`
+
+   The `Port` parameter specifies the port to which the application should connect.
+
+`Protocol`
+
+   The specific protocol version to use (instead of automatic); specify an integer value of 2 or 3.
+
+`SearchPath`
+
+   Use the `SearchPath` parameter to change the search path to named and public schemas.
+
+`Server`
+
+   The name or IP address of the Advanced Server host.
+
+`SSL`
+
+   By default, `SSL` is set to `false`; specify a value of `true` to attempt a secure connection.
+
+`sslmode`
+
+   Use `sslmode` to specify an SSL connection control preference. `sslmode` can be:
+
+   `prefer` - Use SSL if possible.
+
+   `require` - Throw an exception if an SSL connection cannot be established.
+
+   `allow` - Connect without SSL. This parameter is not supported.
+
+   `disable` - Do not attempt an SSL connection. This is the default behavior.
+
+`SyncNotification`
+
+   Use the `SyncNotification` parameter to specify that `EDBDataprovider` should use synchronous notifications. The default value is `false`.
+
+`Timeout`
+
+   `Timeout` specifies the length of time (in seconds) to wait for an open connection. The default value is `15`.
+
+`User Id`
+
+   The `User Id` parameter specifies the user name that should be used for the connection.
+
+## Example - Opening a Database Connection using ASP.NET
+
+The following example demonstrates how to open a connection to an instance of Advanced Server and then close the connection.
 The connection is established using the credentials specified in the `DB_CONN_STRING` configuration parameter (see [Using the .NET Connector](05_using_the_net_connector/#using_the_net_connector) for an introduction to connection information and also see [Connection String Parameters](#connection-string-parameters) for connection parameters). The page body shown here is representative:
+
+```Text
+<% @ Page Language="C#" %>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: open the connection, report the result, and
+// close the connection.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    try
+    {
+        conn.Open();
+        Response.Write("Connection Opened Successfully");
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+If the connection is successful, a browser will display the following:
+
+![Connection Opened Successfully](images/connection_opened_successfully.png)
+
+Connection Opened Successfully
+
+## Example - Opening a Database Connection from a Console Application
+
+The following example opens a connection with an Advanced Server database using a console-based application.
+
+Before writing the code for the console application, create an `app.config` file that stores the connection string to the database. Using a configuration file makes it convenient to update the connection string if the information changes. The values shown here are representative; adjust them for your own server:
+
+```Text
+<configuration>
+  <appSettings>
+    <add key="DB_CONN_STRING"
+         value="Server=127.0.0.1;Port=5444;User Id=enterprisedb;Password=enterprisedb;Database=edb" />
+  </appSettings>
+</configuration>
+```
+
+Using your text editor of choice, enter the following code sample into a file:
+
+```Text
+using System;
+using System.Data;
+using EnterpriseDB.EDBClient;
+using System.Configuration;
+
+namespace EnterpriseDB
+{
+
+    class EDB
+    {
+
+        static void Main(string[] args)
+        {
+            string strConnectionString = ConfigurationSettings.AppSettings
+            ["DB_CONN_STRING"];
+
+            EDBConnection conn = new EDBConnection(strConnectionString);
+
+            try
+            {
+                conn.Open();
+                Console.WriteLine("Connection Opened Successfully");
+            }
+
+            catch(Exception exp)
+            {
+                throw new Exception(exp.ToString());
+            }
+
+            finally
+            {
+                conn.Close();
+            }
+        }
+    }
+}
+```
+
+Save the file as `EDBConnection-Sample.cs` and compile it with the following command:
+
+`csc /r:EDBDataProvider.dll /out:Console.exe EDBConnection-Sample.cs`
+
+Compiling the sample should generate a `Console.exe` file; you can execute the sample code by entering `Console.exe`. When executed, the console verifies that the connection opened successfully by displaying:
+
+   `Connection Opened Successfully`
+
+## Example - Opening a Database Connection from a Windows Form Application
+
+The following example demonstrates opening a database connection using a .NET WinForm application. To use the example, save the following code as `WinForm-Example.cs` in a directory that contains the library files.

+
+```Text
+using System;
+using System.Windows.Forms;
+using System.Drawing;
+using EnterpriseDB.EDBClient;
+
+namespace EDBTestClient
+{
+
+    class Win_Conn
+    {
+        static void Main(string[] args)
+        {
+            Form frmMain = new Form();
+            Button btnConn = new Button();
+            btnConn.Location = new System.Drawing.Point(104, 64);
+            btnConn.Name = "btnConn";
+            btnConn.Text = "Open Connection";
+            btnConn.Click += new System.EventHandler(btnConn_Click);
+
+            frmMain.Controls.Add(btnConn);
+            frmMain.Text = "EnterpriseDB";
+
+            Application.Run(frmMain);
+        }
+
+        private static void btnConn_Click(object sender, System.EventArgs e)
+        {
+            EDBConnection conn = null;
+            try
+            {
+                string connectionString = "Server=10.90.1.29;port=5444;username=edb;password=edb;database=edb";
+                conn = new EDBConnection(connectionString);
+                conn.Open();
+                MessageBox.Show("Connection Open");
+            }
+            catch(EDBException exp)
+            {
+                MessageBox.Show(exp.ToString());
+            }
+            finally
+            {
+                if (conn != null)
+                {
+                    conn.Close();
+                }
+            }
+        }
+    }
+}
+```
+
+Note that you must change the database connection string to point to the database that you want to connect to before compiling the file with the following command:
+
+`csc /r:EnterpriseDB.EDBClient.dll /out:WinForm.exe WinForm-Example.cs`
+
+This command generates a `WinForm.exe` file in the folder in which the source was compiled. Invoking the executable displays:
+
+![A successful connection](images/dialog.png)
+
+A successful connection
diff --git a/product_docs/docs/net_connector/4.0.10.2/07_retrieving_database_records.mdx b/product_docs/docs/net_connector/4.0.10.2/07_retrieving_database_records.mdx
new file mode 100644
index 00000000000..e2ee7f37fd0
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/07_retrieving_database_records.mdx
@@ -0,0 +1,133 @@
+---
+title: "Retrieving Database Records"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/retrieving_database_records.html"
+---
+
+
+
+You can retrieve records from the database by executing a `SELECT` command. To execute a `SELECT` statement you must:
+
+- Create and open a database connection.
+- Create an `EDBCommand` object that represents the `SELECT` statement.
+- Execute the command with the `ExecuteReader()` method of the `EDBCommand` object, returning an `EDBDataReader`.
+- Loop through the `EDBDataReader`, displaying the results or binding the `EDBDataReader` to some control.
+
+An `EDBDataReader` object represents a forward-only and read-only stream of database records, presented one record at a time. To view a subsequent record in the stream, you must call the `Read()` method of the `EDBDataReader` object.
+
+The example that follows:
+
+1. Imports the Advanced Server namespace: `EnterpriseDB.EDBClient`.
+2. Initializes an `EDBCommand` object with a `SELECT` statement.
+3. Opens a connection to the database.
+4. Executes the `EDBCommand` by calling the `ExecuteReader` method of the `EDBCommand` object.
+5. Retrieves the results of the SQL statement into an `EDBDataReader` object.
+6. Loops through the contents of the `EDBDataReader` object to display the records returned by the query within a `WHILE` loop.
+
+The `Read()` method advances to the next record (if a record exists) and returns `true` if a record exists, or `false` to indicate that the `EDBDataReader` has reached the end of the result set.

+
+```Text
+<% @ Page Language="C#" %>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body illustrating the steps described above; the
+// query and output format are illustrative.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    try
+    {
+        conn.Open();
+        EDBCommand cmdSelect = new EDBCommand("SELECT empno, ename FROM emp", conn);
+        EDBDataReader reader = cmdSelect.ExecuteReader();
+        while (reader.Read())
+        {
+            Response.Write(reader["empno"] + " - " + reader["ename"] + "<br/>");
+        }
+        reader.Close();
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+To exercise the sample code, save the code in your default web root directory in a file named:
+
+   `selectEmployees.aspx`
+
+To invoke the program, open a web-browser, and browse to:
+
+   `http://localhost/selectEmployees.aspx`
+
+## Retrieving a Single Database Record
+
+To retrieve a single result from a query, use the `ExecuteScalar()` method of the `EDBCommand` object. The `ExecuteScalar()` method returns the first value of the first column of the first row of the `DataSet` generated by the specified query. The page body shown here is representative:
+
+```text
+<% @ Page Language="C#" %>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: count the rows in the emp table and display
+// the converted scalar result.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    try
+    {
+        conn.Open();
+        EDBCommand cmd = new EDBCommand("SELECT COUNT(*) FROM emp", conn);
+        int count = Convert.ToInt32(cmd.ExecuteScalar());
+        Response.Write("Number of employees: " + count);
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+Save the sample code in a file in a web root directory named:
+
+   `selectScalar.aspx`
+
+To invoke the sample code, open a web-browser, and browse to:
+
+   `http://localhost/selectScalar.aspx`
+
+Please note that the sample includes an explicit conversion of the value returned by the `ExecuteScalar()` method. The `ExecuteScalar()` method returns an object; to view the object, you must convert it into an integer value by using the `Convert.ToInt32` method.
diff --git a/product_docs/docs/net_connector/4.0.10.2/08_parameterized_queries.mdx b/product_docs/docs/net_connector/4.0.10.2/08_parameterized_queries.mdx
new file mode 100644
index 00000000000..44ee90f9b91
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/08_parameterized_queries.mdx
@@ -0,0 +1,66 @@
+---
+title: "Parameterized Queries"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/parameterized_queries.html"
+---
+
+
+
+A `parameterized query` is a query with one or more parameter markers embedded in the SQL statement. Before executing a parameterized query, you must supply a value for each marker found in the text of the SQL statement.
+
+Parameterized queries are useful when you don't know the complete text of a query at the time you write your code. For example, the value referenced in a `WHERE` clause may be calculated from user input.
+
+As demonstrated in the following example, you must declare the data type of each parameter specified in the parameterized query by creating an `EDBParameter` object and adding that object to the command's parameter collection. Then, you must supply a value for each parameter by setting the parameter's `Value` property, as sketched below.
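
+
+For instance, a single parameter might be declared, bound, and assigned like this (a minimal sketch; the statement, parameter names, and values are illustrative):
+
+```Text
+EDBCommand cmdUpdate = new EDBCommand("UPDATE emp SET sal = sal + :increase WHERE empno = :id", conn);
+cmdUpdate.Parameters.Add(new EDBParameter("increase", EDBTypes.EDBDbType.Integer));
+cmdUpdate.Parameters.Add(new EDBParameter("id", EDBTypes.EDBDbType.Integer));
+cmdUpdate.Parameters[0].Value = 500;   // amount to add to the salary
+cmdUpdate.Parameters[1].Value = 7369;  // employee number to update
+cmdUpdate.ExecuteNonQuery();
+```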

+
+The following example uses a parameterized query with an `UPDATE` statement that increases an employee salary:
+
+```text
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: the statement, parameter names, and values
+// are illustrative.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    string updateQuery = "UPDATE emp SET sal = sal + :salaryIncrease WHERE empno = :empNo";
+    try
+    {
+        conn.Open();
+        EDBCommand cmdUpdate = new EDBCommand(updateQuery, conn);
+        cmdUpdate.Parameters.Add(new EDBParameter("salaryIncrease", EDBTypes.EDBDbType.Integer));
+        cmdUpdate.Parameters.Add(new EDBParameter("empNo", EDBTypes.EDBDbType.Integer));
+        cmdUpdate.Parameters[0].Value = 500;
+        cmdUpdate.Parameters[1].Value = 7369;
+        cmdUpdate.ExecuteNonQuery();
+        Response.Write("Record Updated");
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+Save the sample code in a file in a web root directory named:
+
+   `updateSalary.aspx`
+
+To invoke the sample code, open a web-browser, and browse to:
+
+   `http://localhost/updateSalary.aspx`
diff --git a/product_docs/docs/net_connector/4.0.10.2/09_inserting_records_in_a_database.mdx b/product_docs/docs/net_connector/4.0.10.2/09_inserting_records_in_a_database.mdx
new file mode 100644
index 00000000000..3496b2cbfa5
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/09_inserting_records_in_a_database.mdx
@@ -0,0 +1,71 @@
+---
+title: "Inserting Records in a Database"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/inserting_records_in_a_database.html"
+---
+
+
+
+You can use the `ExecuteNonQuery()` method of `EDBCommand` to add records to a database stored on an Advanced Server host with an `INSERT` command.
+
+In the example that follows, the `INSERT` command is stored in the variable `cmd`. The values prefixed with a colon (`:`) are placeholders for `EDBParameters` that are instantiated, assigned values, and then added to the `INSERT` command's parameter collection in the statements that follow. The `INSERT` command is executed by the `ExecuteNonQuery()` method of the `cmdInsert` object.
+
+The example adds a new employee to the `emp` table; the page body shown is representative:
+
+```Text
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: the column list, parameter names, and values
+// are illustrative.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    string cmd = "INSERT INTO emp(empno, ename) VALUES(:EmpNo, :EName)";
+    try
+    {
+        conn.Open();
+        EDBCommand cmdInsert = new EDBCommand(cmd, conn);
+        cmdInsert.Parameters.Add(new EDBParameter("EmpNo", EDBTypes.EDBDbType.Integer));
+        cmdInsert.Parameters[0].Value = 1234;
+        cmdInsert.Parameters.Add(new EDBParameter("EName", EDBTypes.EDBDbType.Varchar));
+        cmdInsert.Parameters[1].Value = "SMITH";
+        cmdInsert.ExecuteNonQuery();
+        Response.Write("Record inserted successfully");
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+Save the sample code in a file in a web root directory named:
+
+   `insertEmployee.aspx`
+
+To invoke the sample code, open a web-browser, and browse to:
+
+   `http://localhost/insertEmployee.aspx`
diff --git a/product_docs/docs/net_connector/4.0.10.2/10_deleting_records_in_a_database.mdx b/product_docs/docs/net_connector/4.0.10.2/10_deleting_records_in_a_database.mdx
new file mode 100644
index 00000000000..652cad46ec1
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/10_deleting_records_in_a_database.mdx
@@ -0,0 +1,72 @@
+---
+title: "Deleting Records in a Database"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/deleting_records_in_a_database.html"
+---
+
+
+
+You can use the `ExecuteNonQuery()` method of `EDBCommand` to delete records from a database stored on an Advanced Server host with a `DELETE` statement.
+
+In the example that follows, the `DELETE` command is stored in the variable `strDeleteQuery`. The code passes the employee number to the `DELETE` command (specified by `:EmpNo`). The command is then executed using the `ExecuteNonQuery()` method.
 The following example deletes the employee inserted in the previous example; the page body shown is representative:
+
+```text
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: the parameter name and value are illustrative.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    string strDeleteQuery = "DELETE FROM emp WHERE empno = :EmpNo";
+    try
+    {
+        conn.Open();
+        EDBCommand cmdDelete = new EDBCommand(strDeleteQuery, conn);
+        cmdDelete.Parameters.Add(new EDBParameter("EmpNo", EDBTypes.EDBDbType.Integer));
+        cmdDelete.Parameters[0].Value = 1234;
+        cmdDelete.ExecuteNonQuery();
+        Response.Write("Record Deleted");
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+Save the sample code in a file in a web root directory named:
+
+   `deleteEmployee.aspx`
+
+To invoke the sample code, open a web-browser, and browse to:
+
+   `http://localhost/deleteEmployee.aspx`
diff --git a/product_docs/docs/net_connector/4.0.10.2/11_using_spl_stored_procedures_in_your_net_application.mdx b/product_docs/docs/net_connector/4.0.10.2/11_using_spl_stored_procedures_in_your_net_application.mdx
new file mode 100644
index 00000000000..51667d73a0f
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/11_using_spl_stored_procedures_in_your_net_application.mdx
@@ -0,0 +1,420 @@
+---
+title: "Using SPL Stored Procedures in your .NET Application"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/using_spl_stored_procedures_in_your_net_application.html"
+---
+
+
+
+You can include SQL statements in an application in two ways:
+
+- By adding the SQL statements directly in the .NET application code.
+- By packaging the SQL statements in a stored procedure, and executing the stored procedure from the .NET application.
+
+In some cases, a stored procedure can provide advantages over embedded SQL statements. Stored procedures support complex conditional and looping constructs that are difficult to duplicate with SQL statements embedded directly in an application.
+
+You can also see a significant improvement in performance by using stored procedures; a stored procedure only needs to be parsed, compiled, and optimized once on the server side, while a SQL statement that is included in an application may be parsed, compiled, and optimized each time it is executed from a .NET application.
+
+To use a stored procedure in your .NET application you must:
+
+1. Create an SPL stored procedure on the Advanced Server host.
+2. Import the `EnterpriseDB.EDBClient` namespace.
+3. Pass the name of the stored procedure to the instance of the `EDBCommand`.
+4. Change the `EDBCommand.CommandType` to `CommandType.StoredProcedure`.
+5. `Prepare()` the command.
+6. Execute the command.
+
+## Example - Executing a Stored Procedure without Parameters
+
+Our sample procedure prints the name of department 10; the procedure takes no parameters, and returns no parameters. To create the sample procedure, invoke EDB-PSQL and connect to the Advanced Server host database. Enter the following SPL code at the command line:
+
+```Text
+CREATE OR REPLACE PROCEDURE list_dept10
+IS
+    v_deptname VARCHAR2(30);
+BEGIN
+    DBMS_OUTPUT.PUT_LINE('Dept No: 10');
+    SELECT dname INTO v_deptname FROM dept WHERE deptno = 10;
+    DBMS_OUTPUT.PUT_LINE('Dept Name: ' || v_deptname);
+END;
+```
+
+When Advanced Server has validated the stored procedure, it will echo `CREATE PROCEDURE`.
+
+**Using the EDBCommand Object to Execute a Stored Procedure**
+
+The `CommandType` property of the `EDBCommand` object is used to indicate the type of command being executed. The `CommandType` property is set to one of three possible `CommandType` enumeration values:
+
+- Use the default `Text` value when passing a SQL string for execution.
+- Use the `StoredProcedure` value, passing the name of a stored procedure for execution.

+- Use the `TableDirect` value when passing a table name. This value passes back all records in the specified table.
+
+The `CommandText` property must contain a SQL string, stored procedure name, or table name, depending on the value of the `CommandType` property.
+
+The following example executes the stored procedure; the page body shown is representative:
+
+```Text
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: execute the list_dept10 procedure created above.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    try
+    {
+        conn.Open();
+        EDBCommand cmdStoredProc = new EDBCommand("list_dept10", conn);
+        cmdStoredProc.CommandType = CommandType.StoredProcedure;
+        cmdStoredProc.Prepare();
+        cmdStoredProc.ExecuteNonQuery();
+        Response.Write("Stored Procedure Executed Successfully");
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+Save the sample code in a file in a web root directory named:
+
+   `storedProc.aspx`
+
+To invoke the sample code, open a web-browser, and browse to:
+
+   `http://localhost/storedProc.aspx`
+
+## Example - Executing a Stored Procedure with IN Parameters
+
+The following example demonstrates calling a stored procedure that includes `IN` parameters. To create the sample procedure, invoke `EDB-PSQL` and connect to the Advanced Server host database. Enter the following SPL code at the command line:
+
+```Text
+CREATE OR REPLACE PROCEDURE
+    EMP_INSERT
+    (
+        pENAME IN VARCHAR,
+        pJOB IN VARCHAR,
+        pSAL IN FLOAT4,
+        pCOMM IN FLOAT4,
+        pDEPTNO IN INTEGER,
+        pMgr IN INTEGER
+    )
+AS
+DECLARE
+    CURSOR TESTCUR IS SELECT MAX(EMPNO) FROM EMP;
+    MAX_EMPNO INTEGER := 10;
+BEGIN
+
+    OPEN TESTCUR;
+    FETCH TESTCUR INTO MAX_EMPNO;
+    INSERT INTO EMP(EMPNO,ENAME,JOB,SAL,COMM,DEPTNO,MGR)
+        VALUES(MAX_EMPNO+1,pENAME,pJOB,pSAL,pCOMM,pDEPTNO,pMgr);
+    CLOSE testcur;
+END;
+```
+
+When Advanced Server has validated the stored procedure, it will echo `CREATE PROCEDURE`.
+
+**Passing Input Values to a Stored Procedure**
+
+Calling a stored procedure that contains parameters is very similar to executing a stored procedure without parameters. The major difference is that when calling a parameterized stored procedure you must use the `EDBParameter` collection of the `EDBCommand` object. When the `EDBParameter` is added to the `EDBCommand` collection, properties such as `ParameterName`, `DbType`, `Direction`, `Size`, and `Value` are set.
+
+The following example demonstrates the process of executing a parameterized stored procedure from a C# application; the page body shown is representative:
+
+```Text
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: the parameter values are illustrative.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    try
+    {
+        conn.Open();
+        EDBCommand cmdStoredProc = new EDBCommand
+        ("emp_insert(:EmpName,:Job,:Salary,:Commission,:DeptNo,:Manager)", conn);
+        cmdStoredProc.CommandType = CommandType.StoredProcedure;
+        cmdStoredProc.Parameters.Add(new EDBParameter("EmpName", EDBTypes.EDBDbType.Varchar));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Job", EDBTypes.EDBDbType.Varchar));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Salary", EDBTypes.EDBDbType.Float));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Commission", EDBTypes.EDBDbType.Float));
+        cmdStoredProc.Parameters.Add(new EDBParameter("DeptNo", EDBTypes.EDBDbType.Integer));
+        cmdStoredProc.Parameters.Add(new EDBParameter("Manager", EDBTypes.EDBDbType.Integer));
+        cmdStoredProc.Parameters[0].Value = "CLARKE";
+        cmdStoredProc.Parameters[1].Value = "SALESMAN";
+        cmdStoredProc.Parameters[2].Value = 5000.00;
+        cmdStoredProc.Parameters[3].Value = 1000.00;
+        cmdStoredProc.Parameters[4].Value = 30;
+        cmdStoredProc.Parameters[5].Value = 7698;
+        cmdStoredProc.Prepare();
+        cmdStoredProc.ExecuteNonQuery();
+        Response.Write("Record inserted for employee: " + cmdStoredProc.Parameters[0].Value);
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+Save the sample code in a file in a web root directory named:
+
+   `storedProcInParam.aspx`
+
+To invoke the sample code, open a web-browser, and browse to:
+
+   `http://localhost/storedProcInParam.aspx`
+
+In the example, the body of the `Page_Load` method declares and instantiates an `EDBConnection` object. The sample then creates an `EDBCommand` object with the properties needed to execute the stored procedure.
+
+The example then uses the `Add` method of the `EDBCommand` object's `Parameters` collection to add six input parameters.
+
+```text
+EDBCommand cmdStoredProc = new EDBCommand
+("emp_insert(:EmpName,:Job,:Salary,:Commission,:DeptNo,:Manager)",conn);
+cmdStoredProc.CommandType = CommandType.StoredProcedure;
+```
+
+The example assigns a value to each parameter before passing them to the `EMP_INSERT` stored procedure.
+
+The `Prepare()` method prepares the statement before calling the `ExecuteNonQuery()` method.
+
+The `ExecuteNonQuery` method of the `EDBCommand` object executes the stored procedure. After the stored procedure has executed, a test record is inserted into the `emp` table and the values inserted are displayed on the webpage.

+
+## Example - Executing a Stored Procedure with IN, OUT, and INOUT Parameters
+
+The previous example demonstrated how to pass `IN` parameters to a stored procedure; the following examples demonstrate how to pass `IN` values and return `OUT` values from a stored procedure.
+
+**Creating the Stored Procedure**
+
+The following stored procedure takes a department number and returns the corresponding location and department name. To create the sample procedure, open the EDB-PSQL command line, and connect to the Advanced Server host database. Enter the following SPL code at the command line:
+
+```Text
+CREATE OR REPLACE PROCEDURE
+    DEPT_SELECT
+    (
+        pDEPTNO IN INTEGER,
+        pDNAME OUT VARCHAR,
+        pLOC OUT VARCHAR
+    )
+AS
+DECLARE
+    CURSOR TESTCUR IS SELECT DNAME, LOC FROM DEPT WHERE DEPTNO = pDEPTNO;
+    REC RECORD;
+BEGIN
+
+    OPEN TESTCUR;
+    FETCH TESTCUR INTO REC;
+
+    pDNAME := REC.DNAME;
+    pLOC := REC.LOC;
+
+    CLOSE testcur;
+END;
+```
+
+When Advanced Server has validated the stored procedure, it will echo `CREATE PROCEDURE`.
+
+**Receiving Output Values from a Stored Procedure**
+
+When retrieving values from `OUT` parameters you must explicitly specify the direction of out parameters as `Output`. You can retrieve the values from `Output` parameters in two ways:
+
+- Call the `ExecuteReader` method of the `EDBCommand` and explicitly loop through the returned `EDBDataReader`, searching for the values of `OUT` parameters.
+- Call the `ExecuteNonQuery` method of `EDBCommand` and explicitly get the value of a declared `Output` parameter by reading that `EDBParameter` object's `Value` property.
+
+In each method, you must declare each parameter, indicating the `direction` of the parameter (`ParameterDirection.Input`, `ParameterDirection.Output` or `ParameterDirection.InputOutput`). Before invoking the procedure, you must provide a value for each `IN` and `INOUT` parameter. After the procedure returns, you may retrieve the `OUT` and `INOUT` parameter values from the `command.Parameters[]` array.
+
+The following code listing demonstrates using the `ExecuteReader` method to retrieve a result set; the page body shown is representative:
+
+```Text
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: call dept_select and read the OUT values from
+// the returned result set.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    try
+    {
+        conn.Open();
+        EDBCommand cmdStoredProc = new EDBCommand("dept_select(:pDEPTNO,:pDNAME,:pLOC)", conn);
+        cmdStoredProc.CommandType = CommandType.StoredProcedure;
+        EDBParameter pDeptNo = new EDBParameter("pDEPTNO", EDBTypes.EDBDbType.Integer);
+        pDeptNo.Direction = ParameterDirection.Input;
+        EDBParameter pDName = new EDBParameter("pDNAME", EDBTypes.EDBDbType.Varchar);
+        pDName.Direction = ParameterDirection.Output;
+        EDBParameter pLoc = new EDBParameter("pLOC", EDBTypes.EDBDbType.Varchar);
+        pLoc.Direction = ParameterDirection.Output;
+        cmdStoredProc.Parameters.Add(pDeptNo);
+        cmdStoredProc.Parameters.Add(pDName);
+        cmdStoredProc.Parameters.Add(pLoc);
+        cmdStoredProc.Parameters[0].Value = 10;
+        cmdStoredProc.Prepare();
+        EDBDataReader reader = cmdStoredProc.ExecuteReader();
+        while (reader.Read())
+        {
+            Response.Write("Department Name: " + reader["pDNAME"] + "<br/>");
+            Response.Write("Location: " + reader["pLOC"] + "<br/>");
+        }
+        reader.Close();
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
+
+The following code listing demonstrates using the `ExecuteNonQuery` method to retrieve a result set; the page body shown is representative:
+
+```Text
+<% @ Page Language="C#" Debug="true"%>
+<% @Import Namespace="EnterpriseDB.EDBClient" %>
+<% @Import Namespace="System.Data" %>
+<% @Import Namespace="System.Configuration" %>
+<script runat="server">
+// Representative page body: call dept_select and read the OUT values from
+// the parameter collection.
+void Page_Load(object sender, EventArgs e)
+{
+    string strConnectionString = ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+    EDBConnection conn = new EDBConnection(strConnectionString);
+    try
+    {
+        conn.Open();
+        EDBCommand cmdStoredProc = new EDBCommand("dept_select(:pDEPTNO,:pDNAME,:pLOC)", conn);
+        cmdStoredProc.CommandType = CommandType.StoredProcedure;
+        EDBParameter pDeptNo = new EDBParameter("pDEPTNO", EDBTypes.EDBDbType.Integer);
+        pDeptNo.Direction = ParameterDirection.Input;
+        EDBParameter pDName = new EDBParameter("pDNAME", EDBTypes.EDBDbType.Varchar);
+        pDName.Direction = ParameterDirection.Output;
+        EDBParameter pLoc = new EDBParameter("pLOC", EDBTypes.EDBDbType.Varchar);
+        pLoc.Direction = ParameterDirection.Output;
+        cmdStoredProc.Parameters.Add(pDeptNo);
+        cmdStoredProc.Parameters.Add(pDName);
+        cmdStoredProc.Parameters.Add(pLoc);
+        cmdStoredProc.Parameters[0].Value = 10;
+        cmdStoredProc.Prepare();
+        cmdStoredProc.ExecuteNonQuery();
+        Response.Write("Department Name: " + cmdStoredProc.Parameters["pDNAME"].Value + "<br/>");
+        Response.Write("Location: " + cmdStoredProc.Parameters["pLOC"].Value);
+    }
+    catch (Exception exp)
+    {
+        Response.Write(exp.ToString());
+    }
+    finally
+    {
+        conn.Close();
+    }
+}
+</script>
+```
diff --git a/product_docs/docs/net_connector/4.0.10.2/12_using_advanced_queueing.mdx b/product_docs/docs/net_connector/4.0.10.2/12_using_advanced_queueing.mdx
new file mode 100644
index 00000000000..f6a807ae432
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/12_using_advanced_queueing.mdx
@@ -0,0 +1,492 @@
+---
+title: "Using Advanced Queueing"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/using_advanced_queueing.html"
+---
+
+
+
+EDB Postgres Advanced Server Advanced Queueing provides message queueing and message processing for the Advanced Server database. User-defined messages are stored in a queue; a collection of queues is stored in a queue table. Create a queue table before creating a queue that depends on it.

+
+On the server side, procedures in the `DBMS_AQADM` package create and manage message queues and queue tables. Use the `DBMS_AQ` package to add or remove messages from a queue, or register or unregister a PL/SQL callback procedure. For more information about `DBMS_AQ` and `DBMS_AQADM`, click [here](https://www.enterprisedb.com/docs/en/11.0/EPAS_BIP_Guide_v11/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.14.html#pID0E01HG0HA).
+
+On the client side, the application uses the EDB .NET driver to enqueue and dequeue messages.
+
+## Enqueueing or Dequeueing a Message
+
+For more information about using Advanced Server's Advanced Queueing functionality, see the [Database Compatibility for Oracle Developers Built-in Package Guide](/epas/latest/).
+
+### Server-Side Setup
+
+To use Advanced Queueing functionality in your .NET application, you must first create a user-defined type, queue table, and queue, and then start the queue on the database server. Invoke EDB-PSQL and connect to the Advanced Server host database. Use the following SPL commands at the command line:
+
+**Creating a User-defined Type**
+
+To specify a RAW data type, you should create a user-defined type. The following example demonstrates creating a user-defined type named `myxml`:
+
+`CREATE TYPE myxml AS (value XML);`
+
+**Creating the Queue Table**
+
+A queue table can hold multiple queues with the same payload type. The following example demonstrates creating a table named `MSG_QUEUE_TABLE`:
+
+```Text
+BEGIN
+DBMS_AQADM.CREATE_QUEUE_TABLE
+    (queue_table => 'MSG_QUEUE_TABLE',
+     queue_payload_type => 'myxml',
+     comment => 'Message queue table');
+END;
+```
+
+**Creating the Queue**
+
+The following example demonstrates creating a queue named `MSG_QUEUE` within the table `MSG_QUEUE_TABLE`:
+
+```Text
+BEGIN
+DBMS_AQADM.CREATE_QUEUE ( queue_name => 'MSG_QUEUE', queue_table => 'MSG_QUEUE_TABLE', comment => 'This queue contains pending messages.');
+END;
+```
+
+**Starting the Queue**
+
+Once the queue is created, invoke the following SPL code at the command line to start the queue in the EDB database:
+
+```Text
+BEGIN
+DBMS_AQADM.START_QUEUE
+(queue_name => 'MSG_QUEUE');
+END;
+```
+
+### Client-side Example
+
+Once you have created the user-defined type, queue table, and queue, and started the queue, you can enqueue or dequeue messages using the EDB .NET driver.
+
+**Enqueue a message:**
+
+To enqueue a message in your .NET application, you must:
+
+1. Import the `EnterpriseDB.EDBClient` namespace.
+2. Pass the name of the queue and create the instance of the `EDBAQQueue`.
+3. Create the enqueue message and define a payload.
+4. Call the `queue.Enqueue` method.
+
+The following code listing demonstrates using the `queue.Enqueue` method:
+
+!!! Note
+    The following code creates the message and serializes it. This is example code; it will not compile if copied as-is, because you are responsible for serializing the message as XML.
+ +```Text +using EnterpriseDB.EDBClient; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace AQXml +{ + class MyXML + { + public string value { get; set; } + } + class Program + { + static void Main(string[] args) + { + int messagesToSend = 1; + if (args.Length > 0 && !string.IsNullOrEmpty(args[0])) + { + messagesToSend = int.Parse(args[0]); + } + for (int i = 0; i < 5; i++) + { + EnqueMsg("test message: " + i); + } + } + + private static EDBConnection GetConnection() + { + string connectionString = "Server=127.0.0.1;Host=127.0.0.1;Port=5444;User Id=enterprisedb;Password=test;Database=edb;Timeout=999"; + EDBConnection connection = new EDBConnection(connectionString); + connection.Open(); + return connection; + } + + + private static string ByteArrayToString(byte[] byteArray) + { + // Sanity check if it's null so we don't incur overhead of an exception + if (byteArray == null) + { + return string.Empty; + } + try + { + StringBuilder hex = new StringBuilder(byteArray.Length * 2); + foreach (byte b in byteArray) + { + hex.AppendFormat("{0:x2}", b); + } + + return hex.ToString().ToUpper(); + } + catch + { + return string.Empty; + } + } + + private static bool EnqueMsg(string msg) + { + EDBConnection con = GetConnection(); + using (EDBAQQueue queue = new EDBAQQueue("MSG_QUEUE", con)) + { + queue.MessageType = EDBAQMessageType.Xml; + EDBTransaction txn = queue.Connection.BeginTransaction(); + QueuedEntities.Message queuedMessage = new QueuedEntities.Message() { MessageText = msg }; + + try + { + string rootElementName = queuedMessage.GetType().Name; + if (rootElementName.IndexOf(".") != -1) + { + rootElementName = rootElementName.Split('.').Last(); + } + + string xml = new Utils.XmlFragmentSerializer().Serialize(queuedMessage); + EDBAQMessage queMsg = new EDBAQMessage(); + queMsg.Payload = new MyXML { value = xml }; + queue.MessageType = EDBAQMessageType.Udt; + queue.UdtTypeName = "myxml"; + queue.Enqueue(queMsg); + var messageId = ByteArrayToString((byte[])queMsg.MessageId); + Console.WriteLine("MessageID: " + messageId); + txn.Commit(); + queMsg = null; + xml = null; + rootElementName = null; + return true; + } + catch (Exception ex) + { + txn?.Rollback(); + Console.WriteLine("Failed to enqueue message."); + Console.WriteLine(ex.ToString()); + return false; + } + finally + { + queue?.Connection?.Dispose(); + } + } + } + + } +} +``` + +**Dequeueing a message** + +To dequeue a message on your .NET application, you must: + +1. Import the `EnterpriseDB.EDBClient` namespace. +2. Pass the name of the queue and create the instance of the `EDBAQQueue`. +3. Call the `queue.Dequeue` method. + +!!! Note + The following code creates the message and serializes it. This is just an example code and is not going to compile if copied as it is. It is the responsibility of the user to serialize the message as XML. 
+ +```Text +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using EnterpriseDB.EDBClient; + +namespace DequeueXML +{ + class MyXML + { + public string value { get; set; } + } + class Program + { + static void Main(string[] args) + { + DequeMsg(); + } + + + private static EDBConnection GetConnection() + { + string connectionString = "Server=127.0.0.1;Host=127.0.0.1;Port=5444;User Id=enterprisedb;Password=test;Database=edb;Timeout=999"; + EDBConnection connection = new EDBConnection(connectionString); + connection.Open(); + return connection; + } + + + private static string ByteArrayToString(byte[] byteArray) + { + // Sanity check if it's null so we don't incur overhead of an exception + if (byteArray == null) + { + return string.Empty; + } + try + { + StringBuilder hex = new StringBuilder(byteArray.Length * 2); + foreach (byte b in byteArray) + { + hex.AppendFormat("{0:x2}", b); + } + + return hex.ToString().ToUpper(); + } + catch + { + return string.Empty; + } + } + public static void DequeMsg(int waitTime = 10) + { + EDBConnection con = GetConnection(); + using (EDBAQQueue queueListen = new EDBAQQueue("MSG_QUEUE", con)) + { + queueListen.UdtTypeName = "myxml"; + queueListen.DequeueOptions.Navigation = EDBAQNavigationMode.FIRST_MESSAGE; + queueListen.DequeueOptions.Visibility = EDBAQVisibility.ON_COMMIT; + queueListen.DequeueOptions.Wait = 1; + EDBTransaction txn = null; + + while (1 == 1) + { + + if (queueListen.Connection.State == System.Data.ConnectionState.Closed) + { + queueListen.Connection.Open(); + } + + string messageId = "Unknown"; + try + { + // the listen function is a blocking function. It will Wait the specified waitTime or until a + // message is received. + Console.WriteLine("Listening..."); + string v = queueListen.Listen(null, waitTime); + // If we are waiting for a message and we specify a Wait time, + // then if there are no more messages, we want to just bounce out. + if (waitTime > -1 && v == null) + { + Console.WriteLine("No message received during Wait period."); + Console.WriteLine(); + continue; + } + + // once we're here that means a message has been detected in the queue. Let's deal with it. + txn = queueListen.Connection.BeginTransaction(); + + Console.WriteLine("Attempting to dequeue message..."); + // dequeue the message + EDBAQMessage deqMsg; + try + { + deqMsg = queueListen.Dequeue(); + } + catch (Exception ex) + { + if (ex.Message.Contains("ORA-25228")) + { + Console.WriteLine("Message was not there. 
Another process must have picked it up."); + Console.WriteLine(); + txn.Rollback(); + continue; + } + else + { + throw; + } + } + + messageId = ByteArrayToString((byte[])deqMsg.MessageId); + if (deqMsg != null) + { + Console.WriteLine("Processing received message..."); + // process the message payload + MyXML obj = new MyXML(); + queueListen.Map(deqMsg.Payload, obj); + + QueuedEntities.Message msg = new Utils.XmlFragmentSerializer().Deserialize(obj.value); + + Console.WriteLine("Received Message:"); + Console.WriteLine("MessageID: " + messageId); + Console.WriteLine("Message: " + msg.MessageText); + Console.WriteLine("Enqueue Time" + queueListen.MessageProperties.EnqueueTime); + + txn.Commit(); + + Console.WriteLine("Finished processing message"); + Console.WriteLine(); + + } + else + { + Console.WriteLine("Message was not dequeued."); + } + } + catch (Exception ex) + { + Console.WriteLine("Failed To dequeue or process the dequeued message."); + Console.WriteLine(ex.ToString()); + Console.WriteLine(); + if (txn != null) + { + txn.Rollback(); + if (txn != null) + { + txn.Dispose(); + } + } + } + } + } + + } + } +} +``` + +## EDBAQ Classes + +The following EDBAQ classes are used in this application: + +**EDBAQDequeueMode** + +The `EDBAQDequeueMode` class lists all the dequeuer modes available. + +| **Value** | **Description** | +| ------------- | ------------------------------------------------------------- | +| Browse | Read the message without locking. | +| Locked | Reads and gets a write lock on the message. | +| Remove | Deletes the message after reading. This is the default value. | +| Remove_NoData | Confirms receipt of the message. | + +**EDBAQDequeueOptions** + +The `EDBAQDequeueOptions` class lists the options available when dequeuing a message. + +| **Property** | **Description** | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------ | +| ConsumerName | The name of the consumer for which to dequeue the message. | +| DequeueMode | This is set from EDBAQDequeueMode. It represents the locking behavior linked with the dequeue option. | +| Navigation | This is set from EDBAQNavigationMode. It represents the position of the message that will be fetched. | +| Visibility | This is set from EDBAQVisibility. It represents whether the new message is dequeued or not as part of the current transaction. | +| Wait | The wait time for a message as per the search criteria. | +| Msgid | The message identifier. | +| Correlation | The correlation identifier. | +| DeqCondition | The dequeuer condition. It is a Boolean expression. | +| Transformation | The transformation that will be applied before dequeuing the message. | +| DeliveryMode | The delivery mode of the dequeued message. | + +**EDBAQEnqueueOptions** + +The `EDBAQEnqueueOptions` class lists the options available when enqueuing a message. + +| **Property** | **Description** | +| ----------------- | ------------------------------------------------------------------------------------------------------------------------------ | +| Visibility | This is set from EDBAQVisibility. It represents whether the new message is enqueued or not as part of the current transaction. | +| RelativeMsgid | The relative message identifier. | +| SequenceDeviation | The sequence when the message should be dequeued. | +| Transformation | The transformation that will be applied before enqueuing the message. | +| DeliveryMode | The delivery mode of the enqueued message. 
|
+
+**EDBAQMessage**
+
+The `EDBAQMessage` class lists a message to be enqueued/dequeued.
+
+| **Property** | **Description**                  |
+| ------------ | -------------------------------- |
+| Payload      | The actual message to be queued. |
+| MessageId    | The ID of the queued message.    |
+
+**EDBAQMessageProperties**
+
+The `EDBAQMessageProperties` lists the message properties available.
+
+| **Property**     | **Description**                                                                                 |
+| ---------------- | ----------------------------------------------------------------------------------------------- |
+| Priority         | The priority of the message.                                                                      |
+| Delay            | The duration after which the message is available for dequeuing. This is specified in seconds.   |
+| Expiration       | The duration for which the message is available for dequeuing. This is specified in seconds.     |
+| Correlation      | The correlation identifier.                                                                       |
+| Attempts         | The number of attempts taken to dequeue the message.                                              |
+| RecipientList    | The recipient list that overrides the default queue subscribers.                                  |
+| ExceptionQueue   | The name of the queue where the unprocessed messages should be moved.                             |
+| EnqueueTime      | The time when the message was enqueued.                                                           |
+| State            | The state of the message during dequeue.                                                          |
+| OriginalMsgid    | The message identifier in the last queue.                                                         |
+| TransactionGroup | The transaction group for the dequeued messages.                                                  |
+| DeliveryMode     | The delivery mode of the dequeued message.                                                        |
+
+**EDBAQMessageState**
+
+The `EDBAQMessageState` class represents the state of the message during dequeue.
+
+| **Value** | **Description**                                              |
+| --------- | ------------------------------------------------------------ |
+| Expired   | The message is moved to the exception queue.                  |
+| Processed | The message is processed and kept.                            |
+| Ready     | The message is ready to be processed.                         |
+| Waiting   | The message is in a waiting state; the delay is not reached.  |
+
+**EDBAQMessageType**
+
+The `EDBAQMessageType` class represents the types for payload.
+
+| **Value** | **Description**                                                       |
+| --------- | --------------------------------------------------------------------- |
+| Raw       | The raw message type. Currently, this payload type is not supported.  |
+| UDT       | The user-defined type message.                                         |
+| XML       | The XML type message. Currently, this payload type is not supported.  |
+
+**EDBAQNavigationMode**
+
+The `EDBAQNavigationMode` class represents the different types of navigation modes available.
+
+| **Value**        | **Description**                                                     |
+| ---------------- | ------------------------------------------------------------------- |
+| First_Message    | Returns the first available message that matches the search terms.  |
+| Next_Message     | Returns the next available message that matches the search terms.   |
+| Next_Transaction | Returns the first message of the next transaction group.            |
+
+**EDBAQQueue**
+
+The `EDBAQQueue` class represents a SQL statement to execute `DBMS_AQ` functionality on a PostgreSQL database.
+
+| **Property**      | **Description**                                                                                   |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| Connection        | The connection to be used.                                                                         |
+| Name              | The name of the queue.                                                                             |
+| MessageType       | The message type that is enqueued/dequeued from this queue. For example, `EDBAQMessageType.Udt`.   |
+| UdtTypeName       | The user-defined type name of the message type.                                                    |
+| EnqueueOptions    | The enqueue options to be used.                                                                    |
+| DequeueOptions    | The dequeue options to be used.                                                                    |
+| MessageProperties | The message properties to be used.                                                                 |
+
+**EDBAQVisibility**
+
+The `EDBAQVisibility` class represents the visibility options available.
+
+| **Value** | **Description**                                              |
+| --------- | ------------------------------------------------------------ |
+| Immediate | The enqueue/dequeue is not part of the ongoing transaction.   |
+| On_Commit | The enqueue/dequeue is part of the current transaction.       |
+
+!!! Note
+    - To review the default options for the above parameters, click [here](https://www.enterprisedb.com/docs/en/11.0/EPAS_BIP_Guide_v11/Database_Compatibility_for_Oracle_Developers_Built-in_Package_Guide.1.14.html#pID0E01HG0HA/).
+    - EDB Advanced Queueing functionality uses user-defined types for calling enqueue/dequeue operations. `Server Compatibility Mode=NoTypeLoading` cannot be used with Advanced Queueing because `NoTypeLoading` will not load any user-defined types.
diff --git a/product_docs/docs/net_connector/4.0.10.2/13_using_a_ref_cursor_in_a_net_application.mdx b/product_docs/docs/net_connector/4.0.10.2/13_using_a_ref_cursor_in_a_net_application.mdx
new file mode 100644
index 00000000000..27a8b95622b
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/13_using_a_ref_cursor_in_a_net_application.mdx
@@ -0,0 +1,110 @@
+---
+title: "Using a Ref Cursor in a .NET Application"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/using_a_ref_cursor_in_a_net_application.html"
+---
+
+
+
+A `ref cursor` is a cursor variable that contains a pointer to a query result set. The result set is determined by the execution of the `OPEN FOR` statement using the cursor variable. A cursor variable is not tied to a particular query like a static cursor. The same cursor variable may be opened a number of times with the `OPEN FOR` statement containing different queries; each time, a new result set is created for that query and made available via the cursor variable. There are two ways to declare a cursor variable:
+
+- Use the `SYS_REFCURSOR` built-in data type to declare a weakly-typed ref cursor.
+- Define a strongly-typed ref cursor type and declare a variable of that type.

+
+`SYS_REFCURSOR` is a ref cursor type that allows any result set to be associated with it. This is known as a weakly-typed ref cursor. The following example is a declaration of a weakly-typed ref cursor:
+
+   `<name> SYS_REFCURSOR;`
+
+Following is an example of a strongly-typed ref cursor:
+
+   `TYPE <cursor_type_name> IS REF CURSOR RETURN emp%ROWTYPE;`
+
+**Creating the Stored Procedure**
+
+The following sample code creates a stored procedure called `refcur_inout_callee`. To create the sample procedure, invoke EDB-PSQL and connect to the Advanced Server host database. Enter the following SPL code at the command line:
+
+```Text
+CREATE OR REPLACE PROCEDURE
+    refcur_inout_callee(v_refcur IN OUT SYS_REFCURSOR)
+IS
+BEGIN
+    OPEN v_refcur FOR SELECT ename FROM emp;
+END;
+```
+
+To use the above defined procedure from .NET code, you must specify the data type of the ref cursor being passed as an `IN OUT` parameter, as shown in the above script.
+
+The following C# code uses the stored procedure to retrieve employee names from the `emp` table:
+
+```Text
+using System;
+using System.Data;
+using EnterpriseDB.EDBClient;
+using System.Configuration;
+
+namespace EDBRefCursor
+{
+    class EmpRefcursor
+    {
+        [STAThread]
+        static void Main(string[] args)
+        {
+            string strConnectionString =
+                ConfigurationSettings.AppSettings["DB_CONN_STRING"];
+            EDBConnection conn = new EDBConnection(strConnectionString);
+            conn.Open();
+            try
+            {
+                EDBTransaction tran = conn.BeginTransaction();
+                EDBCommand command = new EDBCommand("refcur_inout_callee",
+                    conn);
+                command.CommandType = CommandType.StoredProcedure;
+                command.Transaction = tran;
+                command.Parameters.Add(new EDBParameter("refCursor",
+                    EDBTypes.EDBDbType.Refcursor, 10, "refCursor",
+                    ParameterDirection.InputOutput, false, 2, 2,
+                    System.Data.DataRowVersion.Current, null));
+
+                command.Prepare();
+                command.Parameters[0].Value = null;
+
+                command.ExecuteNonQuery();
+                String cursorName = command.Parameters[0].Value.ToString();
+                command.CommandText = "fetch all in \"" + cursorName + "\"";
+                command.CommandType = CommandType.Text;
+
+                EDBDataReader reader =
+                    command.ExecuteReader(CommandBehavior.SequentialAccess);
+                int fc = reader.FieldCount;
+                while (reader.Read())
+                {
+                    for (int i = 0; i < fc; i++)
+                    {
+                        Console.WriteLine(reader.GetString(i));
+                    }
+                }
+                reader.Close();
+                tran.Commit();
+            }
+            catch (Exception ex)
+            {
+                Console.WriteLine(ex.Message);
+            }
+        }
+    }
+}
+```
+
+The following .NET code snippet displays the result on the console:
+
+```Text
+for (int i = 0; i < fc; i++)
+{
+    Console.WriteLine(reader.GetString(i));
+}
+```
+
+Please note that you must bind the `EDBDbType.RefCursor` type in `EDBParameter()` if you are using a ref cursor parameter.
diff --git a/product_docs/docs/net_connector/4.0.10.2/14_using_plugins.mdx b/product_docs/docs/net_connector/4.0.10.2/14_using_plugins.mdx
new file mode 100644
index 00000000000..b74cd2e8400
--- /dev/null
+++ b/product_docs/docs/net_connector/4.0.10.2/14_using_plugins.mdx
@@ -0,0 +1,210 @@
+---
+title: "Using Plugins"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/using_plugins.html"
+---
+
+
+
+EDB .NET driver plugins support enhanced capabilities for different data types that are otherwise not available in .NET.
The different plugins available support:

- GeoJSON
- Json.NET
- LegacyPostGIS
- NetTopologySuite
- NodaTime
- RawPostGIS

The plugins add support for spatial, date/time, and JSON types. The following sections detail the supported frameworks and data provider installation paths for these plugins.

## GeoJSON

If you are using the GeoJSON plugin on .NET Framework 4.5, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\GeoJSON\net45`

The following shared library files are required:

  `EnterpriseDB.EDBClient.GeoJSON.dll`

  `GeoJSON.Net.dll`

  `Newtonsoft.Json.dll`

If you are using the GeoJSON plugin on .NET Framework 4.6.1, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\GeoJSON\net461`

The following shared library files are required:

  `EnterpriseDB.EDBClient.GeoJSON.dll`

  `GeoJSON.Net.dll`

  `Newtonsoft.Json.dll`

If you are using the GeoJSON plugin on .NET Standard 2.0, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\GeoJSON\netstandard2.0`

The following shared library file is required:

  `EnterpriseDB.EDBClient.GeoJSON.dll`

For detailed information about using the GeoJSON plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/geojson.html).

## Json.NET

If you are using the Json.NET plugin on .NET Framework 4.5, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\Json.NET\net45`

The following shared library files are required:

  `EnterpriseDB.EDBClient.Json.NET.dll`

  `Newtonsoft.Json.dll`

If you are using the Json.NET plugin on .NET Framework 4.6.1, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\Json.NET\net461`

The following shared library files are required:

  `EnterpriseDB.EDBClient.Json.NET.dll`

  `Newtonsoft.Json.dll`

If you are using the Json.NET plugin on .NET Standard 2.0, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\Json.NET\netstandard2.0`

The following shared library file is required:

  `EnterpriseDB.EDBClient.Json.NET.dll`

For detailed information about using the Json.NET plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/jsonnet.html).

## LegacyPostGIS

If you are using the LegacyPostGIS plugin on .NET Framework 4.5, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\LegacyPostgis\net45`

The following shared library file is required:

  `EnterpriseDB.EDBClient.LegacyPostgis.dll`

If you are using the LegacyPostGIS plugin on .NET Framework 4.6.1, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\LegacyPostgis\net461`

The following shared library file is required:

  `EnterpriseDB.EDBClient.LegacyPostgis.dll`

If you are using the LegacyPostGIS plugin on .NET Standard 2.0, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\LegacyPostgis\netstandard2.0`

The following shared library file is required:

  `EnterpriseDB.EDBClient.LegacyPostgis.dll`

For detailed information about using the LegacyPostGIS plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/legacy-postgis.html).
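The plugins are enabled in application code rather than by configuration. The following is a minimal sketch only: it assumes the EDB driver mirrors Npgsql's plugin API (consistent with the renaming convention described in the API reference section of this guide), so the `UseLegacyPostgis()` call, the `EnterpriseDB.EDBClient.LegacyPostgis` namespace, and the `places` table with its `location` geometry column are illustrative assumptions, not confirmed API:

```text
using System;
using EnterpriseDB.EDBClient;
using EnterpriseDB.EDBClient.LegacyPostgis;  // assumed namespace, mirroring Npgsql.LegacyPostgis

namespace LegacyPostgisSample
{
    class Program
    {
        static void Main(string[] args)
        {
            // Register the plugin once, before any connections are opened.
            EDBConnection.GlobalTypeMapper.UseLegacyPostgis();

            EDBConnection conn = new EDBConnection(
                "Server=localhost;Port=5444;database=test;User ID=enterprisedb;");
            conn.Open();

            // "places" and its geometry column "location" are hypothetical.
            EDBCommand cmd = new EDBCommand("SELECT location FROM places LIMIT 1", conn);
            PostgisPoint point = cmd.ExecuteScalar() as PostgisPoint;
            if (point != null)
                Console.WriteLine("X=" + point.X + ", Y=" + point.Y);

            conn.Close();
        }
    }
}
```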
## NetTopologySuite

If you are using the NetTopologySuite plugin on .NET Framework 4.5, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\NetTopologySuite\net45`

The following shared library files are required:

  `EnterpriseDB.EDBClient.NetTopologySuite.dll`

  `GeoAPI.dll`

  `NetTopologySuite.dll`

  `NetTopologySuite.IO.PostGis.dll`

If you are using the NetTopologySuite plugin on .NET Framework 4.6.1, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\NetTopologySuite\net461`

The following shared library files are required:

  `EnterpriseDB.EDBClient.NetTopologySuite.dll`

  `NetTopologySuite.dll`

  `NetTopologySuite.IO.PostGis.dll`

If you are using the NetTopologySuite plugin on .NET Standard 2.0, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\NetTopologySuite\netstandard2.0`

The following shared library file is required:

  `EnterpriseDB.EDBClient.NetTopologySuite.dll`

For detailed information about using the NetTopologySuite type plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/nts.html).

## NodaTime

If you are using the NodaTime plugin on .NET Framework 4.5, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\NodaTime\net45`

The following shared library files are required:

  `EnterpriseDB.EDBClient.NodaTime.dll`

  `NodaTime.dll`

If you are using the NodaTime plugin on .NET Framework 4.6.1, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\NodaTime\net461`

The following shared library files are required:

  `EnterpriseDB.EDBClient.NodaTime.dll`

  `NodaTime.dll`

If you are using the NodaTime plugin on .NET Standard 2.0, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\NodaTime\netstandard2.0`

The following shared library file is required:

  `EnterpriseDB.EDBClient.NodaTime.dll`

For detailed information about using the NodaTime plugin, see the [Npgsql documentation](http://www.npgsql.org/doc/types/nodatime.html).

## RawPostGIS

If you are using the RawPostGIS plugin on .NET Framework 4.5, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\RawPostgis\net45`

The following shared library file is required:

  `EnterpriseDB.EDBClient.RawPostgis.dll`

If you are using the RawPostGIS plugin on .NET Framework 4.6.1, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\RawPostgis\net461`

The following shared library file is required:

  `EnterpriseDB.EDBClient.RawPostgis.dll`

If you are using the RawPostGIS plugin on .NET Standard 2.0, the data provider installation path is:

  `C:\Program Files\edb\dotnet\plugins\RawPostgis\netstandard2.0`

The following shared library file is required:

  `EnterpriseDB.EDBClient.RawPostgis.dll`

For detailed information about using the RawPostGIS plugin, see the [documentation](https://www.nuget.org/packages/Npgsql.RawPostgis).

diff --git a/product_docs/docs/net_connector/4.0.10.2/15_using_object_types.mdx b/product_docs/docs/net_connector/4.0.10.2/15_using_object_types.mdx new file mode 100644 index 00000000000..124df13ec48 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/15_using_object_types.mdx @@ -0,0 +1,190 @@

---
title: "Using Object Types in .NET"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/using_object_types.html"
---

The SQL `CREATE TYPE` command is used to create a user-defined `object type`, which is stored in the Advanced Server database.

These user-defined types can then be referenced within SPL procedures, SPL functions, and .NET programs.

The basic object type is created with the `CREATE TYPE AS OBJECT` command, along with optional use of the `CREATE TYPE BODY` command.

An example that demonstrates using an object type is shown in the following section.

## Using an Object Type

To use an object type, you must first create the object type in the Advanced Server database. Object type `addr_object_type` defines the attributes of an address:

```text
CREATE OR REPLACE TYPE addr_object_type AS OBJECT
(
    street          VARCHAR2(30),
    city            VARCHAR2(20),
    state           CHAR(2),
    zip             NUMBER(5)
);
```

Object type `emp_obj_typ` defines the attributes of an employee. Note that one of these attributes is object type `ADDR_OBJECT_TYPE`, as previously described. The object type body contains a method that displays the employee information:

```text
CREATE OR REPLACE TYPE emp_obj_typ AS OBJECT
(
    empno           NUMBER(4),
    ename           VARCHAR2(20),
    addr            ADDR_OBJECT_TYPE,
    MEMBER PROCEDURE display_emp(SELF IN OUT emp_obj_typ)
);

CREATE OR REPLACE TYPE BODY emp_obj_typ AS
  MEMBER PROCEDURE display_emp (SELF IN OUT emp_obj_typ)
  IS
  BEGIN
    DBMS_OUTPUT.PUT_LINE('Employee No   : ' || SELF.empno);
    DBMS_OUTPUT.PUT_LINE('Name          : ' || SELF.ename);
    DBMS_OUTPUT.PUT_LINE('Street        : ' || SELF.addr.street);
    DBMS_OUTPUT.PUT_LINE('City/State/Zip: ' || SELF.addr.city || ', ' ||
        SELF.addr.state || ' ' || LPAD(SELF.addr.zip,5,'0'));
  END;
END;
```

The following listing is a complete .NET program that uses these user-defined object types:

```text
using System;
using System.Data;
using EnterpriseDB.EDBClient;

namespace ObjectTypesSample
{
    class Program
    {
        static void Main(string[] args)
        {
            EDBConnection.GlobalTypeMapper.MapComposite<addr_object_type>("enterprisedb.addr_object_type");
            EDBConnection.GlobalTypeMapper.MapComposite<emp_obj_typ>("enterprisedb.emp_obj_typ");
            EDBConnection conn = new EDBConnection("Server=localhost;Port=5444;database=test;User ID=enterprisedb;password=;");

            try
            {
                conn.Open();

                EDBCommand cmd = new EDBCommand("emp_obj_typ.display_emp", conn);

                cmd.CommandType = System.Data.CommandType.StoredProcedure;
                EDBCommandBuilder.DeriveParameters(cmd);

                addr_object_type address = new addr_object_type()
                {
                    street = "123 MAIN STREET",
                    city = "EDISON",
                    state = "NJ",
                    zip = 8817
                };

                emp_obj_typ emp = new emp_obj_typ()
                {
                    empno = 9001,
                    ename = "JONES",
                    addr = address
                };
                cmd.Parameters[0].Value = emp;
                cmd.Prepare();
                cmd.ExecuteNonQuery();

                emp_obj_typ empOut = (emp_obj_typ)cmd.Parameters[0].Value;
                Console.WriteLine("Emp No: " + empOut.empno);
                Console.WriteLine("Emp Name: " + empOut.ename);
                Console.WriteLine("Emp Address Street: " + empOut.addr.street);
                Console.WriteLine("Emp Address City: " + empOut.addr.city);
                Console.WriteLine("Emp Address State: " + empOut.addr.state);
                Console.WriteLine("Emp Address Zip: " + empOut.addr.zip);
            }
            catch (EDBException exp)
            {
                Console.WriteLine(exp.Message.ToString());
            }
            finally
            {
                conn.Close();
            }
        }
    }

    public class addr_object_type
    {
        public string street;
        public string city;
        public string state;
        public decimal zip;
    }

    public class emp_obj_typ
    {
        public decimal empno;
        public string ename;
        public addr_object_type addr;
    }
}
```

The following .NET types are defined to map to the types in Advanced Server:

```text
public class addr_object_type
{
    public string street;
    public string city;
    public string state;
    public decimal zip;
}

public class emp_obj_typ
{
    public decimal empno;
    public string ename;
    public addr_object_type addr;
}
```

A call to `EDBConnection.GlobalTypeMapper.MapComposite` maps each .NET type to the corresponding Advanced Server type:

```text
EDBConnection.GlobalTypeMapper.MapComposite<addr_object_type>("enterprisedb.addr_object_type");
EDBConnection.GlobalTypeMapper.MapComposite<emp_obj_typ>("enterprisedb.emp_obj_typ");
```

A call to `EDBCommandBuilder.DeriveParameters()` gets the parameter information for a stored procedure. This allows you to just set the parameter values and call the stored procedure:

```text
EDBCommandBuilder.DeriveParameters(cmd);
```

The value of the parameter is set by creating an object of the .NET type and assigning it to the `Value` property of the parameter:

```text
addr_object_type address = new addr_object_type()
{
    street = "123 MAIN STREET",
    city = "EDISON",
    state = "NJ",
    zip = 8817
};

emp_obj_typ emp = new emp_obj_typ()
{
    empno = 9001,
    ename = "JONES",
    addr = address
};
cmd.Parameters[0].Value = emp;
```

A call to `cmd.ExecuteNonQuery()` executes the call to the `display_emp()` method:

```text
cmd.ExecuteNonQuery();
```

diff --git a/product_docs/docs/net_connector/4.0.10.2/16_scram_compatibility.mdx b/product_docs/docs/net_connector/4.0.10.2/16_scram_compatibility.mdx new file mode 100644 index 00000000000..84174740144 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/16_scram_compatibility.mdx @@ -0,0 +1,13 @@

---
title: "Scram Compatibility"
legacyRedirects:
  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/security_and_encryption.html"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/scram_compatibility.html"
---

The EDB .NET driver provides SCRAM-SHA-256 support for Advanced Server versions 12, 11, and 10. This support is available in the EDB .NET Connector release 4.0.2.1 and later. \ No newline at end of file

diff --git a/product_docs/docs/net_connector/4.0.10.2/17_advanced_server_net_connector_logging.mdx b/product_docs/docs/net_connector/4.0.10.2/17_advanced_server_net_connector_logging.mdx new file mode 100644 index 00000000000..ea7fe3ffb6f --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/17_advanced_server_net_connector_logging.mdx @@ -0,0 +1,107 @@

---
title: "EDB .NET Connector Logging"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/advanced_server_net_connector_logging.html"
---

The EDB .NET Connector supports logging to help resolve issues with the .NET Connector when used in your application. The connector uses classes in the `EnterpriseDB.EDBClient.Logging` namespace for logging.

**Console Logging to Standard Error**

`EnterpriseDB.EDBClient.Logging.ConsoleLoggingProvider` is a class that outputs error messages to `STDERR`. To use this class, include the following line in your application before using any of the EDB .NET Connector APIs:
`EnterpriseDB.EDBClient.Logging.EDBLogManager.Provider = new EnterpriseDB.EDBClient.Logging.ConsoleLoggingProvider(EDBLogLevel.Debug, true, true);`

The following log levels are available:

- Trace
- Debug
- Info
- Warn
- Error
- Fatal

**Writing a Custom Logger**

If the console logging provider does not fulfill your requirements, you can write a custom logger by implementing the `EnterpriseDB.EDBClient.Logging.IEDBLoggingProvider` interface and extending the `EnterpriseDB.EDBClient.Logging.EDBLogger` class, for instance to write your logs to a file. The following is a simple example of how to write a custom logger:

```text
using System;
using System.IO;
using System.Text;
using EnterpriseDB.EDBClient.Logging;

public class MyLoggingProvider : IEDBLoggingProvider
{
    string _logFile;
    readonly EDBLogLevel _minLevel;
    readonly bool _printLevel;
    readonly bool _printConnectorId;

    public MyLoggingProvider(string logFile, EDBLogLevel minLevel = EDBLogLevel.Info, bool printLevel = false, bool printConnectorId = false)
    {
        _logFile = logFile;
        _minLevel = minLevel;
        _printLevel = printLevel;
        _printConnectorId = printConnectorId;
    }

    public EDBLogger CreateLogger(string name)
    {
        return new MyLogger(_logFile, _minLevel, _printLevel, _printConnectorId);
    }
}

class MyLogger : EDBLogger
{
    string _logFile;
    readonly EDBLogLevel _minLevel;
    readonly bool _printLevel;
    readonly bool _printConnectorId;

    internal MyLogger(string logFile, EDBLogLevel minLevel, bool printLevel, bool printConnectorId)
    {
        _logFile = logFile;
        _minLevel = minLevel;
        _printLevel = printLevel;
        _printConnectorId = printConnectorId;
    }

    public override bool IsEnabled(EDBLogLevel level) => level >= _minLevel;

    public override void Log(EDBLogLevel level, int connectorId, string msg, Exception exception = null)
    {
        if (!IsEnabled(level))
            return;

        using (StreamWriter writer = new StreamWriter(_logFile, true))
        {
            var sb = new StringBuilder();
            if (_printLevel)
            {
                sb.Append(level.ToString().ToUpper());
                sb.Append(' ');
            }

            if (_printConnectorId && connectorId != 0)
            {
                sb.Append("[");
                sb.Append(connectorId);
                sb.Append("] ");
            }

            sb.AppendLine(msg);

            if (exception != null)
                sb.AppendLine(exception.ToString());

            writer.Write(sb.ToString());
        }
    }
}
```

To use this custom logger, put the following line in your application before using any of the EDB .NET Connector APIs:

`EDBLogManager.Provider = new MyLoggingProvider(filepath, EDBLogLevel.Debug, true, true);`

diff --git a/product_docs/docs/net_connector/4.0.10.2/18_api_reference.mdx b/product_docs/docs/net_connector/4.0.10.2/18_api_reference.mdx new file mode 100644 index 00000000000..2eb61b0f57b --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/18_api_reference.mdx @@ -0,0 +1,16 @@

---
title: "API Reference"

legacyRedirectsGenerated:
  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
  - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.1.6.1/api_reference.html"
---

For information about using the API, see the [Npgsql documentation](http://www.npgsql.org/doc/api/Npgsql.html).

Usage notes:

- When using the API, replace references to `Npgsql` with `EnterpriseDB.EDBClient`.
- When referring to classes, replace `Npgsql` with `EDB`. For example, use the `EDBBinaryExporter` class instead of the `NpgsqlBinaryExporter` class.
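To make the renaming concrete, the following is a minimal sketch of a binary COPY export using the renamed classes. It assumes the EDB API mirrors Npgsql's `BeginBinaryExport()`/`StartRow()`/`Read<T>()` surface, with `EDBBinaryExporter` standing in for `NpgsqlBinaryExporter`; treat it as an illustration of the naming convention rather than confirmed behavior:

```text
using System;
using EnterpriseDB.EDBClient;

namespace BinaryExportSample
{
    class Program
    {
        static void Main(string[] args)
        {
            EDBConnection conn = new EDBConnection(
                "Server=localhost;Port=5444;database=test;User ID=enterprisedb;");
            conn.Open();

            // EDBBinaryExporter plays the role of Npgsql's NpgsqlBinaryExporter.
            using (var exporter = conn.BeginBinaryExport("COPY emp (ename) TO STDOUT (FORMAT BINARY)"))
            {
                // In the Npgsql API, StartRow() returns -1 when there are no more rows.
                while (exporter.StartRow() != -1)
                {
                    Console.WriteLine(exporter.Read<string>());
                }
            }

            conn.Close();
        }
    }
}
```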
diff --git a/product_docs/docs/net_connector/4.0.10.2/images/connection_opened_successfully.png b/product_docs/docs/net_connector/4.0.10.2/images/connection_opened_successfully.png new file mode 100755 index 00000000000..bab12126d20 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/images/connection_opened_successfully.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:537e7edecce3882b794110e0ffc644a51cab69ba2c7e7f82a3e99d32c4b4ba65 +size 22683 diff --git a/product_docs/docs/net_connector/4.0.10.2/images/dialog.png b/product_docs/docs/net_connector/4.0.10.2/images/dialog.png new file mode 100755 index 00000000000..19cba54d1f9 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/images/dialog.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f922e6cd4e43927708b5f460f5389a5b3a41dd70f3a5394723e6aee7d710f1ae +size 9048 diff --git a/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_complete.png b/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_complete.png new file mode 100755 index 00000000000..3a8d3a0eb02 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_complete.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:623cecb40b7c1ce26641ee9072c916553e2b79efba8ffe276b90bf5203cf7e3c +size 86308 diff --git a/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_dialog.png b/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_dialog.png new file mode 100755 index 00000000000..b465d536032 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_dialog.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:994a725f061f1b51fd92ccc2df5abd9066a1cf4ea7600611ae57ebc6cc59af20 +size 51144 diff --git a/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_wizard.png b/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_wizard.png new file mode 100755 index 00000000000..99b731c9688 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/images/dotnet_installation_wizard.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d74f5e2ba8dc157d45d7d6560c152e8b0039092d3de4363403cd0c4a5ffb4112 +size 80007 diff --git a/product_docs/docs/net_connector/4.0.10.2/images/ready_to_install.png b/product_docs/docs/net_connector/4.0.10.2/images/ready_to_install.png new file mode 100755 index 00000000000..59e44d96bc5 --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/images/ready_to_install.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91b18bef7b78a6dae7d6b664e2bccfbfdb4248dbd034cb59e2c6a35ada7da49c +size 44080 diff --git a/product_docs/docs/net_connector/4.0.10.2/images/selecting_the_connectors_installer.png b/product_docs/docs/net_connector/4.0.10.2/images/selecting_the_connectors_installer.png new file mode 100755 index 00000000000..e322dd834ec --- /dev/null +++ b/product_docs/docs/net_connector/4.0.10.2/images/selecting_the_connectors_installer.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70c0ffedddbd8c5972853ce2fc6fee88e2a398594e01b90d00012f0c0956846a +size 98220 diff --git a/product_docs/docs/net_connector/4.0.10.2/images/starting_stackbuilder_plus.png b/product_docs/docs/net_connector/4.0.10.2/images/starting_stackbuilder_plus.png new file mode 100755 index 00000000000..392aaa87fdc --- /dev/null +++ 
b/product_docs/docs/net_connector/4.0.10.2/images/starting_stackbuilder_plus.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0756f2b14caa14049f52660d1a25f60b35f666e70241c17d78c2e63743c3b074 +size 103715 diff --git a/product_docs/docs/net_connector/4.0.10.2/index.mdx b/product_docs/docs/net_connector/4.0.10.2/index.mdx index 95e084a45eb..e8303b3449e 100644 --- a/product_docs/docs/net_connector/4.0.10.2/index.mdx +++ b/product_docs/docs/net_connector/4.0.10.2/index.mdx @@ -13,4 +13,16 @@ legacyRedirectsGenerated: - "/edb-docs/d/edb-postgres-net-connector/user-guides/net-guide/4.0.10.2/index.html" --- -
+The EDB .NET Connector distributed with EDB Postgres Advanced Server (Advanced Server) provides connectivity between a .NET client application and an Advanced Server database server. This guide provides installation and usage instructions, along with examples that demonstrate the functionality of the EDB .NET Connector:
+
+- How to connect to an instance of Advanced Server.
+- How to retrieve information from an Advanced Server database.
+- How to update information stored in an Advanced Server database.
+
+This document assumes that you have a solid working knowledge of both C# and .NET. The EDB .NET Connector functionality is built on the core functionality of the Npgsql open source project. The *Npgsql User's Manual* is available [online](http://www.npgsql.org/doc/index.html).
+
+ +whats_new requirements_overview the_advanced_server_net_connector_overview installing_and_configuring_the_net_connector using_the_net_connector opening_a_database_connection retrieving_database_records parameterized_queries inserting_records_in_a_database deleting_records_in_a_database using_spl_stored_procedures_in_your_net_application using_advanced_queueing using_a_ref_cursor_in_a_net_application using_plugins using_object_types scram_compatibility advanced_server_net_connector_logging api_reference conclusion + +
diff --git a/scripts/normalize/lib/relativelinks.js b/scripts/normalize/lib/relativelinks.js index d3992500932..6e8190477f3 100644 --- a/scripts/normalize/lib/relativelinks.js +++ b/scripts/normalize/lib/relativelinks.js @@ -130,10 +130,15 @@ const index = { keyId: (id) => { - return '#' + slugger.slug(decodeURIComponent(id.replace(/^[^#]*#+/, '')) - .replace(/[_-]/g, "") - .replace(/\s+/g, "")) - .toLowerCase(); + let key = id.replace(/^[^#]*#+/, ''); + try + { + // may or may not be encoded properly; try to decode, but continue if not possible + key = decodeURIComponent(key); + } catch {} + key = key.replace(/[_-]/g, "") + .replace(/\s+/g, "") + return '#' + slugger.slug(key).toLowerCase(); }, valuePathId: (filepath, id, product, version) => { diff --git a/scripts/source/source_cloud_native_operator.py b/scripts/source/source_cloud_native_operator.py index 918bf4579ca..63c7c0ada85 100644 --- a/scripts/source/source_cloud_native_operator.py +++ b/scripts/source/source_cloud_native_operator.py @@ -38,7 +38,7 @@ def index_frontmatter(): elif readingNav: nav.append(line.replace(".md", "")) if "quickstart.md" in line: - nav.append(" - interactive\n") + nav.append(" - interactive_demo\n") return INDEX_FRONTMATTER.format("".join(nav)) @@ -59,11 +59,14 @@ def process_md(file_path): if paragraph == 2: line = """ + + !!! Tip "Live demonstration" Don't want to install anything locally just yet? Try a demonstration directly in your browser: - [Cloud Native PostgreSQL Operator Interactive Quickstart](interactive/installation_and_deployment/) + [Cloud Native PostgreSQL Operator Interactive Quickstart](interactive_demo) + """ elif copying: line = rewrite_yaml_links(line)