diff --git a/.github/workflows/deploy-develop.yml b/.github/workflows/deploy-develop.yml index b0aaa6e40b1..fff12a223ea 100644 --- a/.github/workflows/deploy-develop.yml +++ b/.github/workflows/deploy-develop.yml @@ -9,8 +9,7 @@ jobs: steps: - name: Cleanup disk uses: curoky/cleanup-disk-action@v2.0 - with: - retain: "python,node" + - uses: actions/checkout@v2 with: ref: develop @@ -18,7 +17,7 @@ jobs: - name: Adjust file watchers limit run: echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p - - uses: actions/setup-node@v1 + - uses: actions/setup-node@v2 with: node-version: "14.x" - name: Install yarn @@ -28,6 +27,20 @@ jobs: env: NODE_ENV: ${{ secrets.NODE_ENV }} + - uses: actions/setup-python@v2 + with: + python-version: "3.x" + - uses: r-lib/actions/setup-pandoc@v1 + with: + pandoc-version: "2.14.1" + - name: Install wkhtmltopdf + run: | + curl -L https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb > wkhtmltopdf.deb + sudo apt-get install ./wkhtmltopdf.deb + + - name: Build all pdfs + run: npm run build-all-pdfs-ci + - name: Checking Gatsby cache id: gatsby-cache-build uses: actions/cache@v2 diff --git a/.github/workflows/deploy-main.yml b/.github/workflows/deploy-main.yml index 89254b43f19..f3670d96204 100644 --- a/.github/workflows/deploy-main.yml +++ b/.github/workflows/deploy-main.yml @@ -9,8 +9,7 @@ jobs: steps: - name: Cleanup disk uses: curoky/cleanup-disk-action@v2.0 - with: - retain: "python,node" + - uses: actions/checkout@v2 with: ref: main @@ -18,7 +17,7 @@ jobs: - name: Adjust file watchers limit run: echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p - - uses: actions/setup-node@v1 + - uses: actions/setup-node@v2 with: node-version: "14.x" - name: Install yarn @@ -28,6 +27,21 @@ jobs: env: NODE_ENV: ${{ secrets.NODE_ENV }} + - uses: actions/setup-python@v2 + with: + python-version: "3.x" + - uses: r-lib/actions/setup-pandoc@v1 + with: + pandoc-version: "2.14.1" + - name: Install wkhtmltopdf + run: | + curl -L https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb > wkhtmltopdf.deb + sudo apt-get install ./wkhtmltopdf.deb + + - name: Build all pdfs + run: npm run build-all-pdfs-ci + + - name: Checking Gatsby cache id: gatsby-cache-build uses: actions/cache@v2 diff --git a/.github/workflows/update-pdfs-on-develop.yml b/.github/workflows/update-pdfs-on-develop.yml deleted file mode 100644 index 15d9b540c9c..00000000000 --- a/.github/workflows/update-pdfs-on-develop.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Update PDFs on Develop -on: - push: - branches: - - develop - paths: - - product_docs/docs/**.mdx - - scripts/pdf/generate_pdf.py - - .github/workflows/update-pdfs-on-develop.yml -jobs: - build-pdfs: - runs-on: ubuntu-20.04 - steps: - - name: Cleanup disk - uses: curoky/cleanup-disk-action@v2.0 - with: - retain: "python,node" - - uses: actions/checkout@v2 - with: - ref: develop - ssh-key: ${{ secrets.ADMIN_SECRET_SSH_KEY }} - - - uses: actions/setup-node@v1 - with: - node-version: "14.x" - - uses: actions/setup-python@v2 - with: - python-version: "3.x" - - uses: r-lib/actions/setup-pandoc@v1 - with: - pandoc-version: "2.10.1" - - name: Install wkhtmltopdf - run: | - curl -L https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb > wkhtmltopdf.deb - sudo apt-get install ./wkhtmltopdf.deb - - - name: Build all pdfs - run: npm run build-all-pdfs-ci - - - run: 
git status - - run: git pull - - name: Commit pdfs to develop - run: | - git config user.name josh-heyer - git config user.email josh.heyer@enterprisedb.com - git add *.pdf - git commit -m "New PDFs generated by Github Actions" - git push - - - uses: act10ns/slack@v1 - with: - status: ${{ job.status }} - if: failure() - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.gitignore b/.gitignore index 2df3774fd5b..373a0eb8c9b 100644 --- a/.gitignore +++ b/.gitignore @@ -74,9 +74,10 @@ yarn-error.log __pycache__ # Project specific +.project +advocacy_docs/kubernetes/cloud_native_postgresql/*.md.in dev-sources.json product_docs/content/ product_docs/content_build/ static/nginx_redirects.generated temp_kubernetes/ -advocacy_docs/kubernetes/cloud_native_postgresql/*.md.in diff --git a/README.md b/README.md index 5fc103b65a2..29249b36ef4 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,12 @@ This repo contains the React/Gatsby application that powers [the EDB Docs website](https://www.enterprisedb.com/docs/). The site pulls [Markdown](https://www.markdownguide.org/) content from several repos in a process called "sourcing", and then renders it all into high-performance markup. You can install the application on your local computer for easy editing, viewing, and eventually publishing to the GitHub repo. +## Please remove and re-clone your local repositories after August 13, 2021 + +We've been checking PDF files into Git. That's not a good plan, so [we've stopped doing that](https://github.com/EnterpriseDB/docs/issues/1632). The next step is to remove these files from Git history and [move other large files to LFS](https://docs.github.com/en/github/managing-large-files/versioning-large-files/moving-a-file-in-your-repository-to-git-large-file-storage). Among many other good things, that ought to reduce the time to clone this repository substantially. + +But it comes at a cost. If there are any local repositories that were cloned before the change, we risk introducing dirty history back into the repository. So we're asking everyone who has a local repository they **cloned before (or on) August 13, 2021** to delete those repositories and clone fresh copies. Unfortunately, we'll need to reject any pull requests that introduce PDF files back into Git history. (If you need any help with this, please contact jon.ericson@enterprisedb.com.) + ## MacOS Installation We recommend using MacOS to work with the EDB Docs application.
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx index 95e801d444c..4f439b66e36 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/api_reference.mdx @@ -39,6 +39,7 @@ Below you will find a description of the defined resources: - [ClusterSpec](#ClusterSpec) - [ClusterStatus](#ClusterStatus) - [ConfigMapKeySelector](#ConfigMapKeySelector) +- [ConfigMapResourceVersion](#ConfigMapResourceVersion) - [DataBackupConfiguration](#DataBackupConfiguration) - [EPASConfiguration](#EPASConfiguration) - [ExternalCluster](#ExternalCluster) @@ -66,13 +67,15 @@ Below you will find a description of the defined resources: AffinityConfiguration contains the info we need to create the affinity rules for Pods -Name | Description | Type ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- -`enablePodAntiAffinity` | Activates anti-affinity for the pods. The operator will define pods anti-affinity unless this field is explicitly set to false | *bool -`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string -`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string -`tolerations ` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | []corev1.Toleration -`podAntiAffinityType ` | PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are added if all the existing nodes don't match the required pod anti-affinity rule. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity | string +Name | Description | Type +------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- +`enablePodAntiAffinity ` | Activates anti-affinity for the pods. 
The operator will define pods anti-affinity unless this field is explicitly set to false | *bool +`topologyKey ` | TopologyKey to use for anti-affinity configuration. See k8s documentation for more info on that - *mandatory* | string +`nodeSelector ` | NodeSelector is map of key-value pairs used to define the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | map[string]string +`tolerations ` | Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run on tainted nodes. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | []corev1.Toleration +`podAntiAffinityType ` | PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are added if all the existing nodes don't match the required pod anti-affinity rule. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity | string +`additionalPodAntiAffinity` | AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. | *corev1.PodAntiAffinity +`additionalPodAffinity ` | AdditionalPodAffinity allows to specify pod affinity terms to be passed to all the cluster's pods. | *corev1.PodAffinity @@ -152,6 +155,7 @@ Name | Description --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------- `s3Credentials ` | The credentials to use to upload data to S3 - *mandatory* | [S3Credentials](#S3Credentials) `endpointURL ` | Endpoint to be used to upload data to the cloud, overriding the automatic endpoint discovery | string +`endpointCA ` | EndpointCA store the CA bundle of the barman endpoint. Useful when using self-signed certificates to avoid errors with certificate issuer and barman-cloud-wal-archive | [*SecretKeySelector](#SecretKeySelector) `destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) this path, with different destination folders, will be used for WALs and for data - *mandatory* | string `serverName ` | The server name on S3, the cluster name is used if this parameter is omitted | string `wal ` | The configuration for the backup of the WAL stream. When not defined, WAL files will be stored uncompressed and may be unencrypted in the object store, according to the bucket default policy. | [*WalBackupConfiguration](#WalBackupConfiguration) @@ -210,18 +214,13 @@ Name | Description CertificatesConfiguration contains the needed configurations to handle server certificates. 
-Name | Description | Type ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- -`serverCASecret ` | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret. - -Contains: - -- `ca.crt`: CA that should be used to validate the server certificate, - used as `sslrootcert` in client connection strings. -- `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, - this can be omitted. | string -`serverTLSSecret ` | The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must provide also `ca.key` and a new secret will be created using the provided CA. | string -`serverAltDNSNames` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. | []string +Name | Description | Type +-------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- +`serverCASecret ` | The secret containing the Server CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.

+Contains:
+
+- `ca.crt`: CA that should be used to validate the server certificate, used as `sslrootcert` in client connection strings.
+- `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, this can be omitted.
+| string +`serverTLSSecret ` | The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. If not defined, ServerCASecret must provide also `ca.key` and a new secret will be created using the provided CA. | string +`replicationTLSSecret` | The secret of type kubernetes.io/tls containing the client certificate to authenticate as the `streaming_replica` user. If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be created using the provided CA. | string +`clientCASecret ` | The secret containing the Client CA certificate. If not defined, a new secret will be created with a self-signed CA and will be used to generate all the client certificates.
+
+Contains:
+
+- `ca.crt`: CA that should be used to validate the client certificates, used as `ssl_ca_file` of all the instances.
+- `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, this can be omitted.
| string +`serverAltDNSNames ` | The list of the server alternative DNS names to be added to the generated server TLS certificates, when required. | []string @@ -229,15 +228,9 @@ Contains: CertificatesStatus contains configuration certificates and related expiration dates. -Name | Description | Type --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- -`clientCASecret ` | The secret containing the Client CA certificate. This secret contains a self-signed CA and is used to sign TLS certificates used for client authentication. - -Contains: - -- `ca.crt`: CA that should be used to validate the client certificate, used as `ssl_ca_file`. - `ca.key`: key used to sign client SSL certs. | string -`replicationTLSSecret` | The secret of type kubernetes.io/tls containing the TLS client certificate to authenticate as `streaming_replica` user. | string -`expirations ` | Expiration dates for all certificates. | map[string]string +Name | Description | Type +----------- | -------------------------------------- | ----------------- +`expirations` | Expiration dates for all certificates. | map[string]string @@ -301,26 +294,27 @@ Name | Description ClusterStatus defines the observed state of Cluster -Name | Description | Type ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- -`instances ` | Total number of instances in the cluster | int32 -`readyInstances ` | Total number of ready instances in the cluster | int32 -`instancesStatus ` | Instances status | map[utils.PodStatus][]string -`latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int32 -`currentPrimary ` | Current primary instance | string -`targetPrimary ` | Target primary instance, this is different from the previous one during a switchover or a failover | string -`pvcCount ` | How many PVCs have been created by this cluster | int32 -`jobCount ` | How many Jobs have been created by this cluster | int32 -`danglingPVC ` | List of all the PVCs created by this cluster and still available which are not attached to a Pod | []string -`initializingPVC ` | List of all the PVCs that are being initialized by this cluster | []string -`healthyPVC ` | List of all the PVCs not dangling nor initializing | []string -`licenseStatus ` | Status of the license | licensekey.Status -`writeService ` | Current write pod | string -`readService ` | Current list of read pods | string -`phase ` | Current phase of the cluster | string -`phaseReason ` | Reason for the current phase | string -`secretsResourceVersion` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion) -`certificates ` | The configuration for the CA and related certificates, initialized with defaults. 
| [CertificatesStatus](#CertificatesStatus) +Name | Description | Type +------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- +`instances ` | Total number of instances in the cluster | int32 +`readyInstances ` | Total number of ready instances in the cluster | int32 +`instancesStatus ` | Instances status | map[utils.PodStatus][]string +`latestGeneratedNode ` | ID of the latest generated node (used to avoid node name clashing) | int32 +`currentPrimary ` | Current primary instance | string +`targetPrimary ` | Target primary instance, this is different from the previous one during a switchover or a failover | string +`pvcCount ` | How many PVCs have been created by this cluster | int32 +`jobCount ` | How many Jobs have been created by this cluster | int32 +`danglingPVC ` | List of all the PVCs created by this cluster and still available which are not attached to a Pod | []string +`initializingPVC ` | List of all the PVCs that are being initialized by this cluster | []string +`healthyPVC ` | List of all the PVCs not dangling nor initializing | []string +`licenseStatus ` | Status of the license | licensekey.Status +`writeService ` | Current write pod | string +`readService ` | Current list of read pods | string +`phase ` | Current phase of the cluster | string +`phaseReason ` | Reason for the current phase | string +`secretsResourceVersion ` | The list of resource versions of the secrets managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the secret data | [SecretsResourceVersion](#SecretsResourceVersion) +`configMapResourceVersion` | The list of resource versions of the configmaps, managed by the operator. Every change here is done in the interest of the instance manager, which will refresh the configmap data | [ConfigMapResourceVersion](#ConfigMapResourceVersion) +`certificates ` | The configuration for the CA and related certificates, initialized with defaults. | [CertificatesStatus](#CertificatesStatus) @@ -332,6 +326,16 @@ Name | Description | Type --- | ----------------- | ------ `key` | The key to select - *mandatory* | string + + +## ConfigMapResourceVersion + +ConfigMapResourceVersion is the resource versions of the secrets managed by the operator + +Name | Description | Type +------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------- +`metrics` | A map with the versions of all the config maps used to pass metrics. Map keys are the config map names, map values are the versions | map[string]string + ## DataBackupConfiguration @@ -530,15 +534,17 @@ Name | Description | Type SecretsResourceVersion is the resource versions of the secrets managed by the operator -Name | Description | Type ------------------------- | -------------------------------------------------------------------- | ------ -`superuserSecretVersion ` | The resource version of the "postgres" user secret | string -`replicationSecretVersion` | The resource version of the "streaming_replication" user secret | string -`applicationSecretVersion` | The resource version of the "app" user secret | string -`caSecretVersion ` | Unused. Retained for compatibility with old versions. 
| string -`clientCaSecretVersion ` | The resource version of the PostgreSQL client-side CA secret version | string -`serverCaSecretVersion ` | The resource version of the PostgreSQL server-side CA secret version | string -`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version | string +Name | Description | Type +------------------------ | --------------------------------------------------------------------------------------------------------------------------- | ----------------- +`superuserSecretVersion ` | The resource version of the "postgres" user secret | string +`replicationSecretVersion` | The resource version of the "streaming_replica" user secret | string +`applicationSecretVersion` | The resource version of the "app" user secret | string +`caSecretVersion ` | Unused. Retained for compatibility with old versions. | string +`clientCaSecretVersion ` | The resource version of the PostgreSQL client-side CA secret version | string +`serverCaSecretVersion ` | The resource version of the PostgreSQL server-side CA secret version | string +`serverSecretVersion ` | The resource version of the PostgreSQL server-side secret version | string +`barmanEndpointCA ` | The resource version of the Barman Endpoint CA if provided | string +`metrics ` | A map with the versions of all the secrets used to pass metrics. Map keys are the secret names, map values are the versions | map[string]string diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx index b96cf1ec629..e6152653e81 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/backup_recovery.mdx @@ -17,6 +17,12 @@ You can use the image `quay.io/enterprisedb/postgresql` for this scope, as it is composed of a community PostgreSQL image and the latest `barman-cli-cloud` package. +!!! Warning + Cloud Native PostgreSQL does not currently manage the deletion of backup files + from the backup object store. The retention policy feature will be merged from + Barman to Barman Cloud in the future. For the time being, it is your responsibility + to configure retention policies directly on the object store. + ## Cloud credentials You can archive the backup files in any service whose API is compatible @@ -94,6 +100,16 @@ spec: [...] ``` +!!! Important + Suppose you configure an Object Storage provider which uses a certificate signed with a private CA, + like when using OpenShift or MinIO via HTTPS. In that case, you need to set the option `endpointCA` + referring to a secret containing the CA bundle so that Barman can verify the certificate correctly. + +!!! Note + If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can + add a label with key `k8s.enterprisedb.io/reload` to it, otherwise you will have to reload + the instances using the `kubectl cnp reload` subcommand.
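+For illustration only, the following sketch shows how the `endpointCA` option described
+above might be wired into a `Cluster`. The secret names (`my-ca-bundle`, `minio-creds`),
+their keys, and the endpoint URL are hypothetical examples, not required values:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  backup:
+    barmanObjectStore:
+      destinationPath: "s3://backups/"
+      endpointURL: "https://minio.internal.example.com:9000"
+      # CA bundle (the ca.crt key of the my-ca-bundle secret) used by Barman
+      # to verify the endpoint certificate signed by the private CA
+      endpointCA:
+        name: my-ca-bundle
+        key: ca.crt
+      s3Credentials:
+        accessKeyId:
+          name: minio-creds
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: minio-creds
+          key: ACCESS_SECRET_KEY
+  storage:
+    size: 1Gi
+```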
+ ### MinIO Gateway Optionally, you can use MinIO Gateway as a common interface which diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx index 30169f5b0a1..2c7da4c9678 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/certificates.mdx @@ -16,13 +16,29 @@ In order to set up a `Cluster`, the operator requires: You can find all the secrets used by the cluster and their expiration dates in the cluster's status. +Cloud Native PostgreSQL is very flexible when it comes to TLS certificates, and +primarily operates in two modes: + +1. [**operator managed**](#operator-managed-mode): certificates are internally + managed by the operator in a fully automated way, and signed using a CA created + by Cloud Native PostgreSQL +2. [**user provided**](#user-provided-certificates-mode): certificates are + generated outside the operator and imported in the cluster definition as + secrets - Cloud Native PostgreSQL integrates itself with cert-manager (see + examples below) + +You can also choose a hybrid approach, where only part of the certificates is +generated outside CNP. + ## Operator managed mode By default, the operator generates a single Certification Authority and uses it for both client and server certificates, which are then managed and renewed automatically. -### Server CA Secret +### Server Certificates + +#### Server CA Secret The operator generates a self-signed CA and stores it in a generic secret containing the following keys: @@ -30,19 +46,37 @@ containing the following keys: - `ca.crt`: CA certificate used to validate the server certificate, used as `sslrootcert` in clients' connection strings. - `ca.key`: the key used to sign Server SSL certificate automatically -### Server TLS Secret +#### Server TLS Secret The operator uses the generated self-signed CA to sign a server TLS certificate, stored in a Secret of type `kubernetes.io/tls` and configured to be used as `ssl_cert_file` and `ssl_key_file` by the instances so that clients can verify their identity and connect securely. -### Server alternative DNS names +#### Server alternative DNS names You can specify DNS server alternative names that will be part of the generated server TLS secret in addition to the default ones. -## User-provided server certificate mode +### Client Certificates + +#### Client CA Secret + +The same self-signed CA as the Server CA is used by default. The public part +will be passed as `ssl_ca_file` to all the instances in order to be able to verify +client certificates it signed. The private key will be stored in the same secret and +used to sign Client certificates generated by the `kubectl cnp` plugin. + +#### Client streaming_replica Certificate + +The operator uses the generated self-signed CA to sign a client certificate for +the user `streaming_replica`, storing it in a Secret of type `kubernetes.io/tls`. +This certificate will be passed as `sslcert` and `sslkey` in replicas' connection strings, +to allow securely connecting to the primary instance. + +## User-provided certificates mode + +### Server Certificates If required, you can also provide the two server certificates, generating them using a separate component such as [cert-manager](https://cert-manager.io/). In @@ -58,9 +92,14 @@ the following parameters: The operator will still create and manage the two secrets related to client certificates. +!!! 
Note + If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can + add a label with key `k8s.enterprisedb.io/reload` to it, otherwise you will have to reload + the instances using the `kubectl cnp reload` subcommand. + See below for a complete example. -### Example +#### Example Given the following files: @@ -103,7 +142,7 @@ EOF The new cluster will use the provided server certificates for TLS connections. -### Cert-manager Example +#### Cert-manager Example Here a simple example about how to use [cert-manager](https://cert-manager.io/) to set up a self-signed CA and generate the needed TLS server certificate: @@ -117,6 +156,13 @@ metadata: spec: selfSigned: {} --- +apiVersion: v1 +kind: Secret +metadata: + name: my-postgres-server-cert + labels: + k8s.enterprisedb.io/reload: "" +--- apiVersion: cert-manager.io/v1 kind: Certificate metadata: @@ -158,3 +204,89 @@ spec: storage: size: 1Gi ``` + +You can find a complete example using cert-manager to manage both server and client CA and certificates in +the [cluster-example-cert-manager.yaml](../samples/cluster-example-cert-manager.yaml) deployment manifest. + +### Client Certificate + +If required, you can also provide the two client certificates, generating them +using a separate component such as [cert-manager](https://cert-manager.io/) or +[hashicorp vault](https://www.vaultproject.io/docs/secrets/pki). In order to +use a custom CA to verify client certificates for a Cluster, you must specify +the following parameters: + +- `replicationTLSSecret`: the name of a Secret of type `kubernetes.io/tls`, + containing the client certificate for user `streaming_replica`. It must contain + both the standard `tls.crt` and `tls.key` keys. +- `clientCASecret`: the name of a Secret containing the `ca.crt` key of the CA + that should be used to verify client certificate. + +!!! Note + The operator will still create and manage the two secrets related to server + certificates. + +!!! Note + As the Cluster is not in control of the client CA secret key, client certificates + can not be generated using `kubectl cnp certificate` anymore. + +!!! Note + If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can + add a label with key `k8s.enterprisedb.io/reload` to it, otherwise you will have to reload + the instances using the `kubectl cnp reload` subcommand. 
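+For example, assuming the client CA and certificate live in a secret named
+`my-postgres-client-cert` (a hypothetical name, matching the example below), the
+reload label mentioned above could be applied with:
+
+```sh
+# label the secret so that instances reload it automatically
+kubectl label secret my-postgres-client-cert k8s.enterprisedb.io/reload=""
+```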
+ +#### Cert-manager Example + +Here is a simple example of how to use [cert-manager](https://cert-manager.io/) to set up a self-signed CA and generate +the needed TLS client certificate: + +```yaml +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-postgres-client-cert + labels: + k8s.enterprisedb.io/reload: "" +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: my-postgres-client-cert +spec: + secretName: my-postgres-client-cert + usages: + - client auth + commonName: streaming_replica + issuerRef: + name: selfsigned-issuer + kind: Issuer + group: cert-manager.io +``` + +A Secret named `my-postgres-client-cert` is created by cert-manager, containing all the needed files, and can be referenced +from a Cluster as follows: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + certificates: + clientCASecret: my-postgres-client-cert + replicationTLSSecret: my-postgres-client-cert + storage: + size: 1Gi +``` + +You can find a complete example using cert-manager to manage both server and client CA and certificates in +the [cluster-example-cert-manager.yaml](../samples/cluster-example-cert-manager.yaml) deployment manifest. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx index d09ea770824..b5ad6bc4dd9 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/cnp-plugin.mdx @@ -159,3 +159,19 @@ The following command will restart a given cluster in a rollout fashion: ```shell kubectl cnp restart [cluster_name] ``` + +!!! Note + If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can + add a label with key `k8s.enterprisedb.io/reload` to it. + +### Reload + +The `kubectl cnp reload` command requests the operator to trigger a reconciliation +loop for a certain cluster. This is useful to apply configuration changes +to cluster-dependent objects, such as ConfigMaps containing custom monitoring queries. + +The following command will reload all configurations for a given cluster: + +```shell +kubectl cnp reload [cluster_name] +``` diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx index 6865e378bc4..f7c5a0f6fbf 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/e2e.mdx @@ -34,7 +34,7 @@ and the following suite of E2E tests are performed on that cluster: * Creation of a `Cluster`; * Usage of a persistent volume for data storage; * Connection via services, including read-only; -* Connection via user provided server certificate and client certificate generated by `kubectl-cnp`; +* Connection via user-provided server and/or client certificates; * Scale-up and scale-down of a `Cluster`; * Failover; * Switchover; @@ -55,7 +55,9 @@ and the following suite of E2E tests are performed on that cluster: * Primary endpoint switch in case of failover in less than 10 seconds; the threshold is raised to 20 seconds on GKE and 30 on AKS; * Primary endpoint switch in case of switchover in less than 20 seconds; -* Recover from a degraded state in less than 60 seconds.
+* Recover from a degraded state in less than 60 seconds; +* Physical replica clusters; +* Storage expansion. The E2E tests suite is also run for OpenShift versions 4.5, 4.6, and 4.7 and the latest Kubernetes and PostgreSQL releases on clusters created on the diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx index 04066a7a876..ee7af7ebf25 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/index.mdx @@ -18,8 +18,9 @@ navigation: - interactive_demo - cloud_setup - bootstrap - - resource_management - security + - scheduling + - resource_management - failure_modes - rolling_update - backup_recovery @@ -30,12 +31,12 @@ navigation: - samples - monitoring - logging - - expose_pg_services - certificates - ssl_connections - kubernetes_upgrade - - e2e + - expose_pg_services - cnp-plugin + - e2e - license_keys - container_images - operator_capability_levels @@ -70,13 +71,14 @@ You can [evaluate Cloud Native PostgreSQL for free](evaluation.md). You need a valid license key to use Cloud Native PostgreSQL in production. !!! Important - Currently, based on the [Operator Capability Levels model](operator_capability_levels.md), - users can expect a **"Level III - Full Lifecycle"** set of capabilities from the + Based on the [Operator Capability Levels model](operator_capability_levels.md), + users can expect a **"Level V - Auto Pilot"** set of capabilities from the Cloud Native PostgreSQL Operator. ## Requirements -Cloud Native PostgreSQL requires Kubernetes 1.16 or higher, tested on AWS, Google, Azure (with multiple availability zones). +Cloud Native PostgreSQL requires Kubernetes 1.16 or higher, tested on AWS, +Google, Azure (with multiple availability zones). Cloud Native PostgreSQL has also been certified for [RedHat OpenShift Container Platform (OCP)](https://www.openshift.com/products/container-platform) @@ -105,6 +107,7 @@ PostgreSQL and EDB Postgres Advanced 13, 12, 11 and 10 are currently supported. * Reuse of Persistent Volumes storage in Pods * Rolling updates for PostgreSQL minor versions and operator upgrades * TLS connections and client certificate authentication +* Support for custom TLS certificates (including integration with cert-manager) * Continuous backup to an S3 compatible object store * Full recovery and Point-In-Time recovery from an S3 compatible object store backup * Support for Synchronous Replicas diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx index 80a03ef69bc..5528cbbc604 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/installation_upgrade.mdx @@ -11,12 +11,12 @@ product: 'Cloud Native Operator' The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.6.0.yaml) +You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.7.0.yaml) as follows: ```sh kubectl apply -f \ - https://get.enterprisedb.io/cnp/postgresql-operator-1.6.0.yaml + https://get.enterprisedb.io/cnp/postgresql-operator-1.7.0.yaml ``` Once you have run the `kubectl` command, Cloud Native PostgreSQL will be installed in your Kubernetes cluster. 
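+As a quick, illustrative check that the operator is up (the namespace below is
+created by the manifest; the exact deployment name may vary between versions):
+
+```sh
+# list the operator deployment created by the manifest
+kubectl get deployments -n postgresql-operator-system
+```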
@@ -42,8 +42,8 @@ from the [OperatorHub.io website](https://operatorhub.io), following the install The operator can be installed using the provided [Helm chart](https://github.com/EnterpriseDB/cloud-native-postgresql-helm). !!! Important - Helm does not allow to update CRDs, as discussed [here](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations), - therefore take care to follow the instructions in the chart documentation in order to update them. + Helm does not support the update of CRDs. For further information, please refer to the + [instructions in the Helm chart documentation](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations). ## Installation on Openshift diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx index f8beb010152..0d9dbc72235 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/interactive_demo.mdx @@ -65,7 +65,7 @@ You will see one node called `minikube`. If the status isn't yet "Ready", wait f Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation_upgrade.md) section: ```shell -kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.6.0.yaml +kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.7.0.yaml __OUTPUT__ namespace/postgresql-operator-system created customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created @@ -278,7 +278,7 @@ curl -sSfL \ sudo sh -s -- -b /usr/local/bin __OUTPUT__ EnterpriseDB/kubectl-cnp info checking GitHub for latest tag -EnterpriseDB/kubectl-cnp info found version: 1.6.0 for v1.6.0/linux/x86_64 +EnterpriseDB/kubectl-cnp info found version: 1.7.0 for v1.7.0/linux/x86_64 EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp ``` diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx index d179cb46171..bbe9f0d1b79 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/license_keys.mdx @@ -10,11 +10,6 @@ The only exception is when you run the operator with Community PostgreSQL: in this case, if the license key is unset, a cluster will be started with the default trial license - which automatically expires after 30 days. -!!! Important - After the license expiration, the operator will cease any reconciliation attempt - on the cluster, effectively stopping to manage its status. - The pods and the data will still be available. - ## Company level license keys A license key allows you to create an unlimited number of PostgreSQL @@ -24,7 +19,7 @@ The license key needs to be available in a `ConfigMap` in the same namespace where the operator is deployed. !!! Seealso "Operator configuration" -For more information, please refer to the ["Operator configuration"](operator_conf.md) section. + For more information, please refer to the ["Operator configuration"](operator_conf.md) section. Once the company level license is installed, the validity of the license key can be checked inside the cluster status. 
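+For example, a sketch of how to inspect it with `jsonpath`, assuming a cluster named
+`cluster-example` (the `licenseStatus` field is listed in the `ClusterStatus` API reference):
+
+```sh
+kubectl get cluster cluster-example -o jsonpath='{.status.licenseStatus}'
+```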
@@ -117,3 +112,12 @@ Cloud Native PostgreSQL is distributed under the EnterpriseDB Limited Usage License Agreement, available at [enterprisedb.com/limited-use-license](https://www.enterprisedb.com/limited-use-license). Cloud Native PostgreSQL: Copyright (C) 2019-2021 EnterpriseDB. + +## What happens when a license expires + +After the license expiration, the operator will cease any reconciliation +attempt on the cluster, effectively no longer managing its status. This also +includes any self-healing and high availability capabilities, such as automated +failover and switchovers. + +The pods and the data will still be available. diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx index 16730a03abb..13ded55490c 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/logging.mdx @@ -4,22 +4,22 @@ originalFilePath: 'src/logging.md' product: 'Cloud Native Operator' --- -The operator is designed to log in JSON format directly to standard -output, including PostgreSQL logs. +The operator is designed to log in JSON format directly to standard output, +including PostgreSQL logs. Each log entry has the following fields: - `level`: log level (`info`, `notice`, ...) - `ts`: the timestamp (epoch with microseconds) - `logger`: the type of the record (e.g. `postgres` or `pg_controldata`) -- `msg`: the actual message or the keyword `record` in case the message is - parsed in JSON format +- `msg`: the actual message or the keyword `record` in case the message is parsed in JSON format - `record`: the actual record (with structure that varies depending on the `logger` type) ## PostgreSQL log -Each entry in the PostgreSQL log is a JSON object having the `logger` key set to `postgres` and the structure described in the following example: +Each entry in the PostgreSQL log is a JSON object having the `logger` key set +to `postgres` and the structure described in the following example: ```json { @@ -56,9 +56,104 @@ Each entry in the PostgreSQL log is a JSON object having the `logger` key set to } ``` -Internally, the operator relies on the PostgreSQL CSV log format. -Please refer to the PostgreSQL documentation for more information -about the [CSV log format](https://www.postgresql.org/docs/current/runtime-config-logging.html). +Internally, the operator relies on the PostgreSQL CSV log format. Please refer +to the PostgreSQL documentation for more information about the [CSV log +format](https://www.postgresql.org/docs/current/runtime-config-logging.html). + +## PGAudit logs + +Cloud Native PostgreSQL has transparent and native support for +[PGAudit](https://www.pgaudit.org/) on PostgreSQL clusters. + +All you need to do is add the required `pgaudit` parameters to the `postgresql` +section in the configuration of the cluster. + +!!! Important + It is unnecessary to add the PGAudit library to `shared_preload_libraries`. + The library will be added automatically by Cloud Native PostgreSQL based on the + presence of `pgaudit.*` parameters in the postgresql configuration. + The operator will detect and manage the addition and removal of the + library from `shared_preload_libraries`. + +The operator also takes care of creating and removing the extension from all +the available databases in the cluster. + +!!! Important + Cloud Native PostgreSQL runs the `CREATE EXTENSION` and + `DROP EXTENSION` commands in all databases in the cluster that accept + connections.
+ +Here is an example of a PostgreSQL 13 `Cluster` deployment which will result in +`pgaudit` being enabled with the requested configuration: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + imageName: quay.io/enterprisedb/postgresql:13 + + postgresql: + parameters: + "pgaudit.log": "all, -misc" + "pgaudit.log_catalog": "off" + "pgaudit.log_parameter": "on" + "pgaudit.log_relation": "on" + + storage: + size: 1Gi +``` + +The audit CSV logs entries returned by PGAudit are then parsed and routed to +stdout in JSON format, similarly to all the remaining logs: + +- `.logger` is set to `pgaudit` +- `.msg` is set to `record` +- `.record` contains the whole parsed record as a JSON object, similar to + `logging_collector` logs - except for `.record.audit`, which contains the + PGAudit CSV message formatted as a JSON object + +See the example below: + +```json +{ + "level": "info", + "ts": 1627394507.8814096, + "logger": "pgaudit", + "msg": "record", + "record": { + "log_time": "2021-07-27 14:01:47.881 UTC", + "user_name": "postgres", + "database_name": "postgres", + "process_id": "203", + "connection_from": "[local]", + "session_id": "610011cb.cb", + "session_line_num": "1", + "command_tag": "SELECT", + "session_start_time": "2021-07-27 14:01:47 UTC", + "virtual_transaction_id": "3/336", + "transaction_id": "0", + "error_severity": "LOG", + "sql_state_code": "00000", + "backend_type": "client backend", + "audit": { + "audit_type": "SESSION", + "statement_id": "1", + "substatement_id": "1", + "class": "READ", + "command": "SELECT FOR KEY SHARE", + "statement": "SELECT pg_current_wal_lsn()", + "parameter": "" + } + } +} +``` + +Please refer to the +[PGAudit documentation](https://github.com/pgaudit/pgaudit/blob/master/README.md#format) +for more details about each field in a record. ## EDB Audit logs @@ -106,6 +201,7 @@ The audit CSV logs are parsed and routed to stdout in JSON format, similarly to - `.record` containing the whole parsed record as a JSON object See the example below: + ```json { "level": "info", @@ -149,18 +245,21 @@ See EDB Audit [documentation](https://www.enterprisedb.com/edb-docs/d/edb-postgr ## Other logs -All logs that are produced by the operator and its instances are in JSON format, with `logger` set accordingly to the process -that produced them. Therefore, all the possible `logger` values are the following ones: +All logs that are produced by the operator and its instances are in JSON +format, with `logger` set accordingly to the process that produced them. +Therefore, all the possible `logger` values are the following ones: - `barman-cloud-wal-archive` - `barman-cloud-wal-restore` +- `edb_audit` - `initdb` - `pg_basebackup` - `pg_controldata` - `pg_ctl` - `pg_rewind` +- `pgaudit` - `postgres` -- `edb_audit` -Except for `postgres` and `edb_audit` that have the aforementioned structures, all other possible values -just have `msg` set to the escaped message that is logged. +Except for `postgres` and `edb_audit` that have the aforementioned structures, +all other possible values just have `msg` set to the escaped message that is +logged. 
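+Since every entry is a JSON object, the log stream can be filtered with standard
+tools. A minimal sketch using `jq` (the pod name is illustrative):
+
+```sh
+# keep only PostgreSQL entries from the instance's log stream
+kubectl logs cluster-example-1 | jq 'select(.logger == "postgres")'
+```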
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx index 5e0953a1c0d..935fda54f09 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/monitoring.mdx @@ -10,7 +10,8 @@ For each PostgreSQL instance, the operator provides an exporter of metrics for [Prometheus](https://prometheus.io/) via HTTP, on port 9187, named `metrics`. The operator comes with a predefined set of metrics, as well as a highly configurable and customizable system to define additional queries via one or -more `ConfigMap` or `Secret` resources. +more `ConfigMap` or `Secret` resources (see the +["User defined metrics" section](#user-defined-metrics) below for details). Metrics can be accessed as follows: @@ -29,18 +30,17 @@ Please refer to the "Default roles" section in PostgreSQL [documentation](https://www.postgresql.org/docs/current/default-roles.html) for details on the `pg_monitor` role. -Currently, metrics' queries can be run only against a single database, chosen -depending on the specified `bootstrap` method in the `Cluster` resource, -according to the following logic: +Queries, by default, are run against the *main database*, as defined by +the specified `bootstrap` method of the `Cluster` resource, according +to the following logic: -- using `initdb`: queries will be run against the specified database, so the +- using `initdb`: queries will be run against the specified database by default, so the value passed as `initdb.database` or defaulting to `app` if not specified. -- not using `initdb`: queries will be run against the `postgres` database. +- not using `initdb`: queries will run against the `postgres` database, by default. + +The default database can always be overridden for a given user-defined metric, +by specifying a list of one or more databases in the `target_databases` option. -!!! Note - This behaviour will be improved starting from the next version of Cloud - Native PostgreSQL. - ### Prometheus Operator example A specific PostgreSQL cluster can be monitored using the @@ -96,7 +96,12 @@ The `customQueriesConfigMap`/`customQueriesSecret` sections contain a list of `ConfigMap`/`Secret` references specifying the key in which the custom queries are defined. Take care that the referred resources have to be created **in the same namespace as the Cluster** resource. -### Example of user defined metric +!!! Note + If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can + add a label with key `k8s.enterprisedb.io/reload` to it, otherwise you will have to reload + the instances using the `kubectl cnp reload` subcommand. + +#### Example of a user defined metric Here you can see an example of a `ConfigMap` containing a single custom query, referenced by the `Cluster` example above: @@ -107,6 +112,8 @@ kind: ConfigMap metadata: name: example-monitoring namespace: test + labels: + k8s.enterprisedb.io/reload: "" data: custom-queries: | pg_replication: @@ -123,6 +130,87 @@ data: A list of basic monitoring queries can be found in the [`cnp-basic-monitoring.yaml` file](../samples/cnp-basic-monitoring.yaml). +#### Example of a user defined metric running on multiple databases + +If the `target_databases` option lists more than one database +the metric is collected from each of them. 
+ +Database auto-discovery can be enabled for a specific query by specifying a +*shell-like pattern* (i.e., containing `*`, `?` or `[]`) in the list of +`target_databases`. If provided, the operator will expand the list of target +databases by adding all the databases returned by the execution of `SELECT +datname FROM pg_database WHERE datallowconn AND NOT datistemplate` and matching +the pattern according to [path.Match()](https://pkg.go.dev/path#Match) rules. + +!!! Note + The `*` character has a [special meaning](https://yaml.org/spec/1.2/spec.html#id2786448) in yaml, + so you need to quote (`"*"`) the `target_databases` value when it includes such a pattern. + +It is recommended that you always include the name of the database +in the returned labels, for example using the `current_database()` function +as in the following example: + +```yaml +some_query: + query: | + SELECT + current_database() as datname, + count(*) as rows + FROM some_table + metrics: + - datname: + usage: "LABEL" + description: "Name of current database" + - rows: + usage: "GAUGE" + description: "number of rows" + target_databases: + - albert + - bb + - freddie +``` + +This will produce the following metric being exposed: + +```text +cnp_some_query_rows{datname="albert"} 2 +cnp_some_query_rows{datname="bb"} 5 +cnp_some_query_rows{datname="freddie"} 10 +``` + +Here is an example of a query with auto-discovery enabled which also +runs on the `template1` database (otherwise not returned by the +aforementioned query): + +```yaml +some_query: + query: | + SELECT + current_database() as datname, + count(*) as rows + FROM some_table + metrics: + - datname: + usage: "LABEL" + description: "Name of current database" + - rows: + usage: "GAUGE" + description: "number of rows" + target_databases: + - "*" + - "template1" +``` + +The above example will produce the following metrics (provided the databases exist): + +```text +cnp_some_query_rows{datname="albert"} 2 +cnp_some_query_rows{datname="bb"} 5 +cnp_some_query_rows{datname="freddie"} 10 +cnp_some_query_rows{datname="template1"} 7 +cnp_some_query_rows{datname="postgres"} 42 +``` + ### Structure of a user defined metric Every custom query has the following basic structure: @@ -142,6 +230,9 @@ Here is a short description of all the available fields: - `query`: the SQL query to run on the target database to generate the metrics - `primary`: whether to run the query only on the primary instance - `master`: same as `primary` (for compatibility with the Prometheus PostgreSQL exporter's syntax - deprecated) + - `target_databases`: a list of databases to run the `query` against, + or a [shell-like pattern](#example-of-a-user-defined-metric-running-on-multiple-databases) + to enable auto discovery. Overwrites the default database if provided.
- `metrics`: section containing a list of all exported columns, defined as follows: - `<column name>`: the name of the column returned by the query - `usage`: one of the values described below diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx index 7461b38c07c..946df528027 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/operator_capability_levels.mdx @@ -6,11 +6,16 @@ product: 'Cloud Native Operator' This section provides a summary of the capabilities implemented by Cloud Native PostgreSQL, classified using the -["Operator SDK definition of Capability Levels"](https://sdk.operatorframework.io/docs/advanced-topics/operator-capabilities/operator-capabilities/) +["Operator SDK definition of Capability Levels"](https://operatorframework.io/operator-capabilities/) framework. ![Operator Capability Levels](./images/operator-capability-level.png) +!!! Important + Based on the [Operator Capability Levels model](operator_capability_levels.md), + you can expect a **"Level V - Auto Pilot"** set of capabilities from the + Cloud Native PostgreSQL Operator. + Each capability level is associated with a certain set of management features the operator offers: 1. Basic Install @@ -26,7 +31,7 @@ Capability level 1 involves **installation** and **configuration** of the operator. This category includes usability and user experience -enhancements, such as improvements in how users interact with the +enhancements, such as improvements in how you interact with the operator and a PostgreSQL cluster configuration. !!! Important @@ -57,7 +62,7 @@ version of the latest stable major version supported by the PostgreSQL Community and published on Quay.io by EnterpriseDB. You can use any compatible image of PostgreSQL supporting the primary/standby architecture directly by setting the `imageName` -attribute in the CR. The operator also supports `imagePullSecretsNames` +attribute in the CR. The operator also supports `imagePullSecrets` to access private container registries, as well as digests in addition to tags for finer control of container image immutability. @@ -85,7 +90,7 @@ dependencies. Storage is a critical component in a database workload. Taking advantage of Kubernetes native capabilities and resources in terms of storage, the -operator gives users enough flexibility to choose the right storage for their +operator gives you enough flexibility to choose the right storage for your workload requirements, based on what the underlying Kubernetes environment can offer. This implies choosing a particular storage class in a public cloud environment or fine-tuning the generated PVC through a @@ -113,13 +118,13 @@ Using the convention over configuration approach, the operator creates a database called `app`, by default owned by a regular Postgres user with the same name. Both the database name and the user name can be specified if required. -Although no configuration is required to run the cluster, users can customize +Although no configuration is required to run the cluster, you can customize both PostgreSQL run-time configuration and PostgreSQL Host-Based Authentication rules in the `postgresql` section of the CR.
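+As a brief, hypothetical illustration of that section (parameter values and the
+HBA rule below are arbitrary examples, not recommendations):
+
+```yaml
+spec:
+  postgresql:
+    # PostgreSQL run-time configuration
+    parameters:
+      max_connections: "200"
+      shared_buffers: "256MB"
+    # Host-Based Authentication rules
+    pg_hba:
+      - host all all 10.0.0.0/8 md5
+```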
### Pod Security Policies -For InfoSec requirements, the operator does not require privileged mode for +For InfoSec requirements, the operator does not require privileged mode for any container and enforces read only root filesystem to guarantee containers immutability for both the operator and the operand pods. It also explicitly sets the required security contexts. @@ -132,13 +137,13 @@ allocated UID and SELinux context. ### Affinity -The operator supports basic pod affinity/anti-affinity rules to deploy PostgreSQL -pods on different nodes, based on the selected `topologyKey` (for example `node` or -`zone`). it supports node affinity/anti-affinity through the `nodeSelector` -configuration attribute, to be specified as [expected by Kubernetes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) -and tolerations through the `tolerations` configuration attribute, which will be added for all the pods created by the -operator related to a specific Cluster, using kubernetes [standard syntax](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). +The cluster's `affinity` section enables fine-tuning of how pods and related +resources such as persistent volumes are scheduled across the nodes of a +Kubernetes cluster. In particular, the operator supports: +- pod affinity and anti-affinity +- node selector +- taints and tolerations ### License keys @@ -154,6 +159,13 @@ as a secret by the operator to pull down an image from a protected container registry. Beyond the expiration date, the operator will stop any reconciliation process until the license key is restored. +### Command line interface + +Cloud Native PostgreSQL does not have its own command line interface. +It simply relies on the best command line interface for Kubernetes, `kubectl`, +by providing a plugin called `cnp` to enhance and simplify your PostgreSQL +cluster management experience. + ### Current status of the cluster The operator continuously updates the status section of the CR with the @@ -178,6 +190,8 @@ Kubernetes API Server and the operator itself. The operator automatically creates a certification authority for every PostgreSQL cluster, which is used to issue and renew TLS certificates for clients' authentication, including streaming replication standby servers (instead of passwords). +Support for a custom certification authority for client certificates is +available through secrets: this also includes integration with cert-manager. Certificates can be issued with the `cnp` plugin for `kubectl`. ### TLS connections @@ -185,7 +199,8 @@ Certificates can be issued with the `cnp` plugin for `kubectl`. The operator transparently and natively supports TLS/SSL connections to encrypt client/server communications for increased security using the cluster's certification authority. -Support for custom server certificates is available through secrets. +Support for custom server certificates is available through secrets: this also +includes integration with cert-manager. ### Certificate authentication for streaming replication @@ -195,7 +210,7 @@ replication connections from the standby servers, instead of relying on a passwo ### Continuous configuration management -The operator enables users to apply changes to the `Cluster` resource YAML +The operator enables you to apply changes to the `Cluster` resource YAML section of the PostgreSQL configuration and makes sure that all instances are properly reloaded or restarted, depending on the configuration option. 
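+
+As a hedged sketch of what this means in practice (the parameter choice is
+illustrative): a change to a setting such as `work_mem` can be applied with a
+reload, while a setting such as `shared_buffers`, which PostgreSQL only reads
+at server start, requires a restart:
+
+```yaml
+  # excerpt of a Cluster definition
+  postgresql:
+    parameters:
+      work_mem: 8MB          # applied with a reload
+      shared_buffers: 512MB  # requires a restart of the instances
+```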
*Current limitations:* changes with `ALTER SYSTEM` are not detected, meaning
@@ -209,12 +224,12 @@

 The operator can be installed through a Kubernetes manifest via `kubectl
 apply`, to be used in a traditional Kubernetes installation in public
 and private cloud environments. Additionally, it can be deployed through
 the Operator Lifecycle Manager (OLM) from OperatorHub.io and the OpenShift
-Container Platform by RedHat.
+Container Platform by Red Hat. A Helm Chart for the operator is also available.

 ### Convention over configuration

 The operator supports the convention over configuration paradigm, deciding
-standard default values while allowing users to override them and customize
+standard default values while allowing you to override them and customize
 them. You can specify a deployment of a PostgreSQL cluster using
 the `Cluster` CRD in a couple of YAML code lines.
@@ -287,7 +302,7 @@ an S3 protocol destination URL (for example, to point to a specific folder in
 an AWS S3 bucket) and, optionally, a generic endpoint URL.
 WAL archiving, a prerequisite for continuous backup, does not require any
 further action from the user: the operator will automatically and transparently set
-the the `archive_command` to rely on `barman-cloud-wal-archive` to ship WAL
+the `archive_command` to rely on `barman-cloud-wal-archive` to ship WAL
 files to the defined endpoint. Users can decide the compression algorithm.

 You can define base backups in two ways: on-demand (through the `Backup`
@@ -301,7 +316,7 @@ the application container image under GNU GPL 3 terms.

 ### Full restore from a backup

-The operator enables users to bootstrap a new cluster (with its settings)
+The operator enables you to bootstrap a new cluster (with its settings)
 starting from an existing and accessible backup taken using
 `barman-cloud-backup`. Once the bootstrap process is completed, the operator
 initiates the instance in recovery mode and replays all available WAL files
@@ -311,7 +326,7 @@ from the primary.

 ### Point-In-Time Recovery (PITR) from a backup

-The operator enables users to create a new PostgreSQL cluster by recovering
+The operator enables you to create a new PostgreSQL cluster by recovering
 an existing backup to a specific point-in-time, defined with a timestamp, a
 label or a transaction ID. This capability is built on top of the full restore
 one and supports all the options available in
@@ -330,6 +345,25 @@ instances in the cluster, through the following formula:

 0 <= minSyncReplicas <= maxSyncReplicas < instances
 ```

+### Replica clusters
+
+Define a topology of PostgreSQL clusters that spans multiple Kubernetes
+clusters, by taking advantage of PostgreSQL native streaming and cascading
+replication.
+Through the `replica` option, you can set up an independent cluster that
+continuously replicates data from another PostgreSQL source of the same major
+version: such a source can be anywhere, as long as a direct streaming
+connection via TLS is allowed between the two endpoints.
+Moreover, the source can even be outside Kubernetes, running in a physical or
+virtual environment.
+Currently, only the `pg_basebackup` bootstrap method is allowed, even though
+future implementations will enable bootstrap from a backup, as well as
+WAL file shipping instead of, or on top of, WAL streaming.
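+
+A minimal sketch of a replica cluster definition might look as follows (the
+`replica` section layout is an assumption here; the external cluster
+definition follows the `cluster-replica-tls.yaml` sample shipped with the
+operator):
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-replica
+spec:
+  instances: 3
+  replica:
+    enabled: true
+    source: cluster-example
+  bootstrap:
+    pg_basebackup:
+      source: cluster-example
+  externalClusters:
+    - name: cluster-example
+      connectionParameters:
+        host: cluster-example-rw.default.svc
+        user: streaming_replica
+        sslmode: verify-full
+        dbname: postgres
+      sslKey:
+        name: cluster-example-replication
+        key: tls.key
+      sslCert:
+        name: cluster-example-replication
+        key: tls.crt
+      sslRootCert:
+        name: cluster-example-ca
+        key: ca.crt
+  storage:
+    size: 1Gi
+```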
+Replica clusters dramatically improve the business continuity posture of your
+PostgreSQL databases in Kubernetes, spanning multiple data centers and
+opening up hybrid and multi-cloud setups (currently, manual switchover
+across data centers is required, while waiting for Kubernetes federation
+native capabilities).
+
 ### Liveness and readiness probes

 The operator defines liveness and readiness probes for the Postgres
@@ -348,7 +382,7 @@ update.

 ### Scale up and down of replicas

-The operator allows users to scale up and down the number of instances in a
+The operator allows you to scale up and down the number of instances in a
 PostgreSQL cluster. New replicas are automatically started up from the
 primary server and will participate in the cluster's HA infrastructure.
 The CRD declares a "scale" subresource that allows the user to use the
@@ -357,16 +391,16 @@ The CRD declares a "scale" subresource that allows the user to use the

 ### Maintenance window and PodDisruptionBudget for Kubernetes nodes

 The operator creates a `PodDisruptionBudget` resource to limit the number of
-concurrent disruptions to one. This configuration prevents the maintenance
-operation from deleting all the pods in a cluster, allowing the specified
-number of instances to be created.
-The PodDisruptionBudget will be applied during the node draining operation,
-preventing any disruption of the cluster service.
+concurrent disruptions to one primary instance. This configuration prevents the
+maintenance operation from deleting all the pods in a cluster, allowing the
+specified number of instances to be created. The PodDisruptionBudget will be
+applied during the node draining operation, preventing any disruption of the
+cluster service.

 While this strategy is correct for Kubernetes Clusters where storage is
 shared among all the worker nodes, it may not be the best solution for
 clusters using Local Storage or for clusters installed in a private
-cloud. The operator allows users to specify a Maintenance Window and
+cloud. The operator allows you to specify a Maintenance Window and
 configure the reaction to any underlying node eviction. The `ReusePVC` option
 in the maintenance window section enables you to specify the strategy to be
 used: allocate new storage in a different PVC for the evicted instance or wait
@@ -392,13 +426,17 @@ alerting, trending, log processing.

 This might involve the use of external tools such as Prometheus, Grafana, Fluent
 Bit, as well as extensions in the PostgreSQL engine for the output of error logs
 directly in JSON format.

+Cloud Native PostgreSQL has been designed to provide everything that is needed
+to easily integrate with industry-standard and community-accepted tools for
+flexible monitoring and logging.
+
 ### Prometheus exporter with configurable queries

 The instance manager provides a pluggable framework and, via its own web server
 listening on the `metrics` port (9187), exposes an endpoint to export metrics
 for the [Prometheus](https://prometheus.io/) monitoring and alerting tool.
 The operator supports custom monitoring queries defined as `ConfigMap`
-and `Secret` objects using a syntax that is compatible with
+and/or `Secret` objects using a syntax that is compatible with
 the [`postgres_exporter` for Prometheus](https://github.com/prometheus-community/postgres_exporter).
 Cloud Native PostgreSQL provides a set of basic monitoring queries for
 PostgreSQL that can be integrated and adapted to your context.
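+
+As a sketch, custom queries stored in a `ConfigMap` can be referenced from the
+cluster's `monitoring` section (the section layout is an assumption; the
+`example-monitoring` ConfigMap and its `custom-queries` key follow the
+monitoring sample shipped with the operator):
+
+```yaml
+  # excerpt of a Cluster definition
+  monitoring:
+    customQueriesConfigMap:
+      - name: example-monitoring
+        key: custom-queries
+```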
@@ -412,11 +450,26 @@ As a result, every Pod managed by Cloud Native PostgreSQL can be easily and dire
 integrated with any downstream log processing stack that supports JSON as
 source data type.

+### Real-time query monitoring
+
+Cloud Native PostgreSQL transparently and natively manages support for:
+
+- the essential [`pg_stat_statements` extension](https://www.postgresql.org/docs/current/pgstatstatements.html),
+  which enables tracking of planning and execution statistics of all SQL
+  statements executed by a PostgreSQL server.
+- the [`auto_explain` extension](https://www.postgresql.org/docs/current/auto-explain.html),
+  which provides a means for logging execution plans of slow statements
+  automatically, without having to manually run `EXPLAIN` (helpful for tracking
+  down un-optimized queries).
+
 ### Audit

-EDB Advanced Server allows database and security administrators, auditors, and
-operators to track and analyze database activities using the EDB Audit Logging
-functionality.
+Cloud Native PostgreSQL allows database and security administrators, auditors,
+and operators to track and analyze database activities using PGAudit (for
+PostgreSQL) and the EDB Audit Logging functionality (for EDB Postgres
+Advanced).
+Such activities flow directly into the JSON log and can be properly routed to
+the correct downstream target using common log brokers like Fluentd.

 ### Kubernetes events
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx
index df0455940ff..5a381838d5d 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/postgresql_conf.mdx
@@ -125,6 +125,112 @@ changed.

 For further information, please refer to the
 ["Logging" section](logging.md).

+### Shared Preload Libraries
+
+The `shared_preload_libraries` option in PostgreSQL is used to specify one or
+more shared libraries to be pre-loaded at server start, in the form of a
+comma-separated list. Typically, it is used in PostgreSQL to load those
+extensions that need to be available to most database sessions in the whole system
+(e.g. `pg_stat_statements`).
+
+In Cloud Native PostgreSQL the `shared_preload_libraries` option is empty by
+default. Although you can override the content of `shared_preload_libraries`,
+we recommend that only expert Postgres users take advantage of this option.
+
+!!! Important
+    If a specified library is not found, the server fails to start,
+    preventing Cloud Native PostgreSQL from performing any self-healing attempt
+    and requiring manual intervention. Please make sure you always test both the
+    extensions and the settings of `shared_preload_libraries` if you plan to
+    directly manage its content.
+
+Cloud Native PostgreSQL is able to automatically manage the content of the
+`shared_preload_libraries` option for some of the most used PostgreSQL
+extensions (see the ["Managed extensions"](#managed-extensions) section below
+for details).
+
+Specifically, as soon as the operator notices that a configuration parameter
+requires one of the managed libraries, it will automatically add the needed
+library. The operator will also remove the library as soon as no parameter
+requires it.
+
+!!! Important
+    Please always keep in mind that removing libraries from
+    `shared_preload_libraries` requires a restart of all instances in the cluster
+    in order to be effective.
+
+You can provide additional `shared_preload_libraries` via
+`.spec.postgresql.shared_preload_libraries` as a list of strings: the operator
+will merge them with the ones that it automatically manages.
+
+### Managed extensions
+
+As mentioned in the previous section, Cloud Native PostgreSQL automatically
+manages the content of `shared_preload_libraries` for some well-known and
+supported extensions. The current list includes:
+
+- `pg_stat_statements`
+- `pgaudit`
+
+Some of these libraries also require additional objects in a database before
+they can be used, normally views and/or functions, managed via the
+`CREATE EXTENSION` command run in the database (the `DROP EXTENSION` command
+typically removes those objects).
+
+For such libraries, Cloud Native PostgreSQL automatically handles the creation
+and removal of the extension in all databases that accept a connection in the
+cluster, identified by the following query:
+
+```sql
+SELECT datname FROM pg_database WHERE datallowconn
+```
+
+!!! Note
+    The above query also includes template databases like `template1`.
+
+#### Enabling `pg_stat_statements`
+
+The [`pg_stat_statements`](https://www.postgresql.org/docs/current/pgstatstatements.html)
+extension is one of the most important capabilities available in PostgreSQL for
+real-time monitoring of queries.
+
+You can enable `pg_stat_statements` by adding a parameter that starts with
+`pg_stat_statements.` to the configuration, as in the following example excerpt:
+
+```yaml
+  # ...
+  postgresql:
+    parameters:
+      pg_stat_statements.max: 10000
+      pg_stat_statements.track: all
+  # ...
+```
+
+As explained previously, the operator will automatically add
+`pg_stat_statements` to `shared_preload_libraries` and run `CREATE EXTENSION IF
+NOT EXISTS pg_stat_statements` on each database, enabling you to run queries
+against the `pg_stat_statements` view.
+
+#### Enabling `auto_explain`
+
+The [`auto_explain`](https://www.postgresql.org/docs/current/auto-explain.html)
+extension provides a means for logging execution plans of slow statements
+automatically, without having to manually run `EXPLAIN` (helpful for tracking
+down un-optimized queries).
+
+You can enable `auto_explain` by adding a parameter that starts with
+`auto_explain.` to the configuration, as in the following example excerpt
+(which automatically logs execution plans of queries that take longer than
+10 seconds to complete):
+
+```yaml
+  # ...
+  postgresql:
+    parameters:
+      auto_explain.log_min_duration: '10s'
+  # ...
+``` + ## The `pg_hba` section `pg_hba` is a list of PostgreSQL Host Based Authentication rules @@ -190,13 +296,22 @@ Users are not allowed to set the following configuration parameters in the - `archive_command` - `archive_mode` - `archive_timeout` -- `bonjour_name` - `bonjour` +- `bonjour_name` - `cluster_name` - `config_file` - `data_directory` - `data_sync_retry` - `dynamic_shared_memory_type` +- `edb_audit` +- `edb_audit_destination` +- `edb_audit_directory` +- `edb_audit_filename` +- `edb_audit_rotation_day` +- `edb_audit_rotation_seconds` +- `edb_audit_rotation_size` +- `edb_audit_tag` +- `edb_log_every_bulk_value` - `event_source` - `external_pid_file` - `full_page_writes` @@ -220,6 +335,7 @@ Users are not allowed to set the following configuration parameters in the - `promote_trigger_file` - `recovery_end_command` - `recovery_min_apply_delay` +- `recovery_target` - `recovery_target_action` - `recovery_target_inclusive` - `recovery_target_lsn` @@ -227,10 +343,11 @@ Users are not allowed to set the following configuration parameters in the - `recovery_target_time` - `recovery_target_timeline` - `recovery_target_xid` -- `recovery_target` - `restart_after_crash` - `restore_command` - `shared_memory_type` +- `shared_preload_libraries` +- `ssl` - `ssl_ca_file` - `ssl_cert_file` - `ssl_ciphers` @@ -240,10 +357,9 @@ Users are not allowed to set the following configuration parameters in the - `ssl_key_file` - `ssl_max_protocol_version` - `ssl_min_protocol_version` -- `ssl_passphrase_command_supports_reload` - `ssl_passphrase_command` +- `ssl_passphrase_command_supports_reload` - `ssl_prefer_server_ciphers` -- `ssl` - `stats_temp_directory` - `synchronous_standby_names` - `syslog_facility` @@ -255,3 +371,4 @@ Users are not allowed to set the following configuration parameters in the - `unix_socket_permissions` - `wal_level` - `wal_log_hints` + diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx index e6fb2a6db94..775907bc414 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/release_notes.mdx @@ -6,6 +6,57 @@ product: 'Cloud Native Operator' History of user-visible changes for Cloud Native PostgreSQL. 
+## Version 1.7.0
+
+**Release date:** 28 July 2021
+
+Features:
+
+- Add native support for PGAudit with a new type of `logger` called `pgaudit`
+  directly available in the JSON output
+- Enhance monitoring and observability capabilities through:
+
+    - Native support for the `pg_stat_statements` and `auto_explain` extensions
+    - The `target_databases` option in the Prometheus exporter to run a
+      user-defined metric query on one or more databases (including
+      auto-discovery of databases through shell-like pattern matching)
+    - Exposure of the `manual_switchover_required` metric to promptly report
+      whether a cluster with `primaryUpdateStrategy` set to `supervised`
+      requires a manual switchover
+
+- Transparently handle `shared_preload_libraries` for `pgaudit`,
+  `auto_explain` and `pg_stat_statements`
+
+    - Automatic configuration of `shared_preload_libraries` for PostgreSQL when
+      `pg_stat_statements`, `pgaudit` or `auto_explain` options are added to
+      the `postgresql` parameters section
+
+- Support the `k8s.enterprisedb.io/reload` label to finely control the
+  automated reload of config maps and secrets, including those used for custom
+  monitoring/alerting metrics in the Prometheus exporter or to store certificates
+- Add the `reload` command to the `cnp` plugin for `kubectl` to trigger a
+  reconciliation loop on the instances
+- Improve control of pod affinity and anti-affinity configurations through
+  `additionalPodAffinity` and `additionalPodAntiAffinity`
+- Introduce a separate `PodDisruptionBudget` for primary instances, by
+  requiring at least one primary instance to run at any time
+
+Security Enhancements:
+
+- Add the `.spec.certificates.clientCASecret` and
+  `spec.certificates.replicationTLSSecret` options to define custom client
+  Certification Authority and certificate for the PostgreSQL server, to be used
+  to authenticate client certificates and secure communication between PostgreSQL
+  nodes
+- Add the `.spec.backup.barmanObjectStore.endpointCA` option to define the
+  custom Certification Authority bundle of the endpoint of Barman’s backup
+  object store
+
+Fixes:
+
+- Correctly parse histograms in the Prometheus exporter
+- Reconcile services created by the operator for a cluster
+
 ## Version 1.6.0

 **Release date:** 12 July 2021
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml
index e8b787ff622..dd0734eeadc 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-basicauth.yaml
@@ -19,11 +19,11 @@ spec:
     size: 1Gi

   externalClusters:
-  - name: cluster-example
-    connectionParameters:
-      host: cluster-example-rw.default.svc
-      user: postgres
-      dbname: postgres
-    password:
-      name: cluster-example-superuser
-      key: password
+    - name: cluster-example
+      connectionParameters:
+        host: cluster-example-rw.default.svc
+        user: postgres
+        dbname: postgres
+      password:
+        name: cluster-example-superuser
+        key: password
\ No newline at end of file
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml
index bff74a9d4cd..c8c19a26c32 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-clone-tls.yaml
@@ -13,18
+13,18 @@ spec: size: 1Gi externalClusters: - - name: cluster-example - connectionParameters: - host: cluster-example-rw.default.svc - user: streaming_replica - sslmode: verify-full - dbname: postgres - sslKey: - name: cluster-example-replication - key: tls.key - sslCert: - name: cluster-example-replication - key: tls.crt - sslRootCert: - name: cluster-example-ca - key: ca.crt + - name: cluster-example + connectionParameters: + host: cluster-example-rw.default.svc + user: streaming_replica + sslmode: verify-full + dbname: postgres + sslKey: + name: cluster-example-replication + key: tls.key + sslCert: + name: cluster-example-replication + key: tls.crt + sslRootCert: + name: cluster-example-ca + key: ca.crt diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-cert-manager.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-cert-manager.yaml new file mode 100644 index 00000000000..7d9d11a7b46 --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-cert-manager.yaml @@ -0,0 +1,120 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: server-ca +spec: + isCA: true + commonName: my-selfsigned-server-ca + secretName: server-ca-key-pair + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: server-ca-issuer +spec: + ca: + secretName: server-ca-key-pair +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-postgres-server-cert + labels: + k8s.enterprisedb.io/reload: "" +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: my-postgres-server-cert +spec: + secretName: my-postgres-server-cert + usages: + - server auth + dnsNames: + - cluster-example-lb.internal.mydomain.net + - cluster-example-rw + - cluster-example-rw.default + - cluster-example-rw.default.svc + - cluster-example-r + - cluster-example-r.default + - cluster-example-r.default.svc + - cluster-example-ro + - cluster-example-ro.default + - cluster-example-ro.default.svc + issuerRef: + name: server-ca-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: client-ca +spec: + isCA: true + commonName: my-selfsigned-client-ca + secretName: client-ca-key-pair + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: client-ca-issuer +spec: + ca: + secretName: client-ca-key-pair +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-postgres-client-cert + labels: + k8s.enterprisedb.io/reload: "" +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: my-postgres-client-cert +spec: + secretName: my-postgres-client-cert + usages: + - client auth + commonName: streaming_replica + issuerRef: + name: client-ca-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + certificates: + serverTLSSecret: my-postgres-server-cert + serverCASecret: my-postgres-server-cert + clientCASecret: my-postgres-client-cert + replicationTLSSecret: my-postgres-client-cert + storage: + size: 1Gi diff --git 
a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-custom.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-custom.yaml index 4d9ad05ad76..49223affa68 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-custom.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-custom.yaml @@ -15,6 +15,7 @@ spec: # and password from the secret cluster-example-custom-app - host all all all md5 + # Example of rolling update strategy: # - unsupervised: automated update of the primary once all # replicas have been upgraded (default) diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml index ce7e649538f..1948023f23a 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-full.yaml @@ -46,6 +46,9 @@ spec: postgresql: parameters: shared_buffers: 256MB + pg_stat_statements.max: 10000 + pg_stat_statements.track: all + auto_explain.log_min_duration: '10s' pg_hba: - host all all 10.244.0.0/16 md5 diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-initdb.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-initdb.yaml index bd3dc31427e..31a014f535e 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-initdb.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-initdb.yaml @@ -10,7 +10,7 @@ spec: database: appdb owner: appuser options: - - "-k" - - "--locale=en_US" + - "-k" + - "--locale=en_US" storage: size: 1Gi diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml index edf06934295..722b9a5916b 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-example-monitoring.yaml @@ -21,6 +21,8 @@ kind: ConfigMap metadata: namespace: default name: example-monitoring + labels: + k8s.enterprisedb.io/reload: "" data: custom-queries: | pg_replication: @@ -39,6 +41,8 @@ data: description: "Time at which postgres started" pg_stat_user_tables: + target_databases: + - "*" query: | SELECT current_database() datname, @@ -219,6 +223,8 @@ kind: Secret metadata: namespace: default name: example-monitoring-secret + labels: + k8s.enterprisedb.io/reload: "" stringData: pg-database: | pg_database: diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-basicauth.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-basicauth.yaml index 02822d92d08..a44bf799715 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-basicauth.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-basicauth.yaml @@ -23,11 +23,11 @@ spec: size: 1Gi externalClusters: - - name: cluster-example - connectionParameters: - host: cluster-example-rw.default.svc - user: postgres - dbname: postgres - password: - name: cluster-example-superuser - key: password + - name: cluster-example + connectionParameters: + host: cluster-example-rw.default.svc + user: postgres + dbname: postgres + password: + name: 
cluster-example-superuser + key: password \ No newline at end of file diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-tls.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-tls.yaml index 4b9cdcebd52..c879dcb2d67 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-tls.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cluster-replica-tls.yaml @@ -17,18 +17,18 @@ spec: size: 1Gi externalClusters: - - name: cluster-example - connectionParameters: - host: cluster-example-rw.default.svc - user: streaming_replica - sslmode: verify-full - dbname: postgres - sslKey: - name: cluster-example-replication - key: tls.key - sslCert: - name: cluster-example-replication - key: tls.crt - sslRootCert: - name: cluster-example-ca - key: ca.crt + - name: cluster-example + connectionParameters: + host: cluster-example-rw.default.svc + user: streaming_replica + sslmode: verify-full + dbname: postgres + sslKey: + name: cluster-example-replication + key: tls.key + sslCert: + name: cluster-example-replication + key: tls.crt + sslRootCert: + name: cluster-example-ca + key: ca.crt diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml index 3c89291d04d..75c6a220c4f 100644 --- a/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml +++ b/advocacy_docs/kubernetes/cloud_native_postgresql/samples/cnp-basic-monitoring.yaml @@ -2,15 +2,17 @@ apiVersion: v1 kind: ConfigMap metadata: name: cnp-basic-monitoring + labels: + k8s.enterprisedb.io/reload: "" data: custom-queries: | - backend_summary: + backends: query: | SELECT sa.datname - , states.state , sa.usename , sa.application_name - , COALESCE(sa.count, 0) AS backends + , states.state + , COALESCE(sa.count, 0) AS total , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds FROM ( VALUES ('active') , ('idle') @@ -24,7 +26,7 @@ data: , state , usename , COALESCE(application_name, '') AS application_name - , count(*) + , COUNT(*) , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs FROM pg_stat_activity GROUP BY datname, state, usename, application_name @@ -34,24 +36,51 @@ data: - datname: usage: "LABEL" description: "Name of the database" - - state: - usage: "LABEL" - description: "State of the backend" - usename: usage: "LABEL" description: "Name of the user" - application_name: usage: "LABEL" description: "Name of the application" - - backends: + - state: + usage: "LABEL" + description: "State of the backend" + - total: usage: "GAUGE" description: "Number of backends" - max_tx_duration_seconds: usage: "GAUGE" description: "Maximum duration of a transaction in seconds" + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND 
blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + pg_database: - query: "SELECT d.datname, pg_database_size(d.datname) as size_bytes FROM pg_database d" + query: | + SELECT datname + , pg_database_size(datname) AS size_bytes + , age(datfrozenxid) AS age + FROM pg_database metrics: - datname: usage: "LABEL" @@ -59,13 +88,18 @@ data: - size_bytes: usage: "GAUGE" description: "Disk space used by the database" + - age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" pg_postmaster: - query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()" + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_postmaster_start_time() metrics: - - start_time_seconds: + - start_time: usage: "GAUGE" - description: "Time at which postgres started" + description: "Time at which postgres started (based on epoch)" pg_replication: query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag" @@ -91,7 +125,12 @@ data: description: "Replication lag in bytes" pg_stat_archiver: - query: "SELECT archived_count, failed_count, extract(epoch from now() - last_archived_time) AS last_archived_age FROM pg_stat_archiver" + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + FROM pg_stat_archiver metrics: - archived_count: usage: "COUNTER" @@ -99,12 +138,26 @@ data: - failed_count: usage: "COUNTER" description: "Number of failed attempts for archiving WAL files" - - last_archived_age: + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: usage: "GAUGE" - description: "Time in seconds since last WAL segment was successfully archived" + description: "Seconds since the last failed archival operation" pg_stat_bgwriter: - query: "SELECT checkpoints_timed, checkpoints_req, checkpoint_write_time, checkpoint_sync_time, buffers_checkpoint, buffers_clean, maxwritten_clean, buffers_backend, buffers_backend_fsync, buffers_alloc, stats_reset FROM pg_stat_bgwriter" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_stat_bgwriter metrics: - checkpoints_timed: usage: "COUNTER" @@ -136,9 +189,75 @@ data: - buffers_alloc: usage: "COUNTER" description: "Number of buffers allocated" - - stats_reset: + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - 
xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: usage: "COUNTER" - description: "Time at which these statistics were last reset" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" pg_stat_database_conflicts: query: "SELECT datname, confl_tablespace, confl_lock, confl_snapshot, confl_bufferpin, confl_deadlock FROM pg_stat_database_conflicts" @@ -262,15 +381,17 @@ data: pg_stat_replication: query: | SELECT usename - , COALESCE(application_name, '') AS application_name - , COALESCE(client_addr::text, '') AS client_addr - , pg_wal_lsn_diff(pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes - , pg_wal_lsn_diff(pg_current_wal_lsn(), write_lsn) AS write_diff_bytes - , pg_wal_lsn_diff(pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes - , COALESCE(pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes - , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(age(backend_xmin), 0) AS backend_xmin_age + , pg_wal_lsn_diff(pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_wal_lsn_diff(pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_wal_lsn_diff(pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds FROM 
pg_stat_replication
     metrics:
       - usename:
@@ -282,6 +403,12 @@ data:
       - client_addr:
           usage: "LABEL"
           description: "Client IP address"
+      - backend_start:
+          usage: "COUNTER"
+          description: "Time when this process was started"
+      - backend_xmin_age:
+          usage: "COUNTER"
+          description: "The age of this standby's xmin horizon"
       - sent_diff_bytes:
           usage: "GAUGE"
           description: "Difference in bytes from the last write-ahead log location sent on this connection"
@@ -343,25 +470,15 @@ data:
           usage: "COUNTER"
           description: "Number of buffer hits in this table's TOAST table indexes (if any)"

-  waiting_backends:
+  pg_settings:
     query: |
-      SELECT count(*) AS total
-      FROM pg_catalog.pg_locks blocked_locks
-      JOIN pg_catalog.pg_locks blocking_locks
-        ON blocking_locks.locktype = blocked_locks.locktype
-        AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
-        AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
-        AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
-        AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
-        AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
-        AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
-        AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
-        AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
-        AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
-        AND blocking_locks.pid != blocked_locks.pid
-      JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
-      WHERE NOT blocked_locks.granted
+      SELECT name, setting
+      FROM pg_settings
+      WHERE vartype NOT IN ('string', 'enum', 'bool')
     metrics:
-      - total:
+      - name:
+          usage: "LABEL"
+          description: "Name of the setting"
+      - setting:
           usage: "GAUGE"
-          description: "Total number of backends that are currently waiting on other queries"
+          description: "Setting value"
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
new file mode 100644
index 00000000000..b8cd2ceeb41
--- /dev/null
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/scheduling.mdx
@@ -0,0 +1,151 @@
+---
+title: 'Scheduling'
+originalFilePath: 'src/scheduling.md'
+product: 'Cloud Native Operator'
+---
+
+Scheduling, in Kubernetes, is the process responsible for placing a new pod on
+the best node possible, based on several criteria.
+
+!!! Seealso "Kubernetes documentation"
+    Please refer to the
+    [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/)
+    for more information on scheduling, including all the available policies. On
+    this page, we assume you are familiar with concepts like affinity,
+    anti-affinity, node selectors, and so on.
+
+You can control how the Cloud Native PostgreSQL cluster's instances should be
+scheduled through the [`affinity`](api_reference.md#AffinityConfiguration)
+section in the definition of the cluster, which supports:
+
+- pod affinity/anti-affinity
+- node selectors
+- tolerations
+
+!!! Info
+    Cloud Native PostgreSQL does not support pod templates for finer control
+    over the scheduling of workloads. While they were part of the initial concept,
+    the development team decided to postpone their introduction to a newer
+    version of the API (most likely v2 of CNP).
+
+## Pod affinity and anti-affinity
+
+Kubernetes allows you to control on which nodes a pod should (*affinity*) or
+should not (*anti-affinity*) be scheduled, based on the actual workloads already
+running on those nodes.
+This is technically known as **inter-pod affinity/anti-affinity**.
+
+By default, Cloud Native PostgreSQL will configure the cluster's instances to
+preferably run on different nodes, resulting in the following `affinity` definition:
+
+```yaml
+affinity:
+  podAntiAffinity:
+    preferredDuringSchedulingIgnoredDuringExecution:
+    - podAffinityTerm:
+        labelSelector:
+          matchExpressions:
+          - key: postgresql
+            operator: In
+            values:
+            - cluster-example
+        topologyKey: kubernetes.io/hostname
+      weight: 100
+```
+
+This is the result of the following Cluster spec:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  imageName: quay.io/enterprisedb/postgresql:13.3
+
+  affinity:
+    enablePodAntiAffinity: true #default value
+    topologyKey: kubernetes.io/hostname #default value
+    podAntiAffinityType: preferred #default value
+
+  storage:
+    size: 1Gi
+```
+
+Therefore, Kubernetes will *prefer* to schedule a 3-node PostgreSQL cluster across 3
+different nodes - resources permitting.
+
+The aforementioned default behavior can be changed by tweaking the above settings.
+
+`podAntiAffinityType` can be set to `required`, resulting in
+`requiredDuringSchedulingIgnoredDuringExecution` being used instead of
+`preferredDuringSchedulingIgnoredDuringExecution`. Please be aware that such a
+strong requirement might result in pending instances in case resources are not
+available (which is an expected condition when using
+[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler)
+for automated horizontal scaling of a Kubernetes cluster).
+
+!!! Seealso "Inter-pod affinity and anti-affinity"
+    More information on this topic can be found in the
+    [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity).
+
+Another possible value for `topologyKey` in a cloud environment can be
+`topology.kubernetes.io/zone`, to make sure pods are spread across
+availability zones and not just nodes. Please refer to
+["Well-Known Labels, Annotations and Taints"](https://kubernetes.io/docs/reference/labels-annotations-taints/)
+for more options.
+
+You can disable the operator's generated anti-affinity policies by setting
+`enablePodAntiAffinity` to false.
+
+Additionally, in case more fine-grained control is needed, you can specify a
+list of custom pod affinity or anti-affinity rules via the
+`additionalPodAffinity` and `additionalPodAntiAffinity` configuration
+attributes. These rules will be added to the ones generated by the operator,
+if enabled, or passed transparently otherwise.
+
+!!! Note
+    You have to pass to `additionalPodAntiAffinity` or `additionalPodAffinity`
+    the whole content of `podAntiAffinity` or `podAffinity` that is expected by the
+    Pod spec (please look at the following YAML as an example of allowing only one
+    instance of PostgreSQL per worker node, regardless of which
+    PostgreSQL cluster it belongs to).
+
+```yaml
+  additionalPodAntiAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions:
+            - key: postgresql
+              operator: Exists
+              values: []
+        topologyKey: "kubernetes.io/hostname"
+```
+
+## Node selection through `nodeSelector`
+
+Kubernetes lets you use `nodeSelector` to provide a list of labels (defined as
+key-value pairs) to select the nodes on which a pod can run. Specifically,
+the node must have each indicated key-value pair as labels for the
+pod to be scheduled and run.
+
+Similarly, Cloud Native PostgreSQL allows you to define a `nodeSelector` in the
+`affinity` section, so that you can request a PostgreSQL cluster to run only
+on nodes that have those labels.
+
+## Tolerations
+
+Kubernetes allows you to specify, through `taints`, whether a node should repel
+all pods not explicitly tolerating (through `tolerations`) their `taints`.
+
+So, by setting a proper set of `tolerations` for a workload that match a specific
+node's `taints`, the Kubernetes scheduler will also consider the tainted node
+when deciding where to schedule the new workload.
+Tolerations can be configured for all the pods of a Cluster through the
+`.spec.affinity.tolerations` section, which accepts the usual Kubernetes syntax
+for tolerations.
+
+!!! Seealso "Taints and Tolerations"
+    More information on taints and tolerations can be found in the
+    [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
index 3e422be50b8..0067917f96e 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/ssl_connections.mdx
@@ -4,10 +4,14 @@ originalFilePath: 'src/ssl_connections.md'
 product: 'Cloud Native Operator'
 ---

+!!! Seealso "Certificates"
+    Please refer to the ["Certificates"](certificates.md)
+    page for more details on how Cloud Native PostgreSQL supports TLS certificates.
+
 The Cloud Native PostgreSQL operator has been designed to work with TLS/SSL for both encryption in transit and
 authentication, on server and client sides. Clusters created using the CNP operator come with a Certification
 Authority (CA) to create and sign TLS client certificates. Through the `cnp` plugin for `kubectl` you can
-issue a new TLS client certificate which can be used to authenticate a user in lieu of passwords.
+issue a new TLS client certificate which can be used to authenticate a user instead of using passwords.

 Please refer to the following steps to authenticate via TLS/SSL certificates, which assume you have
 installed a cluster using the [cluster-example.yaml](../samples/cluster-example.yaml) deployment manifest.
@@ -15,10 +19,9 @@ According to the convention over configuration paradigm, that file automatically
 which is owned by a user called `app` (you can change this convention through the `initdb`
 configuration in the `bootstrap` section).

-
 ## Issuing a new certificate

-!!! See also "About CNP plugin for kubectl"
+!!! Seealso "About CNP plugin for kubectl"
     Please refer to the ["Certificates" section in the "Cloud Native PostgreSQL Plugin"](cnp-plugin.md#certificates)
     page for details on how to use the plugin for `kubectl`.
@@ -56,7 +59,7 @@ Certificate:
         Subject: CN = app
 ```

-As you can see, TLS client certificates by default are created with one year of validity, and with a simple CN that
+As you can see, TLS client certificates by default are created with 90 days of validity, and with a simple CN that
 corresponds to the username in PostgreSQL. This is necessary to leverage the `cert` authentication method for `hostssl`
 entries in `pg_hba.conf`.
@@ -122,9 +125,10 @@ spec:

 This Pod will mount secrets managed by the Cloud Native PostgreSQL operator, including:

-* TLS client certificate
-* TLS client certificate private key
-* TLS Certification Authority certificate
+* `sslcert`: the TLS client public certificate
+* `sslkey`: the TLS client certificate private key
+* `sslrootcert`: the TLS Certification Authority certificate that signed the server's
+  certificate, to be used to verify the identity of the instances

 They will be used to create the default resources that `psql` (and other libpq based applications like `pgbench`)
 requires to establish a TLS encrypted connection to the Postgres database.
@@ -146,9 +150,9 @@ authentication using TLS certificates we just created.

 A readiness probe has been configured to ensure that the application is ready when the database server can be
 reached.

-You can verify that the connection works by executing an interactive `bash` inside the Pod's container to run `psql` using the necessary
-options. The PostgreSQL server is exposed through the read-write Kubernetes service. We will point the `psql`
-command to connect to this service:
+You can verify that the connection works by executing an interactive `bash` inside the Pod's container to run `psql`
+using the necessary options. The PostgreSQL server is exposed through the read-write Kubernetes service. We will point
+the `psql` command to connect to this service:

 ```shell
 kubectl exec -it cert-test -- bash -c "psql
diff --git a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
index ae0f656c123..ac75fbcb625 100644
--- a/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
+++ b/advocacy_docs/kubernetes/cloud_native_postgresql/storage.mdx
@@ -5,13 +5,13 @@ product: 'Cloud Native Operator'
 ---

 **Storage is the most critical component in a database workload**.
-Expectations are for storage to be always available, scale, perform well,
+Storage should always be available, scale, perform well,
 and guarantee consistency and durability. The same expectations and
 requirements that apply to traditional environments, such as virtual machines
 and bare metal, are also valid in container contexts managed by Kubernetes.

 !!! Important
-    Kubernetes has its own specificities when it comes to dynamically
+    Kubernetes has its own specificities when it comes to dynamically
     provisioned storage. These include *storage classes*, *persistent
     volumes*, and *persistent volume claims*. You need to own these
     concepts, on top of all the valuable knowledge you have built over
@@ -88,7 +88,7 @@ spec:
 ```

 Using the previous configuration, the generated PVCs will be satisfied by the default storage
-class. If the target Kubernetes cluster has no default storage class, or if you need your PVCs
+class. If the target Kubernetes cluster has no default storage class, or even if you need your PVCs
 to be satisfied by a known storage class, you can set it into the custom resource:

 ```yaml
@@ -157,10 +157,97 @@ that, you will need to delete the Pod to trigger the resize.
The best way to proceed is to delete one Pod at a time, starting from replicas and waiting
 for each Pod to be back up.

+### Workaround for volume expansion on AKS
+
+This paragraph covers the [Azure issue on AKS storage classes](https://github.com/Azure/AKS/issues/1477), which are supposed to support
+online resizing, but actually require the following workaround.
+
+Let's suppose you have a cluster with three replicas:
+
+```
+$ kubectl get pods
+NAME                READY   STATUS    RESTARTS   AGE
+cluster-example-1   1/1     Running   0          2m37s
+cluster-example-2   1/1     Running   0          2m22s
+cluster-example-3   1/1     Running   0          2m10s
+```
+
+An Azure disk can only be expanded while in "unattached" state, as described in the
+[docs](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/known-issues/sizegrow.md).
+This means that, to resize a disk used by a PostgreSQL cluster, you will need to perform a manual rollout,
+first cordoning the node that hosts the Pod using the PVC bound to the disk. This will prevent the operator
+from recreating the Pod and immediately reattaching it to its PVC before the background disk resizing has completed.
+
+The first step is to edit the cluster definition, applying the new size, let's say "2Gi", as follows:
+
+```
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+
+  storage:
+    storageClass: default
+    size: 2Gi
+```
+
+Assuming the `cluster-example-1` Pod is the cluster's primary, we can proceed with the replicas first.
+For example, start by cordoning the Kubernetes node that hosts the `cluster-example-3` Pod:
+
+```
+kubectl cordon <node name>
+```
+
+Then delete the `cluster-example-3` Pod:
+
+```
+$ kubectl delete pod/cluster-example-3
+```
+
+Run the following command:
+
+```
+kubectl get pvc -w -o=jsonpath='{.status.conditions[].message}' cluster-example-3
+```
+
+Wait until you see the following output:
+
+```
+Waiting for user to (re-)start a Pod to finish file system resize of volume on node.
+```
+
+Then, you can uncordon the node:
+
+```
+kubectl uncordon <node name>
+```
+
+Wait for the Pod to be recreated correctly and to get into a Running and Ready state:
+
+```
+kubectl get pods -w cluster-example-3
+cluster-example-3   0/1     Init:0/1   0          12m
+cluster-example-3   1/1     Running    0          12m
+```
+
+Now verify the PVC expansion by running the following command, which should return "2Gi" as configured:
+
+```
+kubectl get pvc cluster-example-3 -o=jsonpath='{.status.capacity.storage}'
+```
+
+You can now repeat these steps for the remaining Pods.
+
+!!! Important
+    Please leave the resizing of the disk associated with the primary instance as the last operation,
+    after promoting a new, already resized Pod through a switchover, using `kubectl cnp promote`
+    (e.g. `kubectl cnp promote cluster-example 3` to promote `cluster-example-3` to primary).
+
 ### Recreating storage

-Suppose the storage class does not support volume expansion. In that case, you can still regenerate your cluster
-on different PVCs by allocating new PVCs with increased storage and then move the
+If the storage class does not support volume expansion, you can still regenerate your cluster
+on different PVCs, by allocating new PVCs with increased storage and then moving the
 database there. This operation is feasible only when the cluster contains more
 than one node.
While you do that, you need to prevent the operator from changing the existing PVC @@ -180,7 +267,7 @@ spec: resizeInUseVolumes: False ``` -To move the entire cluster to a different storage area, you need to recreate all the PVCs and +In order to move the entire cluster to a different storage area, you need to recreate all the PVCs and all the Pods. Let's suppose you have a cluster with three replicas like in the following example: diff --git a/docker/images/Dockerfile.pdf-builder b/docker/images/Dockerfile.pdf-builder index 2153b725871..eae67428b87 100644 --- a/docker/images/Dockerfile.pdf-builder +++ b/docker/images/Dockerfile.pdf-builder @@ -1,9 +1,9 @@ FROM ubuntu:focal -ARG PANDOC_VERSION=2.12 -ARG PANDOC_DEB=pandoc-2.12-1-amd64.deb -ARG WKHTML_TO_PDF_VERSION=0.12.6-1 -ARG WKHTML_TO_PDF_DEB=wkhtmltox_${WKHTML_TO_PDF_VERSION}.focal_amd64.deb +ARG PANDOC_VERSION=2.14.1 +ARG PANDOC_DEB=pandoc-${PANDOC_VERSION}-1-amd64.deb +ARG WKHTML_TO_PDF_VERSION=0.12.6 +ARG WKHTML_TO_PDF_DEB=wkhtmltox_${WKHTML_TO_PDF_VERSION}-1.focal_amd64.deb RUN apt-get update && apt-get install --no-install-recommends -y \ python3 \ @@ -12,14 +12,14 @@ RUN apt-get update && apt-get install --no-install-recommends -y \ && rm -rf /var/lib/apt/lists/* # Install Pandoc -RUN wget --no-check-certificate -P /tmp https://github.com/jgm/pandoc/releases/download/${PANDOC_VERSION}/${PANDOC_DEB} \ +RUN wget --quiet --no-check-certificate -P /tmp https://github.com/jgm/pandoc/releases/download/${PANDOC_VERSION}/${PANDOC_DEB} \ && apt-get install --no-install-recommends -y /tmp/${PANDOC_DEB} \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* \ && rm -rf /tmp/* # Install wkhtmltopdf -RUN wget --no-check-certificate -P /tmp https://github.com/wkhtmltopdf/packaging/releases/download/${WKHTML_TO_PDF_VERSION}/${WKHTML_TO_PDF_DEB} \ +RUN wget --quiet --no-check-certificate -P /tmp https://github.com/wkhtmltopdf/packaging/releases/download/${WKHTML_TO_PDF_VERSION}-1/${WKHTML_TO_PDF_DEB} \ && apt-get update \ && apt-get install --no-install-recommends -y /tmp/${WKHTML_TO_PDF_DEB} \ && apt-get clean \ diff --git a/gatsby-node.js b/gatsby-node.js index 38751e1a85c..70995caaed6 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -26,8 +26,33 @@ const { writeFile, } = require("./src/constants/gatsby-utils.js"); -const isBuild = process.env.NODE_ENV === "production"; -const isProduction = process.env.APP_ENV === "production"; +const gitData = (() => { + // if this build was triggered by a GH action in response to a PR, + // use the head ref (the branch that someone is requesting be merged) + let branch = process.env.GITHUB_HEAD_REF; + // if this process was otherwise triggered by a GH action, use the current branch name + if (!branch) branch = process.env.GITHUB_REF; + // assuming this is triggered by a GH action, this will be the commit that triggered the workflow + let sha = process.env.GITHUB_SHA; + // non-GH Action build? Try actually running Git for the name & sha... + if (!branch) { + try { + branch = execSync("git rev-parse --abbrev-ref HEAD").toString(); + sha = execSync("git rev-parse HEAD").toString(); + } catch {} + } + if (!branch) + branch = process.env.APP_ENV === "production" ? 
"main" : "develop"; + if (!sha) sha = ""; + + branch = branch + .trim() + .replace(/^refs\/heads\//, "") + .replace(/^refs\/tags\//, ""); + sha = sha.trim(); + + return { branch, sha }; +})(); const currentBranchName = (() => { // if this build was triggered by a GH action in response to a PR, @@ -256,11 +281,13 @@ const createDoc = (navTree, prevNext, doc, productVersions, actions) => { const isIndexPage = isPathAnIndexPage(doc.fileAbsolutePath); const docsRepoUrl = "https://github.com/EnterpriseDB/docs"; + // don't encourage folks to edit on main - set the edit links to develop in production builds + const branch = gitData.branch === "main" ? "develop" : gitData.branch; const fileUrlSegment = removeTrailingSlash(doc.fields.path) + (isIndexPage ? "/index.mdx" : ".mdx"); - const githubFileLink = `${docsRepoUrl}/commits/${currentBranchName}/product_docs/docs${fileUrlSegment}`; - const githubEditLink = `${docsRepoUrl}/edit/${currentBranchName}/product_docs/docs${fileUrlSegment}`; + const githubFileLink = `${docsRepoUrl}/commits/${branch}/product_docs/docs${fileUrlSegment}`; + const githubEditLink = `${docsRepoUrl}/edit/${branch}/product_docs/docs${fileUrlSegment}`; const githubIssuesLink = `${docsRepoUrl}/issues/new?title=Feedback%20on%20${encodeURIComponent( fileUrlSegment, )}`; @@ -314,12 +341,14 @@ const createAdvocacy = (navTree, prevNext, doc, learn, actions) => { ); const advocacyDocsRepoUrl = "https://github.com/EnterpriseDB/docs"; + // don't encourage folks to edit on main - set the edit links to develop in production builds + const branch = gitData.branch === "main" ? "develop" : gitData.branch; const isIndexPage = isPathAnIndexPage(doc.fileAbsolutePath); const fileUrlSegment = removeTrailingSlash(doc.fields.path) + (isIndexPage ? "/index.mdx" : ".mdx"); - const githubFileLink = `${advocacyDocsRepoUrl}/commits/${currentBranchName}/advocacy_docs${fileUrlSegment}`; - const githubEditLink = `${advocacyDocsRepoUrl}/edit/${currentBranchName}/advocacy_docs${fileUrlSegment}`; + const githubFileLink = `${advocacyDocsRepoUrl}/commits/${branch}/advocacy_docs${fileUrlSegment}`; + const githubEditLink = `${advocacyDocsRepoUrl}/edit/${branch}/advocacy_docs${fileUrlSegment}`; const githubIssuesLink = `${advocacyDocsRepoUrl}/issues/new?title=Regarding%20${encodeURIComponent( fileUrlSegment, )}`; @@ -389,21 +418,6 @@ exports.sourceNodes = async ({ createContentDigest, }) => { // create edb-git node - const sha = ( - await new Promise((resolve, reject) => { - exec("git rev-parse HEAD", (error, stdout, stderr) => resolve(stdout)); - }) - ).trim(); - - const branch = ( - await new Promise((resolve, reject) => { - exec("git branch --show-current", (error, stdout, stderr) => - resolve(stdout), - ); - }) - ).trim(); - - const gitData = { sha, branch }; createNode({ ...gitData, id: createNodeId("edb-git"), diff --git a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx index f8beb010152..0d9dbc72235 100644 --- a/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx +++ b/merge_sources/kubernetes/cloud_native_postgresql/interactive_demo.mdx @@ -65,7 +65,7 @@ You will see one node called `minikube`. 
If the status isn't yet "Ready", wait f Now that the Minikube cluster is running, you can proceed with Cloud Native PostgreSQL installation as described in the ["Installation"](installation_upgrade.md) section: ```shell -kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.6.0.yaml +kubectl apply -f https://get.enterprisedb.io/cnp/postgresql-operator-1.7.0.yaml __OUTPUT__ namespace/postgresql-operator-system created customresourcedefinition.apiextensions.k8s.io/backups.postgresql.k8s.enterprisedb.io created @@ -278,7 +278,7 @@ curl -sSfL \ sudo sh -s -- -b /usr/local/bin __OUTPUT__ EnterpriseDB/kubectl-cnp info checking GitHub for latest tag -EnterpriseDB/kubectl-cnp info found version: 1.6.0 for v1.6.0/linux/x86_64 +EnterpriseDB/kubectl-cnp info found version: 1.7.0 for v1.7.0/linux/x86_64 EnterpriseDB/kubectl-cnp info installed /usr/local/bin/kubectl-cnp ``` diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/01_introduction.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/01_introduction.mdx index 05d847a6e66..ccb1933194f 100644 --- a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/01_introduction.mdx +++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/01_introduction.mdx @@ -1,56 +1,25 @@ --- -title: "Architecture Overview" +title: "Failover Manager" --- -This guide explains how to configure Failover Manager and Pgpool best to leverage the benefits that they provide for Advanced Server. Using the reference architecture described in the Architecture section, you can learn how to achieve high availability by implementing an automatic failover mechanism (with Failover Manager) while scaling the system for larger workloads and an increased number of concurrent clients with read-intensive or mixed workloads to achieve horizontal scaling/read-scalability (with Pgpool). +Failover Manager is a high-availability module that monitors the health +of a Postgres streaming replication cluster and verifies failures +quickly. When a database failure occurs, Failover Manager can +automatically promote a streaming replication Standby node into a +writable Primary node to ensure continued performance and protect +against data loss with minimal service interruption. -The architecture described in this document has been developed and tested for EFM 4.2, EDB pgPool 4.2, and Advanced Server 13. +A Failover Manager cluster is comprised of Failover Manager processes +that reside on the following hosts on a network: -Documentation for Advanced Server and Failover Manager are available from EnterpriseDB at: +- A Primary node is the Primary database server that is servicing + database clients. - +- One or more Standby nodes are streaming replication servers + associated with the Primary node. -Documentation for pgPool-II can be found at: - - - -## Failover Manager Overview - -Failover Manager is a high-availability module that monitors the health of a Postgres streaming replication cluster and verifies failures quickly. When a database failure occurs, Failover Manager can automatically promote a streaming replication Standby node into a writable Primary node to ensure continued performance and protect against data loss with minimal service interruption. - -**Basic EFM Architecture Terminology** - -A Failover Manager cluster is comprised of EFM processes that reside on the following hosts on a network: - -- A **Primary** node is the Primary database server that is servicing database clients. 
-- One or more **Standby nodes** are streaming replication servers associated with the Primary node.
-- The **Witness node** confirms assertions of either the Primary or a Standby in a failover scenario. If, during a failure situation, the Primary finds itself in a partition with half or more of the nodes, it will stay Primary. As such, EFM supports running in a cluster with an even number of agents.
-
-## Pgpool-II Overview
-
-Pgpool-II (Pgpool) is an open-source application that provides connection pooling and load balancing for horizontal scalability of SELECT queries on multiple Standbys in EPAS and community Postgres clusters. For every backend, a backend_weight parameter can set the ratio of read traffic to be directed to the backend node. To prevent read traffic on the Primary node, the backend_weight parameter can be set to 0. In such cases, data modification language (DML) queries (i.e., INSERT, UPDATE, and DELETE) will still be sent to the Primary node, while read queries are load-balanced to the Standbys, providing scalability with mixed and read-intensive workloads.
-
-EnterpriseDB supports the following Pgpool functionality:
-
-- Load balancing
-- Connection pooling
-- High availability
-- Connection limits
-
-### PCP Overview
-
-Pgpool provides an interface called PCP for administrators that performs management operations such as retrieving the status of Pgpool or terminating Pgpool processes remotely. PCP commands are UNIX commands that manipulate Pgpool via the network.
-
-### Pgpool Watchdog
-
-`watchdog` is an optional sub process of Pgpool that provides a high availability feature. Features added by `watchdog` include:
-
-- Health checking of the pgpool service
-- Mutual monitoring of other watchdog processes
-- Changing leader/Standby state if certain faults are detected
-- Automatic virtual IP address assigning synchronous to server switching
-- Automatic registration of a server as a Standby during recovery
-
-More information about the `Pgpool watchdog` component can be found at:
-
- 
+- The Witness node confirms assertions of either the Primary or a
+  Standby in a failover scenario. If, during a failure situation, the
+  Primary finds itself in a partition with half or more of the nodes,
+  it will stay Primary. As such, Failover Manager supports running in
+  a cluster with an even number of agents.
\ No newline at end of file
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/02_architecture.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/02_architecture.mdx
index d2ae10a0d3a..809fb3a4bc5 100644
--- a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/02_architecture.mdx
+++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/02_architecture.mdx
@@ -1,28 +1,56 @@
 ---
-title: "Architecture"
+title: "Supported architectures for HA with Failover Manager"
 ---
-![A typical EFM and Pgpool configuration](images/edb_ha_architecture.png)
+To ensure the high availability of your database, core features of Failover Manager can be combined with the Postgres connection libraries (client connect failover) and/or connection poolers.
-The sample architecture diagram shows four nodes as described in the table below:
+With the capabilities of Failover Manager, EDB has designed four basic
+architectures to run a high-availability environment:
 
-| **Systems** | **Components** |
-| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Primary Pgpool/EFM witness node | The Primary Pgpool node will only run Pgpool, and EFM witness, as such leaving as much resources available to Pgpool as possible. During normal runmode (no Pgpool Failovers), the Primary Pgpool node has attached the Virtual IP address, and all applications connect through the Virtual IP address to Pgpool. Pgpool will forward all write traffic to the Primary Database node, and will balance all read across all Standby nodes.On the Primary Pgpool node, the EFM witness process ensures that a minimum quota of three EFM agents remains available even if one of the database nodes fails. Some examples are when a node is already unavailable due to maintenance, or failure, and another failure occurs. |
-| Primary Database node | The Primary Database node will only run Postgres (Primary)and EFM, leaving all resources to Postgres. Read/Write traffic (i.e., INSERT, UPDATE, DELETE) is forwarded to this node by the Primary Pgpool node. |
-| Standby nodes | The Standby nodes are running Postgres (Standby), EFM and an inactive Pgpool process. In case of a Primary database failure, EFM will promote Postgres on one of these Standby nodes to handle read-write traffic. In case of a Primary Pgpool failure, the Pgpool watchdog will activate Pgpool on one of the Standby nodes which will attach the VIP, and handle the forwarding of the application connections to the Database nodes. Note that in a double failure situation (both the Primary Pgpool node and the Primary Database node are in failure), both of these Primary processes might end up on the same node. |
+1. [Failover Manager using VIP (virtual IP)](03_efm_vip): Failover Manager has a key
+capability to manage VIP addresses out of the box. VIP addresses
+allow applications to connect to a single IP address, which is
+routed to the Primary database server. This architecture is the most
+basic solution to run when VIP addresses are available in your
+environment.
 
-This architecture:
+2. [Failover Manager using client connect failover](04_efm_client_connect_failover):
+PostgreSQL client libraries like libpq and JDBC allow for client
+connection failover. With client connection failover, the connection
+string contains multiple servers (host=srv1,srv2), and the
+client library loops over the available hosts to find a
+connection that is available and capable of Read/Write operations.
+This capability allows clients to follow the master during a
+switchover (see the example connection string after this list). This solution does not rely on Virtual IP addresses and
+can be used in every environment where such client configurations
+can be set.
 
-- Achieves high availability by providing two Standbys that can be promoted in case of a Primary Postgres node failure.
-- Achieves high availability by providing at least three Pgpool processes in a watchdog configuration.
-- Increases performance with mixed and read-intensive workloads by introducing increased read scalability with more than one Standby for load balancing.
-- Reduces load on the Primary database node by redirecting read-only traffic with the Primary pgpool node.
-- Prevents resource contention between Pgpool and Postgres on the Primary Database node. By not running Pgpool on the Primary database node, the Primary Postgres process can utilize as much resources as possible.
-- Prevents resource contention between pgpool and Postgres on the Primary Pgpool node. By not running Standby databases on the Primary Pgpool node, Pgpool can utilize as many resources as possible.
-- Optionally, synchronous replication can be set up to achieve near-zero data loss in a failure event.
+3. [Failover Manager with PgBouncer](05_efm_pgbouncer): PgBouncer adds capabilities such as connection pooling and the option to halt traffic. It can additionally be used as a proxy between the Client and the Postgres Database Server. By leveraging the integration options in Failover Manager to reconfigure PgBouncer during a
+failover, PgBouncer can be used to route the traffic to the correct
+primary database server.
 
-!!! Note
-    The architecture also allows us to completely separate 3 virtual machines running Postgres from 3 virtual machines running Pgpool. This kind of setup requires 2 extra virtual machines, but it is a better choice if you want to prevent resource contention between Pgpool and Postgres in Failover scenarios. In this setup, the architecture can run without an extra 7th node running the EFM Witness Process. To increase failure resolution efm witness agents could be deployed on the Pgpool servers.
+4. [Failover Manager with Pgpool](06_efm_pgpool): Pgpool-II is
+another tool that is used as a proxy between the Client and the
+Postgres Database Server. Pgpool-II adds capabilities such
+as running in cluster mode with a Watchdog, managing VIPs, and
+read-only scalability. Failover Manager has native capabilities to
+integrate with Pgpool-II to redirect traffic to another primary
+during database failover operations.
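+
+As an illustration of the client connect failover pattern in option 2, a
+libpq-style multi-host connection string might look like the following
+sketch (hostnames and port are placeholders; driver-specific examples
+are covered in the client connect failover chapter):
+
+```
+postgresql://srv1:5444,srv2:5444/edb?target_session_attrs=read-write
+```
+
+With `target_session_attrs=read-write`, the driver keeps trying the
+listed hosts until it finds the one that currently accepts writes, which
+is what lets clients follow the primary through a switchover.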
-![Deployment of EFM and Pgpool on separate virtual machines](images/edb_ha_architecture_separate_VM.png)
+The following table lists the features that are supported in each of the
+architectures:
+
+|Features | EFM with VIP | EFM with client connect failover | EFM with PgBouncer | EFM with Pgpool |
+|---------------------------------------- | ----------------------- | ----------------------------------------- | ----------------------- | ---------------------|
+|Connection pooling | | | Yes | Yes |
+|Runs on cloud (no VIP) | | Yes | Yes | Yes |
+|Halt traffic option | | | Yes | |
+|Read-only scalability | | Yes (using multiple connection factories) | | Yes |
+|Clustered proxy | | | | Yes |
+|Proxy integration | | | ssh | PCP |
+|Minimum servers required | 3 | 3 | 5 | 6 |
+|Complexity | Low | Low | Medium | High |
+|Network hops | 1 | 1 | | |
+|Failover duration | Low | Medium | Low | Low |
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx
deleted file mode 100644
index b56188f5a52..00000000000
--- a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/03_components_ha_pgpool.mdx
+++ /dev/null
@@ -1,218 +0,0 @@
----
-title: "Implementing High Availability with Pgpool"
----
-
-Failover Manager monitors the health of Postgres nodes; in the event of a database failure, Failover Manager performs an automatic failover to a Standby node. Note that Pgpool does not monitor the health of backend nodes and will not perform failover to any Standby nodes.
-
-## Configuring Failover Manager
-
-Failover Manager provides functionality that will remove failed database nodes from Pgpool load balancing; it can also re-attach nodes to Pgpool when returned to the Failover Manager cluster. To configure EFM for high availability using Pgpool, you must set the following properties in the cluster properties file:
-
-`pgpool.enable` = `true/false`
-
-`pcp.user` = `User that would be invoking PCP commands`
-
-`pcp.host` = `Virtual IP that would be used by pgpool. Same as pgpool parameter 'delegate_IP’`
-
-`pcp.port` = `The port on which pgpool listens for pcp commands`
-
-`pcp.pass.file` = `Absolute path of PCPPASSFILE`
-
-`pgpool.bin` = `Absolute path of pgpool bin directory`
-
-## Configuring Pgpool
-
-The section lists the configuration of some important parameters in the `pgpool.conf` file to integrate the Pgpool-II with EFM.
-
-**Backend node setting**
-
-There are three PostgreSQL backend nodes, one Primary and two Standby nodes. Configure using `backend_*` configuration parameters in `pgpool.conf`, and use the equal backend weights for all nodes. This will make the read queries to be distributed equally among all nodes.
- -```text -backend_hostname0 = ‘server1_IP' -backend_port0 = 5444 -backend_weight0 = 1 -backend_flag0 = 'ALLOW_TO_FAILOVER' - -backend_hostname1 = ‘server2_IP' -backend_port1 = 5444 -backend_weight1 = 1 -backend_flag1 = 'ALLOW_TO_FAILOVER' - -backend_hostname2 = ‘server3_IP' -backend_port2 = 5444 -backend_weight2 = 1 -backend_flag2 = 'ALLOW_TO_FAILOVER' -``` - -**Enable Load-balancing and streaming replication mode** - -Set the following configuration parameter in the `pgpool.conf` file to enable load balancing and streaming replication mode: - -- For Pgpool version 4.2: - - ```text - backend_clustering_mode = 'streaming_replication' - load_balance_mode = on - ``` - -- For Pgpool versions prior to 4.2: - - ```text - master_slave_mode = on - master_slave_sub_mode = 'stream' - load_balance_mode = on - ``` - -**Disable health-checking and failover** - -Health-checking and failover must be handled by EFM and hence, these must be disabled on Pgpool-II side. To disable the health-check and failover on pgpool-II side, assign the following values: - -```text -health_check_period = 0 -failover_on_backend_error = off -failover_if_affected_tuples_mismatch = off -failover_command = ‘’ -failback_command = ‘’ -``` - -Ensure the following while setting up the values in the `pgpool.conf` file: - -- Keep the value of wd_priority in pgpool.conf different on each node. The node with the highest value gets the highest priority. -- The properties backend_hostname0 , backend_hostname1, backend_hostname2 and so on are shared properties (in EFM terms) and should hold the same value for all the nodes in pgpool.conf file. -- Update the correct interface value in *if\_* \* and arping cmd props in the pgpool.conf file. -- Add the properties heartbeat_destination0, heartbeat_destination1, heartbeat_destination2 etc. as per the number of nodes in pgpool.conf file on every node. Here heartbeat_destination0 should be the ip/hostname of the local node. - -**Setting up PCP** - -Script uses the PCP interface, So we need to set up the PCP and .PCPPASS file to allow PCP connections without password prompt. - -setup PCP: - -setup PCPPASS: - -Note that the load-balancing is turned on to ensure read scalability by distributing read traffic across the standby nodes - -The health checking and error-triggered backend failover have been turned off, as Failover Manager will be responsible for performing health checks and triggering failover. It is not advisable for Pgpool to perform health checking in this case, so as not to create a conflict with Failover Manager, or prematurely perform failover. - -Finally, `search_primary_node_timeout` has been set to a low value to ensure prompt recovery of Pgpool services upon an Failover Manager-triggered failover. - -## Virtual IP Addresses - -Both Pgpool-II and Failover Manager provide functionality to employ a virtual IP for seamless failover. While both provide this capability, the pgpool-II leader is the process that receives the Application connections through the Virtual IP. As in this design, such Virtual IP management is performed by the Pgpool-II watchdog system. EFM VIP has no beneficial effect in this design and it must be disabled. - -Note that in a failure situation of the active instance of Pgpool (The Primary Pgpool Server in our sample architecture), the next available Standby Pgpool instance (according to watchdog priority) will be activated and takes charge as the leader Pgpool instance. 
- -## Configuring Pgpool-II Watchdog - -Watchdog provides the high availability of Pgpool-II nodes. This section lists the configuration required for watchdog on each Pgpool-II node. - - -### Configuring Pgpool-II Watchdog for Pgpool version 4.2 - -Unlike 4.1 or prior versions, you need to keep all the watchdog parameters identical on all the hosts while configuring Pgpool version 4.2. - -The following configuration parameters enable and configure the watchdog. The interval and retry values can be adjusted depending upon the requirements and testing results. - -```text -use_watchdog = on # enable watchdog -delegate_IP = ‘Virtual IP address’ -wd_lifecheck_method = 'heartbeat' -wd_interval = 10 # we can lower this value for quick detection -wd_life_point = 3 -# virtual IP control -ifconfig_path = '/sbin' # ifconfig command path -if_up_cmd = 'ifconfig eth0:0 inet $_IP_$ netmask 255.255.255.0' - # startup delegate IP command -if_down_cmd = 'ifconfig eth0:0 down' # shutdown delegate IP command -arping_path = '/usr/sbin' # arping command path -hostname0 = 'node-1' -pgpool_port0 = 9999 -wd_port0 = 9000 - -hostname1 = 'node-2' -pgpool_port1 = 9999 -wd_port1 = 9000 - -heartbeat_hostname0 = 'node-1' -heartbeat_port0 = 9694 -heartbeat_device0 = '' - -heartbeat_hostname1 = 'node-2' -heartbeat_port1 = 9694 -heartbeat_device1 = '' -``` - -Since some of the parameters would be same on all the nodes, you need to specify a mechanism to distinguish between local and remote watchdog nodes. For this purpose, you can create `pgpool_node_id` file in `pgpool.conf` directory with value 0,1, and 2 on node-1, node-2,node-3 respectively. The ownership and permissions for this file would be the same as `pgpool.conf` file. - -The following is an example of the file on the first node: - -```text -[root@ONE efm-4.2]}> cat /etc/sysconfig/edb/pgpool4.2/pgpool_node_id -0 -[root@ONE efm-4.2]}> -``` - -### Configuring Pgpool-II Watchdog for Pgpool versions prior to 4.2 - -**Common watchdog configurations on all Pgpool nodes** - -The following configuration parameters enable and configure the watchdog. The interval and retry values can be adjusted depending upon the requirements and testing results. - -```text -use_watchdog = on # enable watchdog -wd_port = 9000 # watchdog port, can be changed -delegate_IP = ‘Virtual IP address’ -wd_lifecheck_method = 'heartbeat' -wd_interval = 10 # we can lower this value for quick detection -wd_life_point = 3 -# virtual IP control -ifconfig_path = '/sbin' # ifconfig command path -if_up_cmd = 'ifconfig eth0:0 inet $_IP_$ netmask 255.255.255.0' - # startup delegate IP command -if_down_cmd = 'ifconfig eth0:0 down' # shutdown delegate IP command -arping_path = '/usr/sbin' # arping command path -``` - -!!! Note - Replace the value of eth0 with the network interface on your system. See [Chapter 5](05_appendix_b/#configuration-for-number-of-connections-and-pooling) for tuning the number of connections, and pooling configuration. 
- -**Watchdog configurations on server 2** - -```text -other_pgpool_hostname0 = 'server 3 IP/hostname' -other_pgpool_port0 = 9999 -other_wd_port0 = 9000 -other_pgpool_hostname1 = 'server 4 IP/hostname' -other_pgpool_port1 = 9999 -other_wd_port1 = 9000 -wd_priority = 1 -``` - -**Watchdog configurations on server 3** - -```text -other_pgpool_hostname0 = 'server 2 IP/hostname' -other_pgpool_port0 = 9999 -other_wd_port0 = 9000 -other_pgpool_hostname1 = 'server 4 IP/hostname' -other_pgpool_port1 = 9999 -other_wd_port1 = 9000 -wd_priority = 3 -``` - -**Watchdog configurations on server 4** - -```text -other_pgpool_hostname0 = 'server 2 IP/hostname' -other_pgpool_port0 = 9999 -other_wd_port0 = 9000 -other_pgpool_hostname1 = 'server 3 IP/hostname' -other_pgpool_port1 = 9999 -other_wd_port1 = 9000 -wd_priority = 5 # use high watchdog priority on server 4 -``` - -!!! Note - Replace the value of eth0 with the network interface on your system. See [Chapter 5](05_appendix_b/#configuration-for-number-of-connections-and-pooling) for tuning the number of connections, and pooling configuration. - diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/03_efm_vip.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/03_efm_vip.mdx new file mode 100644 index 00000000000..4d85353f815 --- /dev/null +++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/03_efm_vip.mdx @@ -0,0 +1,41 @@ +--- +title: "Failover Manager with virtual IP" +--- + + +Failover Manager provides support for clusters that use a virtual IP (VIP). + +![Failover Manager with VIP](images/efm_with_vip.png) + +*Figure 1: Failover Manager's traffic routing using virtual IP* + +Using Failover Manager with VIP +------------------------------- + +### Installing + +Install and configure the Advanced Server database and Failover Manager +on three servers as following: + + + Systems |Components + ------------------------------------------|----------------------------------------------------------------------------- + PG Primary, PG Standby1, and PG Standby2 | Primary / standby nodes running Advanced Server 13 and Failover Manager 4.2 + + +### Specifying VIP + +In the cluster properties file, provide the hostname or IP address in +the `virtual.ip` property. Specify the corresponding prefix in the +`virtual.ip.prefix` property. Use the `virtual.ip.interface` property to +provide the network interface used by the VIP. By default, the +`virtual.ip` and `virtual.ip.prefix` values must be the same across all the +agents. + +The specified virtual IP address is assigned only to the primary node of +the cluster. If you specify `virtual.ip.single=true`, the same VIP address +will be used on the new primary in the event of a failover. Specify a +value of false to provide a unique IP address for each node of the +cluster. + +For information about using a virtual IP address, see [Using Failover Manager with Virtual IP Addresses](https://www.enterprisedb.com/docs/efm/latest/efm_user/04_configuring_efm/05_using_vip_addresses/#using_vip_addresses). 
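+
+For illustration, a minimal sketch of the relevant entries in the
+cluster properties file might look like this (all values are examples;
+substitute the VIP, prefix, and interface for your environment):
+
+``` text
+ # illustrative values only
+ virtual.ip=192.168.100.50
+ virtual.ip.interface=eth0
+ virtual.ip.prefix=24
+ # keep using the same VIP on the new primary after a failover
+ virtual.ip.single=true
+```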
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/04_appendix_a.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/04_appendix_a.mdx deleted file mode 100644 index a220006c9cd..00000000000 --- a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/04_appendix_a.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: "EFM Pgpool Integration Using Azure Network Load Balancer" ---- - - - -This section describes a specific use case for EFM Pgpool integration, where the database, EFM, and Pgpool are installed on CentOS 8 Virtual Machines in Azure. For this specific use case, Azure Load Balancer (NLB) has been used to distribute the traffic amongst all the active Pgpool Instances instead of directing the traffic using Pgpool VIP. - -![Architecture diagram for EFM and Pgpool integration using Azure Load Balancer](images/EFM_PgPool_Azure.png) - -**Step 1 (Installation)**: - -Install and configure Advanced Server database, EFM, and Pgpool on Azure Virtual Machines as following: - -| **Systems** | **Components** | -| ----------- | ------------------------------------------------------------------------------ | -| Primary | Primary node running Advanced Server 13 and Failover Manager 4.2 | -| Standby 1 | Standby node running Advanced Server 13, Failover Manager 4.2, and Pgpool 4.2. | -| Standby 2 | Standby node running Advanced Server 13, Failover Manager 4.2, and Pgpool 4.2. | -| Witness | Witness node running Failover Manager 4.2 and Pgpool 4.2. | - -**Step 2 (Pgpool configuration)**: - -Configure Pgpool as per the steps given in chapter 3 (except for delegate_ip, which should be left empty in this architecture). - -**Step 3 (Azure Load Balancer configuration)**: - -You need to do the following configuration for using Azure NLB: - -**Networking**: You need to ensure the following settings for Network Load Balancer and for each of the virtual machines: Assign Public IP as well as private IP to the NLB, and only private IP to the virtual machines. The application server should connect to the NLB over public IP and NLB in turn should connect to the virtual machines over private IPs. - -In the current scenario, following are the IP addresses assigned to each component: - -- Public IP of NLB : 40.76.240.33 (pcp.host) -- Private IP of Primarydb : 172.16.1.3 (note that this is not part of the backend pool of the Load Balancer) -- Private IP of Standby 1 : 172.16.1.4 -- Private IP of Standby 2 : 172.16.1.5 -- Private IP of witness node: 172.16.1.6 - -Ensure that the ports required to run the database, EFM, and Pgpool are open for communication. Following is the list of default ports for each of these component (you can customize the ports for your environment): - -- Database: 5444 -- EFM: 7800 (bind.address) -- Pgpool: 9000, 9694, 9898, 9999 - -**Backend pool**: Create a Backend pool consisting of all the 3 virtual machines running Pgpool instances. Use the private IPs of the virtual machines to create the Backend pool. - -![Backend pool in Azure console](images/backend_pools.png) - -**Health Probe**: Add a health probe to check if the Pgpool instance is available on the virtual machines. The health probe periodically pings the virtual machines of the Backend pool on port 9999. If it does not receive any response from any of the virtual machines, it assumes that the Pgpool instance is not available and hence stops sending traffic towards that particular machine. 
- -![Health probes in Azure console](images/health_probes.png) - -**Load balancing rules**: Add two Load balancing rules - one each for port 9898 and port 9999. These rules should ensure that the network traffic coming towards that particular port gets distributed evenly among all the virtual machines present in the Backend pool. - -![Load balancing rules in Azure console](images/load_balancing_rules.png) - -1. Rule created for port 9999 (i.e. PCP port) - -![Load balancing rule for port 9999](images/rule_port_9898.png) - -1. Rule created for port 9999 (i.e. Pgpool port) - -![Load balancing rule for port 9999](images/rule_port_9999.png) - -After configuration of the above-mentioned setup, you can connect to Postgres on the IP address of the Network Load Balancer on port 9999. If a failure occurs on the Primary database server, EFM will promote a new Primary and then reconfigure Pgpool to redistribute traffic. If any one of the Pgpool processes is not available to accept traffic anymore, the Network Load Balancer will redistribute all the traffic to the remaining two Pgpool processes. Make sure that listen_backlog_multiplier is tuned to compensate for the higher number of connections in case of failover. diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/04_efm_client_connect_failover.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/04_efm_client_connect_failover.mdx new file mode 100644 index 00000000000..2ce56286efc --- /dev/null +++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/04_efm_client_connect_failover.mdx @@ -0,0 +1,40 @@ +--- +title: "Failover Manager with client connect failover" +--- + +Most of the PostgreSQL connection libraries support +client connection failover. These libraries support connection strings +with more than one database server. On first connection attempt or when the connection is lost (which also occurs during a failover), the client driver will connect to the supplied hosts one by one, until it finds a read-write connection. Do note that the time for reconnecting to the new +master is highly dependent on the connection timeouts as configured in +the driver and tcp layer. + +![Failover Manager traffic routing diagram for client connect failover](images/efm_with_client_connection_failover.png) + +
Figure 2: Failover Manager's traffic routing using client connect failover
+
+Using Failover Manager with Client Connection Failover
+-------------------------------------------------------
+
+### Installing
+
+Install and configure Advanced Server and Failover Manager on three servers as follows:
+
+ Systems | Components
+ -------------------------------------------|-----------------------------------------------------------------------------
+ PG Primary, PG Standby1, and PG Standby2 | Primary or standby nodes running Advanced Server 13 and Failover Manager 4.2
+
+
+Note that the virtual IP properties in `efm.properties` (virtual.ip,
+virtual.ip.interface, virtual.ip.prefix, and virtual.ip.single) do not
+need to be configured.
+
+### Configuring Client Connection Failover
+
+
+ Driver | Client Connection Failover Support| Version Supported | Configuration
+-----------------|-----------------------------------|-------------------------|---------------------------------
+ JDBC | Yes | All supported versions | Supply multiple hosts in the connection string, and set the `targetServerType` attribute as primary. Example: `jdbc:postgresql://host1:5444,host2:5444/accounting?targetServerType=primary`. More information: https://jdbc.postgresql.org/documentation/head/connect.html#connection-failover
+ libpq | Yes | 10 and above | Supply multiple hosts in the connection string, and set the `target_session_attrs` attribute as read-write. Example: `postgresql://host1:5444,host2:5444/edb?target_session_attrs=read-write`. More information: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+ .NET | Yes | 6 and above | Supply multiple hosts in the connection string, and set the Target Session Attributes attribute as primary. Example: `Host=host1,host2;Username=test;Password=test;Target Session Attributes=primary`. More information: https://www.npgsql.org/doc/failover-and-load-balancing.html
+ OCL | Yes | 10 and above | OCL is based on `libpq`, hence check `libpq` for details.
+ ODBC | No | |
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/05_appendix_b.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/05_appendix_b.mdx
deleted file mode 100644
index 785a9efa581..00000000000
--- a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/05_appendix_b.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: "Configuration for Number of Connections and Pooling"
----
-
-Pgpool has some configuration to tune the pooling and connection processing. Depending on this configuration, also the Postgres configuration for `max_connections` should be set to make sure all connections can be accepted as required. Furthermore, note that the Cloud Architecture works with active/active instances, which requires to spread `num_init_children` over all Pgpool instances (divide the normally used value by the number of active instances). The below text describes the effect of changing the configuration, and advises values for both the on-premise and the Cloud architecture.
-
-**max_pool**: Generally, it is advised to set `max_pool` to 1. Alternatively, for applications with a lot of reconnects, `max_pool` can be set to the number of distinct combinations of users, databases and connection options for the application connections. All but one connection in the pool would be stale connections, which consumes a connection slot from Postgres, without adding to performance. It is therefore advised not to configure `max_pool` beyond 4 to preserve a healthy ratio between active and stale connections.
As an example, for an application which constantly reconnects and uses 2 distinct users both connecting to their own database, set it to 2. If both users would be able to connect to both databases set it to 4. Note that increasing `max_pool` requires to tune down `num_init_children` in Pgpool, or tune up `max_connections` in Postgres. - -**num_init_children**: It is advised to set `num_init_children` to the number of connections that could be running active in parallel, but the value should be divided by the number of active Pgpool-II instances (one with the on-premise architecture, and all instances for the cloud architecture). As an example: In an architecture with 3 Pgpool instances, to allow the application to have 100 active connections in parallel, set `num_init_children` to 100 for the on-premise architecture, and set `num_init_children` to 33 for the cloud architecture. Note that increasing `num_init_children` generally requires to tune up `max_connections` in Postgres. - -**listen_backlog_multiplier**: Can be set to multiply the number of open connections (as perceived by the application) with the number of active connections (`num_init_children`). As an example, when the application might open 500 connections of which 100 should be active in parallel, with the on-premise architecture, `num_init_children` should be set to 100, and `listen_backlog_multiplier` should be set to 4. This setup can process 100 connections active in parallel, and another 400 (`listen_backlog_multiplier*num_init_children`) connections will be queued before connections will be blocked. The application would perceive a total of 500 open connections, and Postgres would process the load of 100 connections maximum at all times. Note that increasing `listen_backlog_multiplier` only causes the application to perceive more connections, but will not increase the number of parallel active connections (which is determined by `num_init_children`). - -**max_connections**: It is advised to set `max_connections` in Postgres higher than `[number of active pgpool instances]*[max_pool]*[num_init_children] + [superuser_reserved_connections] (Postgres)`. As an example: in the on-premise setup with 3 instances active/passive, `max_pool` set to 2, `num_init_children` set to 100, and `superuser_reserved_connections (Postgres)` set to 5, Postgres `max_connections` should be set equal or higher then `[1*2*100+5]` which is 205 connections or higher. A similar setup in the cloud setup would run with 3 active instances, `max_pool` set to 2, `num_init_children` set to 33, and `superuser_reserved_connections (Postgres)` set to 5, in which case Postgres `max_connections` should be set equal or higher than `[3*2*33+5]` which is 203 or higher. Note that configuring below the advised setting can cause issues opening new connections, and in a combination with `max_pool` can cause unexpected behaviour (low or no active connections but still connection issues due to stale pooled connections using connection slots from Postgres. For more information on the relation between `num_init_children`, `max_pool` and `max_connections`, see this background information. 
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/05_efm_pgbouncer.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/05_efm_pgbouncer.mdx new file mode 100644 index 00000000000..ee3ea31e712 --- /dev/null +++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/05_efm_pgbouncer.mdx @@ -0,0 +1,309 @@ +--- +title: "Failover Manager with PgBouncer" +--- + +You can use Failover Manager and PgBouncer to provide high availability +in an on-premises setup as well as a cloud setup. PgBouncer is a popular +connection pooler, but it is not enough to achieve PostgreSQL High +Availability by itself as it doesn't have multi-host configuration, +failover, or detection. + + + + +Failover Manager with PgBouncer On-premises +-------------------------------------------- + +For an on-premises setup, use the connection libraries to provide high +availability by using a connection string with multiple hosts. + +![Failover Manager using pgBouncer on-premises architecture diagram](images/efm_with_pgbouncer_on_premises.png) + +
Figure 3: Failover Manager's traffic routing using PgBouncer on-premises
+ +Failover Manager with PgBouncer in Cloud +---------------------------------------- + +For a cloud setup, use a NLB (Network Load Balancer) to balance the traffic on both instances of PgBouncer. + +![Failover Manager with PgBouncer cloud architecture diagram](images/efm_with_pgbouncer_on_cloud.png) + +
Figure 4: Failover Manager's traffic routing using PgBouncer in cloud
+
+Note that for multiple reasons EDB does not support this architecture
+with PgBouncer and Failover Manager/PostgreSQL running on the same
+machines:
+
+- A restriction with Cloud Network Load Balancers ([Azure](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-troubleshoot-backend-traffic#cause-4-accessing-the-internal-load-balancer-frontend-from-the-participating-load-balancer-backend-pool-vm))
+  prevents traffic from being routed properly when source and destination reside
+  on the same machines.
+
+- In mixed architecture, traffic between PgBouncer and Postgres could
+  become unbalanced (sometimes local, sometimes networked).
+
+- PgBouncer and PostgreSQL compete for resources.
+
+- A master failure would impact both routing (PgBouncer) and the database
+  when these two components are combined on the same machines.
+
+
+Using Failover Manager with PgBouncer
+-------------------------------------
+
+### Installing
+
+Install and configure Advanced Server database, Failover Manager, and PgBouncer on AWS Virtual Machines as follows:
+
+
+ Systems | Components
+ --------------------| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ PgDB srv 1, 2, 3 | Primary / standby node running Advanced Server 13 and Failover Manager 4.2
+ PgBouncer srv 1, 2 | PgBouncer node running PgBouncer 1.15. These 2 nodes should be registered as targets in the Target Group. Note that there could be more, but 2 is the minimum, and is sufficient for most cases.
+
+
+### Configuring Failover Manager
+
+Use the instructions provided in the *[EFM documentation](https://www.enterprisedb.com/docs/efm/latest/efm_user/)*
+to configure Failover Manager. Perform the following steps in addition
+to those instructions:
+
+1. Create an integration script that connects to every (remote)
+PgBouncer host and runs the redirect script. The script should be
+located at `/usr/edb/efm-4.2/bin/efm_pgbouncer_functions`, should be
+executable by the `efm` user, and should have the following contents:
+
+``` text
+
+  #!/bin/bash
+  set -e
+  IFS=', ' read -r -a PGB_HOSTS <<< "$3"
+  FAILED_PGB_HOST=''
+  for PGB_HOST in "${PGB_HOSTS[@]}"; do
+     echo "redirecting to '$2' on enterprisedb@${PGB_HOST}"
+     ssh "enterprisedb@${PGB_HOST}" /usr/edb/pgbouncer1.15/bin/redirect.sh "$2" || FAILED_PGB_HOST="$FAILED_PGB_HOST $PGB_HOST" < /dev/null
+  done
+
+  # return exit code to inform EFM agent about failure. The agent would send a failure
+  # notification accordingly for manual intervention
+  if [ ! -z "$FAILED_PGB_HOST" ]; then
+    echo "Failed to redirect to '$2' on '$FAILED_PGB_HOST'"
+    exit 1
+  fi
+
+```
+
+2. Set script.load.balancer.attach to the custom script in the `efm` properties file:
+
+``` text
+ script.load.balancer.attach=/usr/edb/efm-4.2/bin/efm_pgbouncer_functions attach %h <pgbouncer_host_1>,<pgbouncer_host_2>
+```
+Where `<pgbouncer_host_1>` is the hostname or IP address for PgBouncer server 1, and `<pgbouncer_host_2>` is the hostname or IP address for PgBouncer server 2.
+
+### Configuring PostgreSQL
+
+During normal operation, traffic is balanced across both PgBouncer instances, and both will open connections to PostgreSQL. Therefore, make sure that the PostgreSQL `max_connections` parameter is set high enough to accept the connections from both instances.
+
+### Configuring PgBouncer
+
+You can use the instructions provided in the [PgBouncer documentation](https://www.enterprisedb.com/docs/pgbouncer/latest/02_configuration_and_usage/)
+to configure PgBouncer.
Perform the following steps in addition to those instructions:
+
+1. Append the following line to the edb-pgbouncer-1.15.ini file:
+
+``` text
+
+ %include /etc/edb/pgbouncer1.15/edb-pgbouncer-databases.ini
+```
+2. In the edb-pgbouncer-1.15.ini file, set the value of `listen_addr` to *:
+``` text
+
+ listen_addr = *
+```
+3. Leave the [databases] section empty in the `edb-pgbouncer-1.15.ini` file, and configure this section in a separate file
+   `/etc/edb/pgbouncer1.15/edb-pgbouncer-databases.ini`. Ensure that this extra config file is readable and writable by enterprisedb.
+
+   The following is an example of the bash commands to create the file:
+
+``` text
+
+ echo "[databases]" > /etc/edb/pgbouncer1.15/edb-pgbouncer-databases.ini
+ echo "edb= host=srv1" >> /etc/edb/pgbouncer1.15/edb-pgbouncer-databases.ini
+ chown enterprisedb: /etc/edb/pgbouncer1.15/edb-pgbouncer-databases.ini
+```
+
+4. Create a script `/usr/edb/pgbouncer1.15/bin/redirect.sh` that can be
+used to reconfigure the [databases] section and reload pgbouncer. The
+script should be owned by `root`, and be readable/executable by
+user/group/other (0755). The script should have the following content:
+
+
+``` text
+  #!/bin/bash
+  set -e
+
+  # Some defaults
+  PGBOUNCER_DATABASE_INI=/etc/edb/pgbouncer1.15/edb-pgbouncer-databases.ini
+
+  PGMSTR=${1:-localhost}
+
+  # enterprisedb user does not have permissions to write in folder directly, so `sed -i` will not work
+  TMPFILE=$(mktemp)
+  sed "s/host=[A-Za-z0-9.]*/host=${PGMSTR}/" "${PGBOUNCER_DATABASE_INI}" > "${TMPFILE}"
+  if ! diff -q "${PGBOUNCER_DATABASE_INI}" "${TMPFILE}" >/dev/null; then
+    cat "${TMPFILE}" > "${PGBOUNCER_DATABASE_INI}"
+    pkill -SIGHUP pgbouncer
+  fi
+```
+
+### Configuring passwordless ssh
+
+For the PgBouncer integration, passwordless `ssh` access is required. There are multiple ways to configure `ssh`. We recommend that you follow your organization's recommended process to configure passwordless `ssh`. For a quick start, you can also follow this example for configuring passwordless `ssh`.
+
+
+#### Configuring on PgBouncer hosts
+
+1. On every PgBouncer host, temporarily set a password for the enterprisedb user. As root, run `passwd enterprisedb` and enter
+   the temporary password twice.
+
+2. Make sure that password authentication over `ssh` is enabled. You can check with the following command:
+   ``` text
+   grep ^PasswordAuthentication /etc/ssh/sshd_config
+   ```
+
+   Make sure it is set to `yes`. If needed, change and restart `ssh`.
+
+#### Configuring on Failover Manager/PostgreSQL hosts
+
+On every Failover Manager/PostgreSQL host, as the efm user:
+
+1. Run the following command:
+   ``` text
+   ssh-keygen -P "" -f ~/.ssh/id_rsa
+   ```
+2. For every PgBouncer host, copy the `ssh` key with the following command:
+   ``` text
+   ssh-copy-id enterprisedb@<pgbouncer_host>
+   ```
+   Note that the default home directory for the `enterprisedb` user is `/var/lib/edb`. If this directory is not already present, create the
+   directory manually. As a `sudo` user, run the following commands on each PgBouncer host:
+   ``` text
+   mkdir -p /var/lib/edb
+   chown -R enterprisedb:enterprisedb /var/lib/edb
+   ```
+
+#### Resetting temporary passwords on PgBouncer hosts
+
+You can reset the temporary password for the enterprisedb user on every
+PgBouncer host by running the following command as `root`:
+``` text
+ passwd -d enterprisedb
+```
+### Configuring the Network Load Balancer
+
+For the Failover Manager PgBouncer integration using Network Load Balancer in AWS or Azure, you need to perform additional steps.
+ +Add the following rules to the security groups to be used by the PgBouncer and database instances. + + + +- Rules for the security group to be used by the PgBouncer instances + (SG PgBouncer). + + + Type | Protocol | Port range | Source | Description + ------------|---------------|----------------|----------------|------------------- + Custom TCP | TCP | 6432 | Entire Subnet | PgBouncer + Custom TCP | TCP | 22 | Entire Subnet | ssh + + + In addition to these rules, add the rules for SSH and Ping as per your + requirement. + +- Rules for the security group used by the database instances (SG DB): + + + Type | Protocol | Port range | Source | Description + ------------|---------------|----------------|----------------|------------------- + Custom TCP | TCP | 7800 | Entire Subnet | Failover Manager + Custom TCP | TCP | 5444 | Entire Subnet |Postgres + Custom TCP | TCP | 22 | Entire Subnet | ssh + + + This ensures that the ports required to run the database, Failover Manager, and PgBouncer are open for communication between the nodes + and the Load Balancer for traffic routing and health monitoring. + + In addition to these rules, add the rules for SSH and Ping as per your requirement. + +If you are using Azure, proceed to the following section. If using AWS, see [Configuring NLB in AWS](#config_nlb_aws). + +#### Configuring NLB in Azure + +After configuring the rules described in [Creating rules for security groups](#sg_rules_pgbouncer), follow the Azure documentation to: + +- Create a Backend pool consisting of the two virtual machines running + the PgBouncer instances. Use the private IPs of the virtual machines + to create the Backend pool. + +- Add a health probe to check if the PgBouncer instance is available + on the virtual machines. Select `Protocol` as `TCP` and `Port` + as `6432`. + +- Add a Load balancing rule for port `6432`. This rule should ensure + that the network traffic coming towards that particular port gets + distributed evenly among all the virtual machines present in the + Backend pool. Select the `Type` as `Public` Load Balancer or + `Internal` Load Balancer. + +After completing these configurations, you can connect to the database +on the IP address of the Network Load Balancer using port 6432. If a +failure occurs on the Primary database server, Failover Manager will +promote a new Primary and then reconfigure PgBouncer to redistribute +traffic. If any one of the PgBouncer processes is not available to +accept traffic anymore, the Network Load Balancer will redistribute all +the traffic to the remaining PgBouncer processes. Make sure that +`max_client_conn` parameter is tuned to compensate for the higher number +of connections in case of failover. + + + + +#### Configuring NLB in AWS + +The following sample configuration assumes: + +- All the EC2 instances and the Loadbalancer are deployed in the same + Subnet. Note that if required, the database nodes could be added to + another Subnet, but that requires a more complex configuration and + might have a performance impact. + +- There is a security group for PgBouncer and a security group for the + database instances. 
+
+After configuring the rules described in [Creating rules for security groups](#sg_rules_pgbouncer), follow the AWS documentation to:
+
+- Create a target group with the following details:
+
+  Name | Type | Protocol | Port | VPC
+  ---------------|----------------|----------------|----------------|-----------------------------------------------------
+  pgbouncer | Instances | TCP | 6432 | Select the VPC to which the instances are connected.
+
+  Leave the rest of the settings (Health check TCP and Advanced health check settings) as default.
+
+  Register the created Target Groups with the instances that are running PgBouncer.
+
+- Create a Load Balancer with the following details:
+
+  Type | VPC | Listener |
+  -----------------------------------------------------------------------|-----------------------------------------------|---------------------------------------------------------------------------------------|
+  `Public` or `Internal`. EDB recommends using an Internal Load Balancer.| Choose a VPC and map it to the desired zones. | Create a listener with `TCP` as `6432`, and forward it to the target group pgbouncer. |
+
+
+After completing the configurations, you can connect to the database on
+the IP address of the Network Load Balancer on port 6432. If a failure
+occurs on the Primary database server, Failover Manager promotes a new
+Primary and then reconfigures PgBouncer to redistribute traffic. If any
+one of the PgBouncer processes is not available to accept traffic
+anymore, the Network Load Balancer will redistribute all the traffic to
+the remaining PgBouncer processes. Make sure that the `max_client_conn`
+parameter is tuned to compensate for the higher number of connections in
+case of failover.
\ No newline at end of file
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/06_efm_pgpool.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/06_efm_pgpool.mdx
new file mode 100644
index 00000000000..6df6cae089d
--- /dev/null
+++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/06_efm_pgpool.mdx
@@ -0,0 +1,314 @@
+---
+title: "Failover Manager with Pgpool"
+---
+
+Pgpool-II is a popular connection pooler that provides many capabilities, such as read-only traffic load balancing, connection pooling, and running as a clustered proxy. With Failover Manager managing availability and reconfiguring Pgpool-II to proxy to the correct primary, this setup can deliver high availability in an on-premises setup as well as a cloud setup.
+
+
+Failover Manager with Pgpool On-premises
+-----------------------------------------
+
+For an on-premises setup, you can use a VIP to route the traffic to an available Pgpool-II instance. In this setup, the automatic failover of Pgpool-II is disabled, and Failover Manager is configured to manage Pgpool-II.
+
+
+![Failover Manager with Pgpool on-premises](images/efm_with_pgpool_on_premises.png)
+
+
Figure 5: Failover Manager's traffic routing using Pgpool on-premises
+
+Failover Manager with Pgpool in Cloud
+--------------------------------------
+
+For environments with Network Load Balancers (e.g., cloud environments), you can use an NLB (Network Load Balancer) to balance the traffic over all available Pgpool-II instances without requiring a VIP.
+
+![Failover Manager with Pgpool in cloud](images/efm_with_pgpool_on_cloud.png)
+
+
Figure 6: Failover Manager's traffic routing using Pgpool in cloud
+
+## Using Failover Manager with Pgpool
+
+
+### Installing
+
+Install and configure Advanced Server database, Failover Manager, and
+Pgpool as follows:
+
+
+ **Systems** | **Components**
+ --------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ PgDB server 1, server 2, and server 3 | Primary / standby node running Advanced Server 13 and Failover Manager 4.2
+ Pgpool | Pgpool node running Pgpool 4.2 in a watchdog configuration. These 3 nodes should be registered as targets in the Target Group. Note that there could be more, but 3 is the minimum, and is sufficient for most cases.
+
+
+
+Note that EDB does not support this architecture with Pgpool-II and
+Failover Manager/PostgreSQL running on the same machines for the
+following reasons:
+
+- A restriction with Cloud Network Load Balancers prevents traffic
+  from being routed properly when source and destination reside on the same
+  machines.
+
+- In mixed architecture, traffic between Pgpool and Postgres could
+  become unbalanced.
+
+- Pgpool and PostgreSQL could compete for resources.
+
+### Configuring Failover Manager
+
+Failover Manager provides functionality that will remove failed database
+nodes from Pgpool load balancing; it can also re-attach nodes to Pgpool
+when returned to the Failover Manager cluster. To configure Failover
+Manager for high availability using Pgpool, you must set the following
+properties in the cluster properties file:
+
+``` text
+ pgpool.enable = true
+
+ pcp.user = User that would be invoking PCP commands
+
+ pcp.host = Virtual IP of PgPool or IP of NLB
+
+ pcp.port = 9898
+
+ pcp.pass.file = Absolute path of PCPPASSFILE
+
+ pgpool.bin = Absolute path of pgpool bin directory
+```
+### Configuring Pgpool
+
+This section lists the configuration of some important parameters in the
+pgpool.conf file to integrate Pgpool-II with Failover Manager.
+
+#### Backend node setting
+
+There are three PostgreSQL backend nodes, one Primary and two Standby
+nodes. Configure using backend_* configuration parameters in
+pgpool.conf, and use equal backend weights for all nodes. This
+distributes read queries equally among all nodes.
+``` text
+
+ backend_hostname0 = 'server1_IP'
+
+ backend_port0 = 5444
+
+ backend_weight0 = 1
+
+ backend_flag0 = 'ALLOW_TO_FAILOVER'
+
+ backend_hostname1 = 'server2_IP'
+
+ backend_port1 = 5444
+
+ backend_weight1 = 1
+
+ backend_flag1 = 'ALLOW_TO_FAILOVER'
+
+ backend_hostname2 = 'server3_IP'
+
+ backend_port2 = 5444
+
+ backend_weight2 = 1
+
+ backend_flag2 = 'ALLOW_TO_FAILOVER'
+
+```
+
+#### Enabling Load-balancing and streaming replication mode
+
+Set the following configuration parameter in the pgpool.conf file to enable load balancing and streaming replication mode:
+
+- For Pgpool version 4.2:
+
+  ``` text
+  backend_clustering_mode = 'streaming_replication'
+
+  load_balance_mode = on
+  ```
+
+- For Pgpool versions prior to 4.2:
+
+  ``` text
+  master_slave_mode = on
+
+  master_slave_sub_mode = 'stream'
+
+  load_balance_mode = on
+  ```
+#### Disabling health-checking and failover
+
+Health-checking and failover must be handled by Failover Manager and
+hence must be disabled on the Pgpool-II side.
To disable
+health checking and failover on the Pgpool-II side, assign the following
+values:
+
+``` text
+
+ health_check_period = 0
+
+ failover_on_backend_error = off
+
+ failover_if_affected_tuples_mismatch = off
+
+ failover_command = ''
+
+ failback_command = ''
+
+```
+
+Ensure the following while setting up the values in the pgpool.conf
+file:
+
+- Keep the value of `wd_priority` in `pgpool.conf` different on each
+  node. The node with the highest value gets the highest priority.
+
+- The properties `backend_hostname0`, `backend_hostname1`,
+  `backend_hostname2`, and so on are shared properties (in EFM terms)
+  and should hold the same value in the pgpool.conf file on all the Pgpool-II nodes.
+
+- Update the correct interface value in the `if_*` and `arping` command
+  properties in the `pgpool.conf` file.
+
+- Add the properties `heartbeat_destination0`, `heartbeat_destination1`,
+  `heartbeat_destination2`, and so on, according to the number of nodes,
+  in the `pgpool.conf` file on every node. Here, `heartbeat_destination0`
+  should be the IP address or hostname of the local node.
+
+#### Setting up PCP
+
+The Failover Manager integration script uses the PCP interface, so you
+need to set up PCP and the `.pcppass` file to allow PCP connections
+without a password prompt.
+
+To set up PCP, see:
+[http://www.pgpool.net/docs/latest/en/html/configuring-pcp-conf.html](http://www.pgpool.net/docs/latest/en/html/configuring-pcp-conf.html)
+
+To set up the `.pcppass` file, see:
+[https://www.pgpool.net/docs/latest/en/html/pcp-commands.html](https://www.pgpool.net/docs/latest/en/html/pcp-commands.html)
+
+Note that load balancing is turned on to ensure read scalability by
+distributing read traffic across the standby nodes.
+
+Health checking and error-triggered backend failover have been
+turned off, as Failover Manager will be responsible for performing
+health checks and triggering failover. It is not advisable for Pgpool to
+perform health checking in this case, so as not to create a conflict
+with Failover Manager or prematurely perform failover.
+
+Finally, `search_primary_node_timeout` has been set to a low value to
+ensure prompt recovery of Pgpool services upon a Failover
+Manager-triggered failover.
+
+#### Using Virtual IP Addresses
+
+Both Pgpool-II and Failover Manager provide the functionality to employ
+a virtual IP for seamless failover. In this design, the Pgpool-II leader
+is the process that receives the application connections through the
+virtual IP, so virtual IP management is performed by the Pgpool-II
+watchdog system. The Failover Manager VIP has no beneficial effect in
+this design and must be disabled.
+
+Note that if the active Pgpool instance (the primary Pgpool server in
+our sample architecture) fails, the next available standby Pgpool
+instance (according to watchdog priority) is activated and takes over as
+the leader Pgpool instance.
+
+### Configuring Network Load Balancer
+
+For Failover Manager Pgpool integration using a Network Load Balancer in
+AWS or Azure, you need to perform some additional steps.
+
+
+
+Add the following rules to the security groups used by the Pgpool and
+database instances:
+
+- Rules for the security group to be used by the Pgpool instances (SG
+  Pgpool):
+
+  Type        | Protocol      | Port range     | Source         | Description
+  ------------|---------------|----------------|----------------|-------------------
+  Custom TCP  | TCP           | 9000           | Entire Subnet  | Watchdog
+  Custom TCP  | TCP           | 9694           | Entire Subnet  | Heartbeat
+  Custom TCP  | TCP           | 9898           | Entire Subnet  | pcp
+  Custom TCP  | TCP           | 9999           | Entire Subnet  | Pgpool
+
+  In addition to the above rules, add the rules for SSH and ping as per your requirements.
+
+- Rules for the security group to be used by the database instances
+  (SG DB):
+
+  Type        | Protocol      | Port range     | Source         | Description
+  ------------|---------------|----------------|----------------|-------------------
+  Custom TCP  | TCP           | 7800           | Entire Subnet  | Failover Manager
+  Custom TCP  | TCP           | 5444           | Entire Subnet  | Postgres
+
+
+  This ensures that the ports required to run the database, Failover Manager, and Pgpool are open for communication between the nodes and
+  the load balancer for traffic routing and health monitoring.
+
+  In addition to these rules, add the rules for SSH and ping as per your requirements.
+
+  If you are using Azure, proceed to the following section. If you are using AWS, see [Configuring NLB in AWS](#config_nlb_aws).
+
+#### Configuring NLB in Azure
+
+After configuring the rules described in [Creating rules for security groups](#sg_rules_pgpool), follow the Azure documentation to:
+
+- Create a backend pool consisting of all the virtual machines running Pgpool instances. Use the private IPs of the virtual machines to create the backend pool.
+
+- Add a health probe to check if the Pgpool instance is available on the virtual machines. Select `TCP` as the protocol and `9999` as the port.
+
+- Add two load balancing rules, one each for port `9898` and port `9999`. These rules should ensure that the network traffic coming toward that particular port gets distributed evenly among all the virtual machines present in the backend pool. Select the type as `Public` Load Balancer or `Internal` Load Balancer.
+
+After completing these configurations, you can connect to the database
+on the IP address of the Network Load Balancer on port 9999. If a
+failure occurs on the primary database server, Failover Manager will
+promote a new primary and then reconfigure Pgpool to redistribute
+traffic. If any one of the Pgpool processes is no longer available to
+accept traffic, the Network Load Balancer will redistribute all the
+traffic to the remaining two Pgpool processes. Make sure that the
+`listen_backlog_multiplier` parameter is tuned to compensate for the
+higher number of connections in case of failover.
+
+
+
+#### Configuring NLB in AWS
+
+The sample configuration makes the following assumptions:
+
+- All the EC2 instances and the load balancer are deployed in the same
+  subnet. Note that, if required, the database nodes could be added to
+  another subnet, but that requires a more complex configuration and
+  might have a performance impact.
+
+- There is a security group for Pgpool and a security group for the
+  database instances.
+
+After configuring the rules described in [Creating rules for security groups](#sg_rules_pgpool), follow the AWS documentation to:
+
+- Create two target groups with the following details:
+
+  Name           | Type           | Protocol       | Port           | VPC
+  ---------------|----------------|----------------|----------------|-----------------------------------------------------
+  pcp            | Instances      | TCP            | 9898           | Select the VPC to which the instances are connected.
+  pgpool         | Instances      | TCP            | 9999           | Select the VPC to which the instances are connected.
+
+  Leave the rest of the settings (health check TCP and advanced health check settings) as default.
+
+  Register the created target groups with the instances that are running Pgpool.
+
+- Create a load balancer with the following details:
+
+  Type                                                                     | VPC                                           | Listener                                                                                                                                                                                                                            |
+  -------------------------------------------------------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+  `Public` or `Internal`. EDB recommends using an Internal Load Balancer.  | Choose a VPC and map it to the desired zones. | Create a listener with `TCP` as the protocol and `9898` as the port, and forward it to the target group pcp. Create another listener with `TCP` as the protocol and `9999` as the port, and forward it to the target group pgpool.  |
+
+
+After completing the configurations, you can connect to the database on
+the IP address of the Network Load Balancer on port 9999. If a failure
+occurs on the primary database server, Failover Manager will promote a
+new primary and then reconfigure Pgpool to redistribute traffic. If any
+one of the Pgpool processes is no longer available to accept traffic,
+the Network Load Balancer will redistribute all the traffic to the
+remaining two Pgpool processes. Make sure that the
+`listen_backlog_multiplier` parameter is tuned to compensate for the
+higher number of connections in case of failover.
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_client_connection_failover.png b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_client_connection_failover.png
new file mode 100644
index 00000000000..90ae48ad474
--- /dev/null
+++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_client_connection_failover.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:723a62c8c147861e27f4a27b3c982674b341e758a5dc46054103af50e78282d9
+size 44352
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgbouncer_on_cloud.png b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgbouncer_on_cloud.png
new file mode 100644
index 00000000000..aa7b134c6a6
--- /dev/null
+++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgbouncer_on_cloud.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb51e659fa9a46da4b8b78cbae86ef1403c8ffd6f8d9f8d3a3b0b1131d6289bd
+size 54699
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgbouncer_on_premises.png b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgbouncer_on_premises.png
new file mode 100644
index 00000000000..819c43bd883
--- /dev/null
+++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgbouncer_on_premises.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c8b037a166d1e6291785ec0a217a75c2565a1bddf0a86d6a3b2c2b46da4788e
+size 56200
diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgpool_on_cloud.png b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgpool_on_cloud.png
new file mode 100644
index 00000000000..cc5e05749c4
--- /dev/null
+++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgpool_on_cloud.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c5dd7f17992881e6571c955ee5a78613a7895617e24bf6f1aa5aa787a97a369
+size 68758
diff --git
a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgpool_on_premises.png b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgpool_on_premises.png new file mode 100644 index 00000000000..274f85a9dc2 --- /dev/null +++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_pgpool_on_premises.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6159f84c6e97207321f72389704659c38aad3599b3515958a7e87e181a10767 +size 64886 diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_vip.png b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_vip.png new file mode 100644 index 00000000000..87b10ffe7c1 --- /dev/null +++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/images/efm_with_vip.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbeb1bb4e99385c743b3eb3d6ab3f0c0addfc63f47ec5de39a5dbad56d2fdf21 +size 39090 diff --git a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/index.mdx b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/index.mdx index 66783b12538..787803a3b66 100644 --- a/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/index.mdx +++ b/product_docs/docs/efm/4.2/efm_pgpool_ha_guide/index.mdx @@ -1,11 +1,14 @@ --- -title: "High Availability & Scalability Guide" +title: "EFM High Availability & Scalability Guide" --- -Since high-availability and read scalability are not part of the core feature set of EDB Postgres Advanced Server, Advanced Server relies on external tools to provide this functionality. This document focuses on the functionality provided by EDB Failover Manager and Pgpool-II, and discusses the implications of a high-availability architecture formed around these tools. +EDB Failover Manager provides various high availability options for EDB +Postgres Advanced Server using the Postgres connection poolers and +connection libraries. This documentation discusses the implications of a +high-availability architecture for these options.
-introduction architecture components_ha_pgpool appendix_a appendix_b conclusion +introduction architecture efm_vip efm_client_connect_failover efm_pgbouncer efm_pgpool
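+As an illustration of the Failover Manager and Pgpool integration described above, the following sketch shows the kind of PCP call used to re-attach a recovered backend node. This is a sketch under stated assumptions: the node ID, NLB address, user name, and `.pcppass` path are hypothetical placeholders.
+
+``` text
+# Assumes PCPPASSFILE points at a .pcppass file (host:port:user:password)
+export PCPPASSFILE=/var/efm/.pcppass
+
+# Re-attach backend node 1, reaching PCP through the NLB on port 9898
+pcp_attach_node -h nlb.example.com -p 9898 -U enterprisedb -w -n 1
+```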
diff --git a/product_docs/docs/epas/11/ecpgplus_guide/02_overview.mdx b/product_docs/docs/epas/11/ecpgplus_guide/02_overview.mdx new file mode 100644 index 00000000000..6caf38d2830 --- /dev/null +++ b/product_docs/docs/epas/11/ecpgplus_guide/02_overview.mdx @@ -0,0 +1,253 @@ +--- +title: "ECPGPlus - Overview" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/overview.html" +--- + + + +EnterpriseDB has enhanced ECPG (the PostgreSQL pre-compiler) to create ECPGPlus. ECPGPlus is a Pro\*C-compatible version of the PostgreSQL C pre-compiler. ECPGPlus translates a program that combines C code and embedded SQL statements into an equivalent C program. As it performs the translation, ECPGPlus verifies that the syntax of each SQL construct is correct. + +The following diagram charts the path of a program containing embedded SQL statements as it is compiled into an executable: + +![Compilation of a program containing embedded SQL statements](images/ecpg_path.png) + +
Fig. 1: Compilation of a program containing embedded SQL statements
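+
+The stages in the diagram correspond roughly to the following commands (a sketch only: it assumes the file names shown in the diagram, and the exact compiler flags vary by platform):
+
+```text
+ecpg my_program.pgc                       # pre-compile: produces my_program.c
+cc -c my_program.c \
+   -I$(pg_config --includedir)            # compile: produces my_program.o
+cc -o my_program my_program.o \
+   -L$(pg_config --libdir) -lecpg -lpq    # link: produces my_program
+```
+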
+
+To produce an executable from a C program that contains embedded SQL statements, pass the program (`my_program.pgc` in the diagram above) to the ECPGPlus pre-compiler. ECPGPlus translates each SQL statement in `my_program.pgc` into C code that calls the `ecpglib` API, and produces a C program (`my_program.c`). Then, pass the C program to a C compiler; the C compiler generates an object file (`my_program.o`). Finally, pass the object file (`my_program.o`), as well as the `ecpglib` library file, and any other required libraries to the linker, which in turn produces the executable (`my_program`).
+
+While the ECPGPlus preprocessor validates the *syntax* of each SQL statement, it cannot validate the *semantics*. For example, the preprocessor will confirm that an `INSERT` statement is syntactically correct, but it cannot confirm that the table mentioned in the `INSERT` statement actually exists.
+
+**Behind the Scenes**
+
+A client application contains a mix of C code and SQL code, comprised of the following elements:
+
+- C preprocessor directives
+- C declarations (variables, types, functions, ...)
+- C definitions (variables, types, functions, ...)
+- SQL preprocessor directives
+- SQL statements
+
+For example:
+
+```text
+1  #include <stdio.h>
+2  EXEC SQL INCLUDE sqlca;
+3
+4  extern void printInt(char *label, int val);
+5  extern void printStr(char *label, char *val);
+6  extern void printFloat(char *label, float val);
+7
+8  void displayCustomer(int custNumber)
+9  {
+10   EXEC SQL BEGIN DECLARE SECTION;
+11     VARCHAR custName[50];
+12     float custBalance;
+13     int custID = custNumber;
+14   EXEC SQL END DECLARE SECTION;
+15
+16   EXEC SQL SELECT name, balance
+17     INTO :custName, :custBalance
+18     FROM customer
+19     WHERE id = :custID;
+20
+21   printInt("ID", custID);
+22   printStr("Name", custName);
+23   printFloat("Balance", custBalance);
+24 }
+```
+
+In the above code fragment:
+
+- Line 1 specifies a directive to the C preprocessor.
+
+  C preprocessor directives may be interpreted or ignored; the option is controlled by a command line option (`-C PROC`) entered when you invoke ECPGPlus. In either case, ECPGPlus copies each C preprocessor directive to the output file without change; any C preprocessor directive found in the source file will appear in the output file.
+
+- Line 2 specifies a directive to the SQL preprocessor.
+
+  SQL preprocessor directives are interpreted by the ECPGPlus preprocessor, and are not copied to the output file.
+
+- Lines 4 through 6 contain C declarations.
+
+  C declarations are copied to the output file without change, except that each `VARCHAR` declaration is translated into an equivalent `struct` declaration.
+
+- Lines 10 through 14 contain an embedded-SQL declaration section.
+
+  C variables that you refer to within SQL code are known as `host variables`. If you invoke the ECPGPlus preprocessor in Pro\*C mode (`-C PROC`), you may refer to *any* C variable within a SQL statement; otherwise you must declare each host variable within a `BEGIN/END DECLARE SECTION` pair.
+
+- Lines 16 through 19 contain a SQL statement.
+
+  SQL statements are translated into calls to the ECPGPlus run-time library.
+
+- Lines 21 through 23 contain C code.
+
+  C code is copied to the output file without change.
+
+Any SQL statement must be prefixed with `EXEC SQL` and extends to the next (unquoted) semicolon.
For example:
+
+```text
+printf("Updating employee salaries\n");
+
+EXEC SQL UPDATE emp SET sal = sal * 1.25;
+EXEC SQL COMMIT;
+
+printf("Employee salaries updated\n");
+```
+
+When the preprocessor encounters the code fragment shown above, it passes the C code (the first line and the last line) to the output file without translation and converts each `EXEC SQL` statement into a call to an `ecpglib` function. The result would appear similar to the following:
+
+```text
+printf("Updating employee salaries\n");
+
+{
+  ECPGdo( __LINE__, 0, 1, NULL, 0, ECPGst_normal,
+          "update emp set sal = sal * 1.25",
+          ECPGt_EOIT, ECPGt_EORT);
+}
+
+{
+  ECPGtrans(__LINE__, NULL, "commit");
+}
+
+printf("Employee salaries updated\n");
+```
+
+## Installation and Configuration
+
+On Windows, ECPGPlus is installed by the Advanced Server installation wizard as part of the `Database Server` component. On Linux, install with the `edb-asxx-server-devel` RPM package, where `xx` is the Advanced Server version number. By default, the executable is located in:
+
+On Windows:
+
+```text
+C:\Program Files\edb\as11\bin
+```
+
+On Linux:
+
+```text
+/usr/edb/as11/bin
+```
+
+When invoking the ECPGPlus compiler, the executable must be in your search path (`%PATH%` on Windows, `$PATH` on Linux). For example, the following commands set the search path to include the directory that holds the ECPGPlus executable file `ecpg`.
+
+On Windows:
+
+```text
+set EDB_PATH=C:\Program Files\edb\as11\bin
+set PATH=%EDB_PATH%;%PATH%
+```
+
+On Linux:
+
+```text
+export EDB_PATH=/usr/edb/as11/bin
+export PATH=$EDB_PATH:$PATH
+```
+
+## Constructing a Makefile
+
+A `makefile` contains a set of instructions that tell the `make` utility how to transform a program that combines C code and embedded SQL into a C program. To try the examples in this guide, you will need:
+
+- a C compiler (and linker)
+- the `make` utility
+- the ECPGPlus preprocessor and library
+- a `makefile` that contains instructions for ECPGPlus
+
+The following code is an example of a `makefile` for the samples included in this guide. To use the sample code, save it in a file named `makefile` in the directory that contains the source code file.
+
+```text
+INCLUDES = -I$(shell pg_config --includedir)
+LIBPATH = -L $(shell pg_config --libdir)
+CFLAGS += $(INCLUDES) -g
+LDFLAGS += -g
+LDLIBS += $(LIBPATH) -lecpg -lpq
+
+.SUFFIXES: .pgc .pc
+
+.pgc.c:
+	ecpg -c $(INCLUDES) $?
+
+.pc.c:
+	ecpg -C PROC -c $(INCLUDES) $?
+```
+
+The first two lines use the `pg_config` program to locate the necessary header files and library directories:
+
+```text
+INCLUDES = -I$(shell pg_config --includedir)
+LIBPATH = -L $(shell pg_config --libdir)
+```
+
+The `pg_config` program is shipped with Advanced Server.
+
+`make` knows that it should use the `CFLAGS` variable when running the C compiler, and `LDFLAGS` and `LDLIBS` when invoking the linker. ECPG programs must be linked against the ECPG run-time library (`-lecpg`) and the libpq library (`-lpq`).
+
+```text
+CFLAGS += $(INCLUDES) -g
+LDFLAGS += -g
+LDLIBS += $(LIBPATH) -lecpg -lpq
+```
+
+The sample `makefile` instructs `make` how to translate a `.pgc` or a `.pc` file into a C program. Two lines in the `makefile` specify the mode in which the source file will be compiled. The first compile option is:
+
+```text
+.pgc.c:
+	ecpg -c $(INCLUDES) $?
+```
+
+The first option tells `make` how to transform a file that ends in `.pgc` (presumably, an ECPG source file) into a file that ends in `.c` (a C program), using community ECPG (without the ECPGPlus enhancements). It invokes the ECPG pre-compiler with the `-c` flag (instructing the compiler to convert SQL code into C), using the value of the `INCLUDES` variable and the name of the `.pgc` file.
+
+```text
+.pc.c:
+	ecpg -C PROC -c $(INCLUDES) $?
+```
+
+The second option tells `make` how to transform a file that ends in `.pc` (an ECPG source file) into a file that ends in `.c` (a C program), using the ECPGPlus extensions. It invokes the ECPG pre-compiler with the `-c` flag (instructing the compiler to convert SQL code into C), as well as the `-C PROC` flag (instructing the compiler to use ECPGPlus in Pro\*C-compatibility mode), using the value of the `INCLUDES` variable and the name of the `.pc` file.
+
+When you run `make`, pass the name of the ECPG source code file you wish to compile. For example, to compile an ECPG source code file named `customer_list.pgc`, use the command:
+
+```text
+make customer_list
+```
+
+The `make` utility consults the `makefile` (located in the current directory), discovers that the `makefile` contains a rule that will compile `customer_list.pgc` into a C program (`customer_list.c`), and then uses the rules built into `make` to compile `customer_list.c` into an executable program.
+
+## ECPGPlus Command Line Options
+
+In the sample `makefile` shown above, `make` includes the `-C` option when invoking ECPGPlus to specify that ECPGPlus should be invoked in Pro\*C compatible mode.
+
+If you include the `-C PROC` keywords on the command line, in addition to the ECPG syntax, you may use Pro\*C command line syntax; for example:
+
+```text
+$ ecpg -C PROC INCLUDE=/usr/edb/as11/include acct_update.c
+```
+
+To display a complete list of the other ECPGPlus options available, navigate to the ECPGPlus installation directory, and enter:
+
+```text
+./ecpg --help
+```
+
+The command line options are:
+
+| **Option**   | **Description** |
+| ------------ | --------------- |
+| -c           | Automatically generate C code from embedded SQL code. |
+| -C *mode*    | Use the `-C` option to specify a compatibility mode:

`INFORMIX`

`INFORMIX_SE`

`PROC` | +| -D *symbol* | Define a preprocessor *symbol*.

The *-D* keyword is not supported when compiling in *PROC mode*. Instead, use the Oracle-style *'DEFINE='* clause. |
+| -h           | Parse a header file; this option includes option `-c`. |
+| -i           | Parse system include files as well. |
+| -I *directory* | Search *directory* for `include` files. |
+| -o *outfile* | Write the result to *outfile*. |
+| -r *option*  | Specify run-time behavior; *option* can be:

`no_indicator` - Do not use indicators, but instead use special values to represent NULL values.

`prepare` - Prepare all statements before using them.

`questionmarks` - Allow use of a question mark as a placeholder.

`usebulk` - Enable bulk processing for `INSERT`, `UPDATE`, and `DELETE` statements that operate on host variable arrays. |
+| --regression | Run in regression testing mode. |
+| -t           | Turn on `autocommit` of transactions. |
+| -l           | Disable `#line` directives. |
+| --help       | Display the help options. |
+| --version    | Output version information. |
+
+!!! Note
+    If you do not specify an output file name when invoking ECPGPlus, the output file name is created by stripping off the `.pgc` filename extension and appending `.c` to the file name.
diff --git a/product_docs/docs/epas/11/ecpgplus_guide/03_using_embedded_sql.mdx b/product_docs/docs/epas/11/ecpgplus_guide/03_using_embedded_sql.mdx
new file mode 100644
index 00000000000..e86dd36ffcf
--- /dev/null
+++ b/product_docs/docs/epas/11/ecpgplus_guide/03_using_embedded_sql.mdx
@@ -0,0 +1,362 @@
+---
+title: "Using Embedded SQL"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/using_embedded_sql.html"
+---
+
+
+
+Each of the following sections leads with a code sample, followed by an explanation of each section within the code sample.
+
+## Example - A Simple Query
+
+The first code sample demonstrates how to execute a `SELECT` statement (which returns a single row), storing the results in a group of host variables. After declaring host variables, it connects to the `edb` sample database using a hard-coded role name and the associated password, and queries the `emp` table. The query returns the values into the declared host variables; after checking the value of the `NULL` indicator variable, it prints a simple result set onscreen and closes the connection.
+
+```text
+/************************************************************
+ * print_emp.pgc
+ *
+ */
+#include <stdio.h>
+
+int main(void)
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        int    v_empno;
+        char   v_ename[40];
+        double v_sal;
+        double v_comm;
+        short  v_comm_ind;
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL WHENEVER SQLERROR sqlprint;
+
+    EXEC SQL CONNECT TO edb
+        USER 'alice' IDENTIFIED BY '1safepwd';
+
+    EXEC SQL
+        SELECT
+            empno, ename, sal, comm
+        INTO
+            :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind
+        FROM
+            emp
+        WHERE
+            empno = 7369;
+
+    if (v_comm_ind)
+        printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+               v_empno, v_ename, v_sal);
+    else
+        printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+               v_empno, v_ename, v_sal, v_comm);
+    EXEC SQL DISCONNECT;
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio` library, and then declares the `main` function:
+
+```text
+#include <stdio.h>
+
+int main(void)
+{
+```
+
+Next, the application declares a set of host variables used to interact with the database server:
+
+```text
+EXEC SQL BEGIN DECLARE SECTION;
+    int    v_empno;
+    char   v_ename[40];
+    double v_sal;
+    double v_comm;
+    short  v_comm_ind;
+EXEC SQL END DECLARE SECTION;
+```
+
+Please note that if you plan to pre-compile the code in `PROC` mode, you may omit the `BEGIN DECLARE…END DECLARE` section. For more information about declaring host variables, refer to [Declaring Host Variables](#declaring-host-variables).
+
+The data type associated with each variable within the declaration section is a C data type.
Data passed between the server and the client application must share a compatible data type; for more information about data types, see [Supported C Data Types](07_reference/#supported-c-data-types).
+
+The next statement tells the client application how to handle an error:
+
+```text
+EXEC SQL WHENEVER SQLERROR sqlprint;
+```
+
+If the client application encounters an error in the SQL code, it prints an error message to `stderr` (standard error), using the `sqlprint()` function supplied with `ecpglib`. The next `EXEC SQL` statement establishes a connection with Advanced Server:
+
+```text
+EXEC SQL CONNECT TO edb
+    USER 'alice' IDENTIFIED BY '1safepwd';
+```
+
+In our example, the client application connects to the `edb` database, using a role named `alice` with a password of `1safepwd`.
+
+The code then performs a query against the `emp` table:
+
+```text
+EXEC SQL
+    SELECT
+        empno, ename, sal, comm
+    INTO
+        :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind
+    FROM
+        emp
+    WHERE
+        empno = 7369;
+```
+
+The query returns information about employee number `7369`.
+
+The `SELECT` statement uses an `INTO` clause to assign the retrieved values (from the `empno`, `ename`, `sal` and `comm` columns) into the `:v_empno`, `:v_ename`, `:v_sal` and `:v_comm` host variables (and the `:v_comm_ind` null indicator). The first value retrieved is assigned to the first variable listed in the `INTO` clause, the second value is assigned to the second variable, and so on.
+
+The `comm` column contains the commission values earned by an employee, and could potentially contain a `NULL` value. The statement includes the `INDICATOR` keyword, and a host variable to hold a null indicator.
+
+The code checks the null indicator, and displays the appropriate on-screen results:
+
+```text
+if (v_comm_ind)
+    printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+           v_empno, v_ename, v_sal);
+else
+    printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+           v_empno, v_ename, v_sal, v_comm);
+```
+
+If the null indicator is `0` (that is, `false`), the `comm` column contains a meaningful value, and the `printf` function displays the commission. If the null indicator contains a non-zero value, `comm` is `NULL`, and `printf` displays a value of `NULL`. Please note that a host variable (other than a null indicator) contains no meaningful value if you fetch a `NULL` into that host variable; you must use null indicators to identify any value which may be `NULL`.
+
+The final statement in the code sample closes the connection to the server:
+
+```text
+EXEC SQL DISCONNECT;
+}
+```
+
+### Using Indicator Variables
+
+The previous example included an *indicator variable* that identifies any row in which the value of the `comm` column (as returned by the server) was `NULL`. An indicator variable is an extra host variable that denotes whether the content of the preceding variable is `NULL` or truncated. The indicator variable is populated when the contents of a row are stored. An indicator variable may contain the following values:
+
+| Indicator Value  | Denotes                                                                 |
+| ---------------- | ----------------------------------------------------------------------- |
+| Less than `0`    | The value returned by the server was `NULL`.                            |
+| Equal to `0`     | The value returned by the server was not `NULL`, and was not truncated. |
+| Greater than `0` | The value returned by the server was truncated when stored in the host variable. |
+
+When including an indicator variable in an `INTO` clause, you are not required to include the optional `INDICATOR` keyword.
+
+You may omit an indicator variable if you are certain that a query will never return a `NULL` value into the corresponding host variable. If you omit an indicator variable and a query returns a `NULL` value, `ecpglib` will raise a run-time error.
+
+
+
+### Declaring Host Variables
+
+You can use a *host variable* in a SQL statement at any point that a value may appear within that statement. A host variable is a C variable that you can use to pass data values from the client application to the server, and to return data from the server to the client application. A host variable can be:
+
+- an array
+- a `typedef`
+- a pointer
+- a `struct`
+- any scalar C data type
+
+The code fragments that follow demonstrate using host variables in code compiled in `PROC` mode, and in non-`PROC` mode. The SQL statement adds a row to the `dept` table, inserting the values of the variables `v_deptno`, `v_dname` and `v_loc` into the `deptno` column, the `dname` column and the `loc` column, respectively.
+
+If you are compiling in `PROC` mode, you may omit the `EXEC SQL BEGIN DECLARE SECTION` and `EXEC SQL END DECLARE SECTION` directives. `PROC` mode permits you to use C function parameters as host variables:
+
+```text
+void addDept(int v_deptno, char *v_dname, char *v_loc)
+{
+    EXEC SQL INSERT INTO dept VALUES( :v_deptno, :v_dname, :v_loc);
+}
+```
+
+If you are not compiling in `PROC` mode, you must wrap embedded variable declarations with the `EXEC SQL BEGIN DECLARE SECTION` and the `EXEC SQL END DECLARE SECTION` directives, and copy each parameter into a declared host variable, as shown below:
+
+```text
+void addDept(int v_deptno, char *v_dname, char *v_loc)
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        int  v_deptno_copy = v_deptno;
+        char v_dname_copy[14+1];
+        char v_loc_copy[13+1];
+    EXEC SQL END DECLARE SECTION;
+
+    /* copy the string parameters into the declared host variables */
+    strncpy(v_dname_copy, v_dname, sizeof(v_dname_copy) - 1);
+    v_dname_copy[sizeof(v_dname_copy) - 1] = '\0';
+    strncpy(v_loc_copy, v_loc, sizeof(v_loc_copy) - 1);
+    v_loc_copy[sizeof(v_loc_copy) - 1] = '\0';
+
+    EXEC SQL INSERT INTO dept VALUES( :v_deptno_copy, :v_dname_copy, :v_loc_copy);
+}
+```
+
+You can also include the `INTO` clause in a `SELECT` statement to use the host variables to retrieve information:
+
+```text
+EXEC SQL SELECT deptno, dname, loc
+    INTO :v_deptno, :v_dname, :v_loc FROM dept;
+```
+
+Each column returned by the `SELECT` statement must have a type-compatible target variable in the `INTO` clause. This is a simple example that retrieves a single row; to retrieve more than one row, you must define a cursor, as demonstrated in the next example.
+
+## Example - Using a Cursor to Process a Result Set
+
+The code sample that follows demonstrates using a cursor to process a result set. There are four basic steps involved in creating and using a cursor:
+
+1. Use the `DECLARE CURSOR` statement to define a cursor.
+2. Use the `OPEN CURSOR` statement to open the cursor.
+3. Use the `FETCH` statement to retrieve data from a cursor.
+4. Use the `CLOSE CURSOR` statement to close the cursor.
+
+After declaring host variables, our example connects to the `edb` database using a user-supplied role name and password, and queries the `emp` table. The query returns the values into a cursor named `employees`. The code sample then opens the cursor, and loops through the result set a row at a time, printing the result set. When the sample detects the end of the result set, it closes the connection.
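+
+In skeleton form (a compressed sketch, where `:vars` stands for the list of host variables), the four steps look like this; the complete program follows:
+
+```text
+EXEC SQL DECLARE employees CURSOR FOR SELECT ...;   /* 1. define the cursor */
+EXEC SQL OPEN employees;                            /* 2. open the cursor   */
+EXEC SQL FETCH NEXT FROM employees INTO :vars;      /* 3. fetch, in a loop  */
+EXEC SQL CLOSE employees;                           /* 4. close the cursor  */
+```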
+
+```text
+/************************************************************
+ * print_emps.pgc
+ *
+ */
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        char  *username = argv[1];
+        char  *password = argv[2];
+        int    v_empno;
+        char   v_ename[40];
+        double v_sal;
+        double v_comm;
+        short  v_comm_ind;
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL WHENEVER SQLERROR sqlprint;
+
+    EXEC SQL CONNECT TO edb USER :username IDENTIFIED BY :password;
+
+    EXEC SQL DECLARE employees CURSOR FOR
+        SELECT
+            empno, ename, sal, comm
+        FROM
+            emp;
+
+    EXEC SQL OPEN employees;
+
+    EXEC SQL WHENEVER NOT FOUND DO break;
+
+    for (;;)
+    {
+        EXEC SQL FETCH NEXT FROM employees
+            INTO
+                :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind;
+
+        if (v_comm_ind)
+            printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+                   v_empno, v_ename, v_sal);
+        else
+            printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+                   v_empno, v_ename, v_sal, v_comm);
+    }
+    EXEC SQL CLOSE employees;
+    EXEC SQL DISCONNECT;
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio` library, and then declares the `main` function:
+
+```text
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+```
+
+Next, the application declares a set of host variables used to interact with the database server:
+
+```text
+EXEC SQL BEGIN DECLARE SECTION;
+    char  *username = argv[1];
+    char  *password = argv[2];
+    int    v_empno;
+    char   v_ename[40];
+    double v_sal;
+    double v_comm;
+    short  v_comm_ind;
+EXEC SQL END DECLARE SECTION;
+```
+
+`argv[]` is an array that contains the command line arguments entered when the user runs the client application. `argv[1]` contains the first command line argument (in this case, a `username`), and `argv[2]` contains the second command line argument (a `password`); please note that we have omitted the error-checking code you would normally include in a real-world application. The declaration initializes the values of `username` and `password`, setting them to the values entered when the user invoked the client application.
+
+You may be thinking that you could refer to `argv[1]` and `argv[2]` in a SQL statement (instead of creating a separate copy of each variable); that will not work. All host variables must be declared within a `BEGIN/END DECLARE SECTION` (unless you are compiling in `PROC` mode). Since `argv` is a function *parameter* (not an automatic variable), it cannot be declared within a `BEGIN/END DECLARE SECTION`. If you are compiling in `PROC` mode, you can refer to *any* C variable within a SQL statement.
+
+The next statement instructs the client application to respond to an SQL error by printing the text of the error message returned by ECPGPlus or the database server:
+
+```text
+EXEC SQL WHENEVER SQLERROR sqlprint;
+```
+
+Then, the client application establishes a connection with Advanced Server:
+
+```text
+EXEC SQL CONNECT TO edb USER :username IDENTIFIED BY :password;
+```
+
+The `CONNECT` statement creates a connection to the `edb` database, using the values found in the `:username` and `:password` host variables to authenticate the application to the server when connecting.
+
+The next statement declares a cursor named `employees`:
+
+```text
+EXEC SQL DECLARE employees CURSOR FOR
+    SELECT
+        empno, ename, sal, comm
+    FROM
+        emp;
+```
+
+`employees` will contain the result set of a `SELECT` statement on the `emp` table.
The query returns employee information from the following columns: `empno`, `ename`, `sal` and `comm`. Notice that when you declare a cursor, you do not include an `INTO` clause; instead, you specify the target variables (or descriptors) when you `FETCH` from the cursor.
+
+Before fetching rows from the cursor, the client application must `OPEN` the cursor:
+
+```text
+EXEC SQL OPEN employees;
+```
+
+In the subsequent `FETCH` section, the client application will loop through the contents of the cursor; the client application includes a `WHENEVER` statement that instructs it to `break` (that is, terminate the loop) when it reaches the end of the cursor:
+
+```text
+EXEC SQL WHENEVER NOT FOUND DO break;
+```
+
+The client application then uses a `FETCH` statement to retrieve each row from the cursor `INTO` the previously declared host variables:
+
+```text
+for (;;)
+{
+    EXEC SQL FETCH NEXT FROM employees
+        INTO
+            :v_empno, :v_ename, :v_sal, :v_comm INDICATOR :v_comm_ind;
+```
+
+The `FETCH` statement uses an `INTO` clause to assign the retrieved values into the `:v_empno`, `:v_ename`, `:v_sal` and `:v_comm` host variables (and the `:v_comm_ind` null indicator). The first value in the cursor is assigned to the first variable listed in the `INTO` clause, the second value is assigned to the second variable, and so on.
+
+The `FETCH` statement also includes the `INDICATOR` keyword and a host variable to hold a null indicator. If the `comm` column for the retrieved record contains a `NULL` value, `v_comm_ind` is set to a non-zero value, indicating that the column is `NULL`.
+
+The code then checks the null indicator, and displays the appropriate on-screen results:
+
+```text
+if (v_comm_ind)
+    printf("empno(%d), ename(%s), sal(%.2f) comm(NULL)\n",
+           v_empno, v_ename, v_sal);
+else
+    printf("empno(%d), ename(%s), sal(%.2f) comm(%.2f)\n",
+           v_empno, v_ename, v_sal, v_comm);
+}
+```
+
+If the null indicator is `0` (that is, `false`), `v_comm` contains a meaningful value, and the `printf` function displays the commission. If the null indicator contains a non-zero value, `comm` is `NULL`, and `printf` displays the string `'NULL'`. Please note that a host variable (other than a null indicator) contains no meaningful value if you fetch a `NULL` into that host variable; you must use null indicators for any value which may be `NULL`.
+
+The final statements in the code sample close the cursor (`employees`), and the connection to the server:
+
+```text
+EXEC SQL CLOSE employees;
+EXEC SQL DISCONNECT;
+```
diff --git a/product_docs/docs/epas/11/ecpgplus_guide/04_using_descriptors.mdx b/product_docs/docs/epas/11/ecpgplus_guide/04_using_descriptors.mdx
new file mode 100644
index 00000000000..cff005e0f16
--- /dev/null
+++ b/product_docs/docs/epas/11/ecpgplus_guide/04_using_descriptors.mdx
@@ -0,0 +1,556 @@
+---
+title: "Using Descriptors"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/using_descriptors.html"
+---
+
+
+
+Dynamic SQL allows a client application to execute SQL statements that are composed at runtime. This is useful when you don't know the content or form a statement will take when you are writing a client application.
ECPGPlus does *not* allow you to use a host variable in place of an identifier (such as a table name, column name or index name); instead, you should use dynamic SQL statements to build a string that includes the information, and then execute that string. Data is passed between the client and the server in the form of a *descriptor*. A descriptor is a data structure that contains both the data and the information about the shape of the data.
+
+A client application must use a `GET DESCRIPTOR` statement to retrieve information from a descriptor. The following steps describe the basic flow of a client application using dynamic SQL:
+
+1. Use an `ALLOCATE DESCRIPTOR` statement to allocate a descriptor for the result set (select list).
+2. Use an `ALLOCATE DESCRIPTOR` statement to allocate a descriptor for the input parameters (bind variables).
+3. Obtain, assemble or compute the text of an SQL statement.
+4. Use a `PREPARE` statement to parse and syntax-check the SQL statement.
+5. Use a `DESCRIBE` statement to describe the select list into the select-list descriptor.
+6. Use a `DESCRIBE` statement to describe the input parameters into the bind-variables descriptor.
+7. Prompt the user (if required) for a value for each input parameter. Use a `SET DESCRIPTOR` statement to assign the values into a descriptor.
+8. Use a `DECLARE CURSOR` statement to define a cursor for the statement.
+9. Use an `OPEN CURSOR` statement to open a cursor for the statement.
+10. Use a `FETCH` statement to fetch each row from the cursor, storing each row in the select-list descriptor.
+11. Use a `GET DESCRIPTOR` command to interrogate the select-list descriptor to find the value of each column in the current row.
+12. Use a `CLOSE CURSOR` statement to close the cursor and free any cursor resources.
+
+A descriptor may contain the attributes listed in the table below:
+
+| **Field**                     | **Type**  | **Attribute Description** |
+| ----------------------------- | --------- | ------------------------- |
+| `CARDINALITY`                 | `integer` | The number of rows in the result set. |
+| `DATA`                        | N/A       | The data value. |
+| `DATETIME_INTERVAL_CODE`      | `integer` | If `TYPE` is `9`:

`1 - DATE`

`2 - TIME`

`3 - TIMESTAMP`

`4 - TIME WITH TIMEZONE`

`5 - TIMESTAMP WITH TIMEZONE` | +| `DATETIME_INTERVAL_PRECISION` | `integer` | Unused. | +| `INDICATOR` | `integer` | Indicates a `NULL` or truncated value. | +| `KEY_MEMBER` | `integer` | Unused (returns `FALSE`). | +| `LENGTH` | `integer` | The data length (as stored on server). | +| `NAME` | `string` | The name of the column in which the data resides. | +| `NULLABLE` | `integer` | Unused (returns `TRUE`). | +| `OCTET_LENGTH` | `integer` | The data length (in bytes) as stored on server. | +| `PRECISION` | `integer` | The data precision (if the data is of `numeric` type). | +| `RETURNED_LENGTH` | `integer` | Actual length of data item. | +| `RETURNED_OCTET_LENGTH` | `integer` | Actual length of data item. | +| `SCALE` | `integer` | The data scale (if the data is of `numeric` type). | +| `TYPE` | `integer` | A numeric code that represents the data type of the column:

`1 - SQL3_CHARACTER`

`2 - SQL3_NUMERIC`

`3 - SQL3_DECIMAL`

`4 - SQL3_INTEGER`

`5 - SQL3_SMALLINT`

`6 - SQL3_FLOAT`

`7 - SQL3_REAL`

`8 - SQL3_DOUBLE_PRECISION`

`9 - SQL3_DATE_TIME_TIMESTAMP`

`10 - SQL3_INTERVAL`

`12 - SQL3_CHARACTER_VARYING`

`13 - SQL3_ENUMERATED`

`14 - SQL3_BIT`

`15 - SQL3_BIT_VARYING`

`16 - SQL3_BOOLEAN` |
+
+## Example - Using a Descriptor to Return Data
+
+The following simple application executes an SQL statement entered by an end user. The code sample demonstrates:
+
+- how to use a SQL descriptor to execute a `SELECT` statement.
+- how to find the data and metadata returned by the statement.
+
+The application accepts an SQL statement from an end user, tests the statement to see if it includes the `SELECT` keyword, and executes the statement.
+
+When invoking the application, an end user must provide the name of the database on which the SQL statement will be performed, and a string that contains the text of the query.
+
+For example, a user might invoke the sample with the following command:
+
+```text
+./exec_stmt edb "SELECT * FROM emp"
+
+
+/************************************************************
+ * exec_stmt.pgc
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sql3types.h>
+#include <sqlca.h>
+
+EXEC SQL WHENEVER SQLERROR SQLPRINT;
+static void print_meta_data( char * desc_name );
+
+char *md1 = "col field                data              ret";
+char *md2 = "num name                 type              len";
+char *md3 = "--- -------------------- ----------------- ---";
+
+int main( int argc, char *argv[] )
+{
+
+    EXEC SQL BEGIN DECLARE SECTION;
+        char *db = argv[1];
+        char *stmt = argv[2];
+        int   col_count;
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL CONNECT TO :db;
+
+    EXEC SQL ALLOCATE DESCRIPTOR parse_desc;
+    EXEC SQL PREPARE query FROM :stmt;
+    EXEC SQL DESCRIBE query INTO SQL DESCRIPTOR parse_desc;
+    EXEC SQL GET DESCRIPTOR 'parse_desc' :col_count = COUNT;
+
+    if( col_count == 0 )
+    {
+        EXEC SQL EXECUTE IMMEDIATE :stmt;
+
+        if( sqlca.sqlcode >= 0 )
+            EXEC SQL COMMIT;
+    }
+    else
+    {
+        int row;
+
+        EXEC SQL ALLOCATE DESCRIPTOR row_desc;
+        EXEC SQL DECLARE my_cursor CURSOR FOR query;
+        EXEC SQL OPEN my_cursor;
+
+        for( row = 0; ; row++ )
+        {
+            EXEC SQL BEGIN DECLARE SECTION;
+                int col;
+            EXEC SQL END DECLARE SECTION;
+
+            EXEC SQL FETCH IN my_cursor
+                INTO SQL DESCRIPTOR row_desc;
+
+            if( sqlca.sqlcode != 0 )
+                break;
+
+            if( row == 0 )
+                print_meta_data( "row_desc" );
+
+            printf("[RECORD %d]\n", row+1);
+
+            for( col = 1; col <= col_count; col++ )
+            {
+                EXEC SQL BEGIN DECLARE SECTION;
+                    short   ind;
+                    varchar val[40+1];
+                    varchar name[20+1];
+                EXEC SQL END DECLARE SECTION;
+
+                EXEC SQL GET DESCRIPTOR 'row_desc'
+                    VALUE :col
+                    :val = DATA, :ind = INDICATOR, :name = NAME;
+
+                if( ind == -1 )
+                    printf( " %-20s : <null>\n", name.arr );
+                else if( ind > 0 )
+                    printf( " %-20s : <truncated>\n", name.arr );
+                else
+                    printf( " %-20s : %s\n", name.arr, val.arr );
+            }
+
+            printf( "\n" );
+
+        }
+        printf( "%d rows\n", row );
+    }
+
+    exit( 0 );
+}
+
+static void print_meta_data( char *desc_name )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        char *desc = desc_name;
+        int   col_count;
+        int   col;
+    EXEC SQL END DECLARE SECTION;
+
+    static char *types[] =
+    {
+        "unused           ",
+        "CHARACTER        ",
+        "NUMERIC          ",
+        "DECIMAL          ",
+        "INTEGER          ",
+        "SMALLINT         ",
+        "FLOAT            ",
+        "REAL             ",
+        "DOUBLE           ",
+        "DATE_TIME        ",
+        "INTERVAL         ",
+        "unused           ",
+        "CHARACTER_VARYING",
+        "ENUMERATED       ",
+        "BIT              ",
+        "BIT_VARYING      ",
+        "BOOLEAN          ",
+        "abstract         "
+    };
+
+    EXEC SQL GET DESCRIPTOR :desc :col_count = count;
+
+    printf( "%s\n", md1 );
+    printf( "%s\n", md2 );
+    printf( "%s\n", md3 );
+
+    for( col = 1; col <= col_count; col++ )
+    {
+        EXEC SQL BEGIN DECLARE SECTION;
+            int     type;
+            int     ret_len;
+            varchar name[21];
+        EXEC SQL END DECLARE SECTION;
+        char *type_name;
+
+        EXEC SQL GET DESCRIPTOR :desc
+            VALUE :col
+            :name = NAME,
+            :type = TYPE,
+            :ret_len = RETURNED_OCTET_LENGTH;
+
+        if( type > 0 && type < SQL3_abstract )
+            type_name = types[type];
+        else
+            type_name = "unknown";
+
+        printf( "%02d: %-20s %-17s %04d\n",
+                col, name.arr, type_name, ret_len );
+    }
+    printf( "\n" );
+}
+
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio` and `stdlib` libraries, SQL data type symbols, and the `SQLCA` (SQL communications area) structure:
+
+```text
+#include <stdio.h>
+#include <stdlib.h>
+#include <sql3types.h>
+#include <sqlca.h>
+```
+
+The sample provides minimal error handling; when the application encounters an SQL error, it prints the error message on screen:
+
+```text
+EXEC SQL WHENEVER SQLERROR SQLPRINT;
+```
+
+The application includes a forward-declaration for a function named `print_meta_data()` that will print the metadata found in a descriptor:
+
+```text
+static void print_meta_data( char * desc_name );
+```
+
+The following code specifies the column header information that the application will use when printing the metadata:
+
+```text
+char *md1 = "col field                data              ret";
+char *md2 = "num name                 type              len";
+char *md3 = "--- -------------------- ----------------- ---";
+
+int main( int argc, char *argv[] )
+{
+```
+
+The following declaration section identifies the host variables that will contain the name of the database to which the application will connect, the content of the SQL statement, and a host variable that will hold the number of columns in the result set (if any):
+
+```text
+EXEC SQL BEGIN DECLARE SECTION;
+    char *db = argv[1];
+    char *stmt = argv[2];
+    int   col_count;
+EXEC SQL END DECLARE SECTION;
+```
+
+The application connects to the database (using the default credentials):
+
+```text
+EXEC SQL CONNECT TO :db;
+```
+
+Next, the application allocates an SQL descriptor to hold the metadata for a statement:
+
+```text
+EXEC SQL ALLOCATE DESCRIPTOR parse_desc;
+```
+
+The application uses a `PREPARE` statement to syntax-check the string provided by the user:
+
+```text
+EXEC SQL PREPARE query FROM :stmt;
+```
+
+and a `DESCRIBE` statement to move the metadata for the query into the SQL descriptor:
+
+```text
+EXEC SQL DESCRIBE query INTO SQL DESCRIPTOR parse_desc;
+```
+
+Then, the application interrogates the descriptor to discover the number of columns in the result set, and stores that in the host variable `col_count`:
+
+```text
+EXEC SQL GET DESCRIPTOR parse_desc :col_count = COUNT;
+```
+
+If the column count is zero, the end user did not enter a `SELECT` statement; the application uses an `EXECUTE IMMEDIATE` statement to process the contents of the statement:
+
+```text
+if( col_count == 0 )
+{
+    EXEC SQL EXECUTE IMMEDIATE :stmt;
+```
+
+If the statement executes successfully, the application performs a `COMMIT`:
+
+```text
+if( sqlca.sqlcode >= 0 )
+    EXEC SQL COMMIT;
+}
+else
+{
+```
+
+If the statement entered by the user is a `SELECT` statement (which we know because the column count is non-zero), the application declares a variable named `row`.
+
+```text
+int row;
+```
+
+Then, the application allocates another descriptor that holds the description and the values of a specific row in the result set:
+
+```text
+EXEC SQL ALLOCATE DESCRIPTOR row_desc;
+```
+
+The application declares and opens a cursor for the prepared statement:
+
+```text
+EXEC SQL DECLARE my_cursor CURSOR FOR query;
+EXEC SQL OPEN my_cursor;
+```
+
+The application loops through the rows in the result set:
+
+```text
+for( row = 0; ; row++ )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        int col;
+    EXEC SQL END DECLARE SECTION;
+```
+
+Then, it uses a `FETCH` to retrieve the next row from the cursor into the descriptor:
+
+```text
+EXEC SQL FETCH IN my_cursor INTO SQL DESCRIPTOR row_desc;
+```
+
+The application confirms that the `FETCH` did not fail; if the `FETCH` fails, the application has reached the end of the result set, and breaks the loop:
+
+```text
+if( sqlca.sqlcode != 0 )
+    break;
+```
+
+The application checks to see if this is the first row of the cursor; if it is, the application prints the metadata for the row:
+
+```text
+if( row == 0 )
+    print_meta_data( "row_desc" );
+```
+
+Next, it prints a record header containing the row number:
+
+```text
+printf("[RECORD %d]\n", row+1);
+```
+
+Then, it loops through each column in the row:
+
+```text
+for( col = 1; col <= col_count; col++ )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+        short   ind;
+        varchar val[40+1];
+        varchar name[20+1];
+    EXEC SQL END DECLARE SECTION;
+```
+
+The application interrogates the row descriptor (`row_desc`) to copy the column value (`:val`), null indicator (`:ind`) and column name (`:name`) into the host variables declared above. Notice that you can retrieve multiple items from a descriptor using a comma-separated list:
+
+```text
+EXEC SQL GET DESCRIPTOR row_desc
+    VALUE :col
+    :val = DATA, :ind = INDICATOR, :name = NAME;
+```
+
+If the null indicator (`ind`) is negative, the column value is `NULL`; if the null indicator is greater than `0`, the column value is too long to fit into the `val` host variable (so we print `<truncated>`); otherwise, the null indicator is `0` (meaning `NOT NULL`), so we print the value. In each case, we prefix the value (or `<null>` or `<truncated>`) with the name of the column.
+
+```text
+if( ind == -1 )
+    printf( " %-20s : <null>\n", name.arr );
+else if( ind > 0 )
+    printf( " %-20s : <truncated>\n", name.arr );
+else
+    printf( " %-20s : %s\n", name.arr, val.arr );
+}
+
+printf( "\n" );
+}
+```
+
+When the loop terminates, the application prints the number of rows fetched, and exits:
+
+```text
+    printf( "%d rows\n", row );
+}
+
+exit( 0 );
+}
+```
+
+The `print_meta_data()` function extracts the metadata from a descriptor and prints the name, data type, and length of each column:
+
+```text
+static void print_meta_data( char *desc_name )
+{
+```
+
+The application declares host variables:
+
+```text
+EXEC SQL BEGIN DECLARE SECTION;
+    char *desc = desc_name;
+    int   col_count;
+    int   col;
+EXEC SQL END DECLARE SECTION;
+```
+
+The application then defines an array of character strings that map numeric data type values into data type names. We use the numeric value found in the descriptor to index into this array. For example, if we find that a given column is of type `2`, we can find the name of that type (`NUMERIC`) by writing `types[2]`.
+
+```text
+static char *types[] =
+{
+    "unused           ",
+    "CHARACTER        ",
+    "NUMERIC          ",
+    "DECIMAL          ",
+    "INTEGER          ",
+    "SMALLINT         ",
+    "FLOAT            ",
+    "REAL             ",
+    "DOUBLE           ",
+    "DATE_TIME        ",
+    "INTERVAL         ",
+    "unused           ",
+    "CHARACTER_VARYING",
+    "ENUMERATED       ",
+    "BIT              ",
+    "BIT_VARYING      ",
+    "BOOLEAN          ",
+    "abstract         "
+};
+```
+
+The application retrieves the column count from the descriptor. Notice that the program refers to the descriptor using a host variable `(desc)` that contains the name of the descriptor. In most scenarios, you would use an identifier to refer to a descriptor, but in this case, the caller provided the descriptor name, so we can use a host variable to refer to the descriptor.
+
+```text
+EXEC SQL GET DESCRIPTOR :desc :col_count = count;
+```
+
+The application prints the column headers (defined at the beginning of this application):
+
+```text
+printf( "%s\n", md1 );
+printf( "%s\n", md2 );
+printf( "%s\n", md3 );
+```
+
+Then, it loops through each column found in the descriptor and prints the name, type, and length of each column:
+
+```text
+for( col = 1; col <= col_count; col++ )
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+    int type;
+    int ret_len;
+    varchar name[21];
+    EXEC SQL END DECLARE SECTION;
+    char *type_name;
+```
+
+It retrieves the name, type code, and length of the current column:
+
+```text
+EXEC SQL GET DESCRIPTOR :desc
+    VALUE :col
+    :name = NAME,
+    :type = TYPE,
+    :ret_len = RETURNED_OCTET_LENGTH;
+```
+
+If the numeric type code matches a 'known' type code (that is, a type code found in the `types[]` array), it sets `type_name` to the name of the corresponding type; otherwise, it sets `type_name` to `"unknown"`.
+
+```text
+if( type > 0 && type < SQL3_abstract )
+    type_name = types[type];
+else
+    type_name = "unknown";
+```
+
+Finally, it prints the column number, name, type name, and length:
+
+```text
+    printf( "%02d: %-20s %-17s %04d\n",
+        col, name.arr, type_name, ret_len );
+    }
+    printf( "\n" );
+}
+```
+
+If you invoke the sample application with the following command:
+
+```text
+./exec_stmt test "SELECT * FROM emp WHERE empno IN(7902, 7934)"
+```
+
+The application returns:
+
+```text
+col field                data              ret
+num name                 type              len
+--- -------------------- ----------------- ---
+01: empno                NUMERIC           0004
+02: ename                CHARACTER_VARYING 0004
+03: job                  CHARACTER_VARYING 0007
+04: mgr                  NUMERIC           0004
+05: hiredate             DATE_TIME         0018
+06: sal                  NUMERIC           0007
+07: comm                 NUMERIC           0000
+08: deptno               NUMERIC           0002
+
+[RECORD 1]
+  empno                : 7902
+  ename                : FORD
+  job                  : ANALYST
+  mgr                  : 7566
+  hiredate             : 03-DEC-81 00:00:00
+  sal                  : 3000.00
+  comm                 : <null>
+  deptno               : 20
+
+[RECORD 2]
+  empno                : 7934
+  ename                : MILLER
+  job                  : CLERK
+  mgr                  : 7782
+  hiredate             : 23-JAN-82 00:00:00
+  sal                  : 1300.00
+  comm                 : <null>
+  deptno               : 10
+
+2 rows
+```
diff --git a/product_docs/docs/epas/11/ecpgplus_guide/05_building_executing_dynamic_sql_statements.mdx b/product_docs/docs/epas/11/ecpgplus_guide/05_building_executing_dynamic_sql_statements.mdx
new file mode 100644
index 00000000000..caf7707262c
--- /dev/null
+++ b/product_docs/docs/epas/11/ecpgplus_guide/05_building_executing_dynamic_sql_statements.mdx
@@ -0,0 +1,793 @@
+---
+title: "Building and Executing Dynamic SQL Statements"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/building_executing_dynamic_sql_statements.html"
+---
+
+
+
+The following examples demonstrate four techniques for building and executing dynamic SQL statements. Each example demonstrates processing a different combination of statement and input types:
+
+- The first example demonstrates processing and executing a SQL statement that does not contain a `SELECT` statement and does not require input variables. This example corresponds to the techniques used by Oracle Dynamic SQL Method 1.
+- The second example demonstrates processing and executing a SQL statement that does not contain a `SELECT` statement, and contains a known number of input variables. This example corresponds to the techniques used by Oracle Dynamic SQL Method 2.
+- The third example demonstrates processing and executing a SQL statement that may contain a `SELECT` statement, and includes a known number of input variables. This example corresponds to the techniques used by Oracle Dynamic SQL Method 3.
+- The fourth example demonstrates processing and executing a SQL statement that may contain a `SELECT` statement, and includes an unknown number of input variables. This example corresponds to the techniques used by Oracle Dynamic SQL Method 4.
+
+## Example - Executing a Non-query Statement Without Parameters
+
+The following example demonstrates how to use the `EXECUTE IMMEDIATE` command to execute a SQL statement where the text of the statement is not known until you run the application. You cannot use `EXECUTE IMMEDIATE` to execute a statement that returns a result set or a statement that contains parameter placeholders.
+
+The `EXECUTE IMMEDIATE` statement parses and plans the SQL statement each time it executes, which can have a negative impact on the performance of your application. If you plan to execute the same statement repeatedly, consider using the `PREPARE/EXECUTE` technique described in the next example.
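+
+For instance, a client might build the statement text at run time before executing it. The following minimal sketch is hypothetical (the `stmtBuf` variable and the table name are illustrative only, and an open connection is assumed), but it shows the pattern; the complete sample program follows:
+
+```text
+char stmtBuf[100];
+
+/* Build the statement text at run time (hypothetical table name) */
+snprintf(stmtBuf, sizeof(stmtBuf), "DROP TABLE %s", "scratch_data");
+
+/* Parse, plan, and execute the statement in a single step */
+EXEC SQL EXECUTE IMMEDIATE :stmtBuf;
+```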
+
+```text
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+    char *insertStmt;
+
+    EXEC SQL WHENEVER SQLERROR DO handle_error();
+
+    EXEC SQL CONNECT :argv[1];
+
+    insertStmt = "INSERT INTO dept VALUES(50, 'ACCTG', 'SEATTLE')";
+
+    EXEC SQL EXECUTE IMMEDIATE :insertStmt;
+
+    fprintf(stderr, "ok\n");
+
+    EXEC SQL COMMIT RELEASE;
+
+    exit(EXIT_SUCCESS);
+}
+
+
+static void handle_error(void)
+{
+    fprintf(stderr, "%s\n", sqlca.sqlerrm.sqlerrmc);
+
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+
+    exit(EXIT_FAILURE);
+}
+
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, and `stdlib` libraries, and providing basic infrastructure for the program:
+
+```text
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+static void handle_error(void);
+int main(int argc, char *argv[])
+{
+    char *insertStmt;
+```
+
+The example then sets up an error handler; ECPGPlus calls the `handle_error()` function whenever a SQL error occurs:
+
+```text
+EXEC SQL WHENEVER SQLERROR DO handle_error();
+```
+
+Then, the example connects to the database using the credentials specified on the command line:
+
+```text
+EXEC SQL CONNECT :argv[1];
+```
+
+Next, the program uses an `EXECUTE IMMEDIATE` statement to execute a SQL statement, adding a row to the `dept` table:
+
+```text
+insertStmt = "INSERT INTO dept VALUES(50, 'ACCTG', 'SEATTLE')";
+
+EXEC SQL EXECUTE IMMEDIATE :insertStmt;
+```
+
+If the `EXECUTE IMMEDIATE` command fails for any reason, ECPGPlus will invoke the `handle_error()` function (which terminates the application after displaying an error message to the user). If the `EXECUTE IMMEDIATE` command succeeds, the application displays a message `(ok)` to the user, commits the changes, disconnects from the server, and terminates the application.
+
+```text
+    fprintf(stderr, "ok\n");
+
+    EXEC SQL COMMIT RELEASE;
+
+    exit(EXIT_SUCCESS);
+}
+```
+
+ECPGPlus calls the `handle_error()` function whenever it encounters a SQL error. The `handle_error()` function prints the content of the error message, resets the error handler, rolls back any changes, disconnects from the database, and terminates the application.
+
+```text
+static void handle_error(void)
+{
+    fprintf(stderr, "%s\n", sqlca.sqlerrm.sqlerrmc);
+
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+
+    exit(EXIT_FAILURE);
+}
+```
+
+## Example - Executing a Non-query Statement with a Specified Number of Placeholders
+
+To execute a non-query command that includes a known number of parameter placeholders, you must first `PREPARE` the statement (providing a *statement handle*), and then `EXECUTE` the statement using the statement handle. When the application executes the statement, it must provide a *value* for each placeholder found in the statement.
+
+When an application uses the `PREPARE/EXECUTE` mechanism, each SQL statement is parsed and planned once, but may execute many times (providing different *values* each time).
+
+ECPGPlus will convert each parameter value to the type required by the SQL statement, if possible; if not possible, ECPGPlus will report an error.
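+
+Because the statement is parsed and planned only once, a single statement handle can be executed repeatedly with different values. The following minimal sketch is hypothetical (it assumes an open connection and host variables holding two sets of `dept` values), but it illustrates the reuse pattern; the complete sample program follows:
+
+```text
+EXEC SQL PREPARE insHandle FROM "INSERT INTO dept VALUES(?, ?, ?)";
+
+/* Execute the same prepared statement twice, substituting different values */
+EXEC SQL EXECUTE insHandle USING :deptno1, :dname1, :loc1;
+EXEC SQL EXECUTE insHandle USING :deptno2, :dname2, :loc2;
+```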
+
+```text
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+    char *stmtText;
+
+    EXEC SQL WHENEVER SQLERROR DO handle_error();
+
+    EXEC SQL CONNECT :argv[1];
+
+    stmtText = "INSERT INTO dept VALUES(?, ?, ?)";
+
+    EXEC SQL PREPARE stmtHandle FROM :stmtText;
+
+    EXEC SQL EXECUTE stmtHandle USING :argv[2], :argv[3], :argv[4];
+
+    fprintf(stderr, "ok\n");
+
+    EXEC SQL COMMIT RELEASE;
+
+    exit(EXIT_SUCCESS);
+}
+
+static void handle_error(void)
+{
+    printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+
+    exit(EXIT_FAILURE);
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, `stdlib`, and `sqlca` libraries, and providing basic infrastructure for the program:
+
+```text
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+    char *stmtText;
+```
+
+The example then sets up an error handler; ECPGPlus calls the `handle_error()` function whenever a SQL error occurs:
+
+```text
+EXEC SQL WHENEVER SQLERROR DO handle_error();
+```
+
+Then, the example connects to the database using the credentials specified on the command line:
+
+```text
+EXEC SQL CONNECT :argv[1];
+```
+
+Next, the program uses a `PREPARE` statement to parse and plan a statement that includes three parameter markers - if the `PREPARE` statement succeeds, it will create a statement handle that you can use to execute the statement (in this example, the statement handle is named `stmtHandle`). You can execute a given statement multiple times using the same statement handle.
+
+```text
+stmtText = "INSERT INTO dept VALUES(?, ?, ?)";
+
+EXEC SQL PREPARE stmtHandle FROM :stmtText;
+```
+
+After parsing and planning the statement, the application uses the `EXECUTE` statement to execute the statement associated with the statement handle, substituting user-provided values for the parameter markers:
+
+```text
+EXEC SQL EXECUTE stmtHandle USING :argv[2], :argv[3], :argv[4];
+```
+
+If the `EXECUTE` command fails for any reason, ECPGPlus will invoke the `handle_error()` function (which terminates the application after displaying an error message to the user). If the `EXECUTE` command succeeds, the application displays a message `(ok)` to the user, commits the changes, disconnects from the server, and terminates the application.
+
+```text
+    fprintf(stderr, "ok\n");
+
+    EXEC SQL COMMIT RELEASE;
+
+    exit(EXIT_SUCCESS);
+}
+```
+
+ECPGPlus calls the `handle_error()` function whenever it encounters a SQL error. The `handle_error()` function prints the content of the error message, resets the error handler, rolls back any changes, disconnects from the database, and terminates the application.
+
+```text
+static void handle_error(void)
+{
+    printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+    exit(EXIT_FAILURE);
+}
+```
+
+## Example - Executing a Query With a Known Number of Placeholders
+
+This example demonstrates how to execute a *query* with a known number of input parameters, and with a known number of columns in the result set. This method uses the `PREPARE` statement to parse and plan a query, before opening a cursor and iterating through the result set.
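+
+The same idea applies to cursors: because the underlying query is prepared once, a cursor built over it can be closed and re-opened with a different parameter value without re-parsing or re-planning the query. The following sketch is hypothetical (it assumes the prepared statement and cursor declared as in the listing that follows, and host variables holding two department numbers):
+
+```text
+EXEC SQL OPEN empCursor USING :first_deptno;
+/* ... fetch the rows for the first department ... */
+EXEC SQL CLOSE empCursor;
+
+EXEC SQL OPEN empCursor USING :second_deptno;
+/* ... fetch the rows for the second department ... */
+EXEC SQL CLOSE empCursor;
+```
+
+The complete example follows: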
+
+```text
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+    VARCHAR empno[10];
+    VARCHAR ename[20];
+
+    EXEC SQL WHENEVER SQLERROR DO handle_error();
+
+    EXEC SQL CONNECT :argv[1];
+
+    EXEC SQL PREPARE queryHandle
+        FROM "SELECT empno, ename FROM emp WHERE deptno = ?";
+
+    EXEC SQL DECLARE empCursor CURSOR FOR queryHandle;
+
+    EXEC SQL OPEN empCursor USING :argv[2];
+
+    EXEC SQL WHENEVER NOT FOUND DO break;
+
+    while(true)
+    {
+
+        EXEC SQL FETCH empCursor INTO :empno, :ename;
+
+        printf("%-10s %s\n", empno.arr, ename.arr);
+    }
+
+    EXEC SQL CLOSE empCursor;
+
+    EXEC SQL COMMIT RELEASE;
+
+    exit(EXIT_SUCCESS);
+}
+
+static void handle_error(void)
+{
+    printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+
+    exit(EXIT_FAILURE);
+}
+
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, `stdlib`, `stdbool`, and `sqlca` libraries, and providing basic infrastructure for the program:
+
+```text
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sqlca.h>
+
+static void handle_error(void);
+
+int main(int argc, char *argv[])
+{
+    VARCHAR empno[10];
+    VARCHAR ename[20];
+```
+
+The example then sets up an error handler; ECPGPlus calls the `handle_error()` function whenever a SQL error occurs:
+
+```text
+EXEC SQL WHENEVER SQLERROR DO handle_error();
+```
+
+Then, the example connects to the database using the credentials specified on the command line:
+
+```text
+EXEC SQL CONNECT :argv[1];
+```
+
+Next, the program uses a `PREPARE` statement to parse and plan a query that includes a single parameter marker - if the `PREPARE` statement succeeds, it will create a statement handle that you can use to execute the statement (in this example, the statement handle is named `queryHandle`). You can execute a given statement multiple times using the same statement handle.
+
+```text
+EXEC SQL PREPARE queryHandle
+    FROM "SELECT empno, ename FROM emp WHERE deptno = ?";
+```
+
+The program then declares and opens the cursor, `empCursor`, substituting a user-provided value for the parameter marker in the prepared `SELECT` statement. Notice that the `OPEN` statement includes a `USING` clause: the `USING` clause must provide a *value* for each placeholder found in the query:
+
+```text
+EXEC SQL DECLARE empCursor CURSOR FOR queryHandle;
+
+EXEC SQL OPEN empCursor USING :argv[2];
+
+EXEC SQL WHENEVER NOT FOUND DO break;
+
+while(true)
+{
+```
+
+The program iterates through the cursor and prints the employee number and name of each employee in the selected department:
+
+```text
+    EXEC SQL FETCH empCursor INTO :empno, :ename;
+
+    printf("%-10s %s\n", empno.arr, ename.arr);
+}
+```
+
+The program then closes the cursor, commits any changes, disconnects from the server, and terminates the application.
+
+```text
+    EXEC SQL CLOSE empCursor;
+
+    EXEC SQL COMMIT RELEASE;
+
+    exit(EXIT_SUCCESS);
+}
+```
+
+The application calls the `handle_error()` function whenever it encounters a SQL error. The `handle_error()` function prints the content of the error message, resets the error handler, rolls back any changes, disconnects from the database, and terminates the application.
+
+```text
+static void handle_error(void)
+{
+    printf("%s\n", sqlca.sqlerrm.sqlerrmc);
+
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+
+    exit(EXIT_FAILURE);
+}
+```
+
+
+
+## Example - Executing a Query With an Unknown Number of Variables
+
+The next example demonstrates executing a query with an unknown number of input parameters and/or columns in the result set. This type of query may occur when you prompt the user for the text of the query, or when a query is assembled from a form on which the user chooses from a number of conditions (i.e., a filter).
+
+```text
+/***********************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlda.h>
+#include <sqlcpr.h>
+
+SQLDA *params;
+SQLDA *results;
+
+static void allocateDescriptors(int count,
+                                int varNameLength,
+                                int indNameLength);
+static void bindParams(void);
+static void displayResultSet(void);
+
+int main(int argc, char *argv[])
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+    char *username = argv[1];
+    char *password = argv[2];
+    char *stmtText = argv[3];
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL WHENEVER SQLERROR sqlprint;
+
+    EXEC SQL CONNECT TO test
+        USER :username
+        IDENTIFIED BY :password;
+
+    params  = sqlald(20, 64, 64);
+    results = sqlald(20, 64, 64);
+
+    EXEC SQL PREPARE stmt FROM :stmtText;
+
+    EXEC SQL DECLARE dynCursor CURSOR FOR stmt;
+
+    bindParams();
+
+    EXEC SQL OPEN dynCursor USING DESCRIPTOR params;
+
+    displayResultSet();
+}
+
+static void bindParams(void)
+{
+    EXEC SQL DESCRIBE BIND VARIABLES FOR stmt INTO params;
+
+    if (params->F < 0)
+        fprintf(stderr, "Too many parameters required\n");
+    else
+    {
+        int i;
+
+        params->N = params->F;
+
+        for (i = 0; i < params->F; i++)
+        {
+            char *paramName = params->S[i];
+            int   nameLen   = params->C[i];
+            char  paramValue[255];
+
+            printf("Enter value for parameter %.*s: ",
+                   nameLen, paramName);
+
+            fgets(paramValue, sizeof(paramValue), stdin);
+
+            params->T[i] = 1;    /* Data type = Character (1) */
+            params->L[i] = strlen(paramValue) - 1;
+            params->V[i] = strdup(paramValue);
+        }
+    }
+}
+
+static void displayResultSet(void)
+{
+    EXEC SQL DESCRIBE SELECT LIST FOR stmt INTO results;
+
+    if (results->F < 0)
+        fprintf(stderr, "Too many columns returned by query\n");
+    else if (results->F == 0)
+        return;
+    else
+    {
+        int col;
+
+        results->N = results->F;
+
+        for (col = 0; col < results->F; col++)
+        {
+            int null_permitted, length;
+
+            sqlnul(&results->T[col],
+                   &results->T[col],
+                   &null_permitted);
+
+            switch (results->T[col])
+            {
+                case 2: /* NUMERIC */
+                {
+                    int precision, scale;
+
+                    sqlprc(&results->L[col], &precision, &scale);
+
+                    if (precision == 0)
+                        precision = 38;
+
+                    length = precision + 3;
+                    break;
+                }
+
+                case 12: /* DATE */
+                {
+                    length = 30;
+                    break;
+                }
+
+                default: /* Others */
+                {
+                    length = results->L[col] + 1;
+                    break;
+                }
+            }
+
+            results->V[col] = realloc(results->V[col], length);
+            results->L[col] = length;
+            results->T[col] = 1;
+        }
+
+        EXEC SQL WHENEVER NOT FOUND DO break;
+
+        while (1)
+        {
+            const char *delimiter = "";
+
+            EXEC SQL FETCH dynCursor USING DESCRIPTOR results;
+
+            for (col = 0; col < results->F; col++)
+            {
+                if (*results->I[col] == -1)
+                    printf("%s%s", delimiter, "<null>");
+                else
+                    printf("%s%s", delimiter, results->V[col]);
+                delimiter = ", ";
+            }
+
+            printf("\n");
+        }
+    }
+}
+/***********************************************************/
+```
+
+The code sample begins by including the prototypes and type definitions for the C `stdio`, `string`, and `stdlib` libraries.
In addition, the program includes the `sqlda.h` and `sqlcpr.h` header files. `sqlda.h` defines the SQLDA structure used throughout this example. `sqlcpr.h` defines a small set of functions used to interrogate the metadata found in an SQLDA structure.
+
+```text
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sqlda.h>
+#include <sqlcpr.h>
+```
+
+Next, the program declares pointers to two SQLDA structures. The first SQLDA structure `(params)` will be used to describe the metadata for any parameter markers found in the dynamic query text. The second SQLDA structure `(results)` will contain both the metadata and the result set obtained by executing the dynamic query.
+
+```text
+SQLDA *params;
+SQLDA *results;
+```
+
+The program then declares two helper functions (defined near the end of the code sample):
+
+```text
+static void bindParams(void);
+static void displayResultSet(void);
+```
+
+Next, the program declares three host variables; the first two (`username` and `password`) are used to connect to the database server; the third host variable `(stmtText)` is a NULL-terminated C string containing the text of the query to execute. Notice that the values for these three host variables are derived from the command-line arguments. When the program begins execution, it sets up an error handler and then connects to the database server:
+
+```text
+int main(int argc, char *argv[])
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+    char *username = argv[1];
+    char *password = argv[2];
+    char *stmtText = argv[3];
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL WHENEVER SQLERROR sqlprint;
+    EXEC SQL CONNECT TO test
+        USER :username
+        IDENTIFIED BY :password;
+```
+
+Next, the program calls the `sqlald()` function to allocate the memory required for each descriptor. Each descriptor contains (among other things):
+
+- a pointer to an array of column names
+- a pointer to an array of indicator names
+- a pointer to an array of data types
+- a pointer to an array of lengths
+- a pointer to an array of data values
+
+When you allocate an `SQLDA` descriptor, you specify the maximum number of columns you expect to find in the result set (for `SELECT`-list descriptors) or the maximum number of parameters you expect to find in the dynamic query text (for bind-variable descriptors) - in this case, we specify that we expect no more than 20 columns and 20 parameters. You must also specify a maximum length for each column (or parameter) name and each indicator variable name - in this case, we expect names to be no more than 64 bytes long.
+
+See the [SQLDA Structure](07_reference/#sqlda_structure) section for a complete description of the `SQLDA` structure.
+
+```text
+params  = sqlald(20, 64, 64);
+results = sqlald(20, 64, 64);
+```
+
+After allocating the `SELECT`-list and bind descriptors, the program prepares the dynamic statement and declares a cursor over the result set.
+
+```text
+EXEC SQL PREPARE stmt FROM :stmtText;
+
+EXEC SQL DECLARE dynCursor CURSOR FOR stmt;
+```
+
+Next, the program calls the `bindParams()` function. The `bindParams()` function examines the bind descriptor `(params)` and prompts the user for a value to substitute in place of each parameter marker found in the dynamic query.
+
+```text
+bindParams();
+```
+
+Finally, the program opens the cursor (using the parameter values supplied by the user, if any) and calls the `displayResultSet()` function to print the result set produced by the query.
+
+```text
+    EXEC SQL OPEN dynCursor USING DESCRIPTOR params;
+
+    displayResultSet();
+}
+```
+
+The `bindParams()` function determines whether the dynamic query contains any parameter markers, and, if so, prompts the user for a value for each parameter and then binds that value to the corresponding marker. The `DESCRIBE BIND VARIABLES` statement populates the `params` SQLDA structure with information describing each parameter marker.
+
+```text
+static void bindParams(void)
+{
+    EXEC SQL DESCRIBE BIND VARIABLES FOR stmt INTO params;
+```
+
+If the statement contains no parameter markers, `params->F` will contain 0. If the statement contains more parameters than will fit into the descriptor, `params->F` will contain a negative number (in this case, the absolute value of `params->F` indicates the number of parameter markers found in the statement). If `params->F` contains a positive number, that number indicates how many parameter markers were found in the statement.
+
+```text
+if (params->F < 0)
+    fprintf(stderr, "Too many parameters required\n");
+else
+{
+    int i;
+
+    params->N = params->F;
+```
+
+Next, the program executes a loop that prompts the user for a value, iterating once for each parameter marker found in the statement.
+
+```text
+for (i = 0; i < params->F; i++)
+{
+    char *paramName = params->S[i];
+    int   nameLen   = params->C[i];
+    char  paramValue[255];
+
+    printf("Enter value for parameter %.*s: ",
+           nameLen, paramName);
+
+    fgets(paramValue, sizeof(paramValue), stdin);
+```
+
+After prompting the user for a value for a given parameter, the program *binds* that value to the parameter by setting `params->T[i]` to indicate the data type of the value (see `Type Codes` for a list of type codes), `params->L[i]` to the length of the value (we subtract one to trim off the trailing new-line character added by `fgets()`), and `params->V[i]` to point to a copy of the NULL-terminated string provided by the user.
+
+```text
+    params->T[i] = 1;    /* Data type = Character (1) */
+    params->L[i] = strlen(paramValue) - 1;
+    params->V[i] = strdup(paramValue);
+    }
+  }
+}
+```
+
+The `displayResultSet()` function loops through each row in the result set and prints the value found in each column. `displayResultSet()` starts by executing a `DESCRIBE SELECT LIST` statement - this statement populates an SQLDA descriptor `(results)` with a description of each column in the result set.
+
+```text
+static void displayResultSet(void)
+{
+    EXEC SQL DESCRIBE SELECT LIST FOR stmt INTO results;
+```
+
+If the dynamic statement returns no columns (that is, the dynamic statement is not a `SELECT` statement), `results->F` will contain 0. If the statement returns more columns than will fit into the descriptor, `results->F` will contain a negative number (in this case, the absolute value of `results->F` indicates the number of columns returned by the statement). If `results->F` contains a positive number, that number indicates how many columns were returned by the query.
+
+```text
+if (results->F < 0)
+    fprintf(stderr, "Too many columns returned by query\n");
+else if (results->F == 0)
+    return;
+else
+{
+    int col;
+
+    results->N = results->F;
+```
+
+Next, the program enters a loop, iterating once for each column in the result set:
+
+```text
+for (col = 0; col < results->F; col++)
+{
+    int null_permitted, length;
+```
+
+To decode the type code found in `results->T`, the program invokes the `sqlnul()` function (see the description of the `T` member in [The SQLDA Structure](07_reference/#sqlda_structure)). This call to `sqlnul()` modifies `results->T[col]` to contain only the type code (the nullability flag is copied to `null_permitted`). This step is necessary because the `DESCRIBE SELECT LIST` statement encodes the type of each column and the nullability of each column into the `T` array.
+
+```text
+sqlnul(&results->T[col],
+       &results->T[col],
+       &null_permitted);
+```
+
+After decoding the actual data type of the column, the program modifies the results descriptor to tell ECPGPlus to return each value in the form of a NULL-terminated string. Before modifying the descriptor, the program must compute the amount of space required to hold each value. To make this computation, the program examines the maximum length of each column `(results->L[col])` and the data type of each column `(results->T[col])`.
+
+For numeric values (where `results->T[col] = 2`), the program calls the `sqlprc()` function to extract the precision and scale from the column length. To compute the number of bytes required to hold a numeric value in string form, `displayResultSet()` starts with the precision (that is, the maximum number of digits) and adds three bytes for a sign character, a decimal point, and a NULL terminator.
+
+```text
+switch (results->T[col])
+{
+    case 2: /* NUMERIC */
+    {
+        int precision, scale;
+
+        sqlprc(&results->L[col], &precision, &scale);
+
+        if (precision == 0)
+            precision = 38;
+
+        length = precision + 3;
+        break;
+    }
+```
+
+For date values, the program uses a somewhat arbitrary, hard-coded length of 30. In a real-world application, you may want to compute the amount of space required more carefully.
+
+```text
+case 12: /* DATE */
+{
+    length = 30;
+    break;
+}
+```
+
+For a value of any type other than date or numeric, `displayResultSet()` starts with the maximum column width reported by `DESCRIBE SELECT LIST` and adds one extra byte for the NULL terminator. Again, in a real-world application you may want to include more careful calculations for other data types.
+
+```text
+    default: /* Others */
+    {
+        length = results->L[col] + 1;
+        break;
+    }
+}
+```
+
+After computing the amount of space required to hold a given column, the program allocates enough memory to hold the value, sets `results->L[col]` to indicate the number of bytes found at `results->V[col]`, and sets the type code for the column `(results->T[col])` to `1` to instruct the upcoming `FETCH` statement to return the value in the form of a NULL-terminated string.
+
+```text
+    results->V[col] = realloc(results->V[col], length);
+    results->L[col] = length;
+    results->T[col] = 1;
+}
+```
+
+At this point, the results descriptor is configured such that a `FETCH` statement can copy each value into an appropriately sized buffer in the form of a NULL-terminated string.
+
+Next, the program defines a new error handler to break out of the upcoming loop when the cursor is exhausted.
+
+```text
+EXEC SQL WHENEVER NOT FOUND DO break;
+
+while (1)
+{
+    const char *delimiter = "";
+```
+
+The program executes a `FETCH` statement to fetch the next row in the cursor into the `results` descriptor. If the `FETCH` statement fails (because the cursor is exhausted), control transfers to the end of the loop because of the `EXEC SQL WHENEVER` directive found before the top of the loop.
+
+```text
+EXEC SQL FETCH dynCursor USING DESCRIPTOR results;
+```
+
+The `FETCH` statement will populate the following members of the results descriptor:
+
+- `*results->I[col]` will indicate whether the column contains a NULL value `(-1)` or a non-NULL value `(0)`. If the value is non-NULL but too large to fit into the space provided, the value is truncated and `*results->I[col]` will contain a positive value.
+- `results->V[col]` will contain the value fetched for the given column (unless `*results->I[col]` indicates that the column value is NULL).
+- `results->L[col]` will contain the length of the value fetched for the given column.
+
+Finally, `displayResultSet()` iterates through each column in the result set, examines the corresponding NULL indicator, and prints the value. The result set is not aligned - instead, each value is separated from the previous value by a comma.
+
+```text
+    for (col = 0; col < results->F; col++)
+    {
+        if (*results->I[col] == -1)
+            printf("%s%s", delimiter, "<null>");
+        else
+            printf("%s%s", delimiter, results->V[col]);
+        delimiter = ", ";
+    }
+
+    printf("\n");
+    }
+  }
+}
+/***********************************************************/
+```
diff --git a/product_docs/docs/epas/11/ecpgplus_guide/06_error_handling.mdx b/product_docs/docs/epas/11/ecpgplus_guide/06_error_handling.mdx
new file mode 100644
index 00000000000..73bbb350e6a
--- /dev/null
+++ b/product_docs/docs/epas/11/ecpgplus_guide/06_error_handling.mdx
@@ -0,0 +1,214 @@
+---
+title: "Error Handling"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/error_handling.html"
+---
+
+
+
+ECPGPlus provides two methods to detect and handle errors in embedded SQL code:
+
+- A client application can examine the `sqlca` data structure for error messages and supply customized error handling.
+- A client application can include `EXEC SQL WHENEVER` directives to instruct the ECPGPlus compiler to add error-handling code.
+
+## Error Handling with sqlca
+
+`sqlca` (SQL communications area) is a global variable used by `ecpglib` to communicate information from the server to the client application. After executing a SQL statement (for example, an `INSERT` or `SELECT` statement) you can inspect the contents of `sqlca` to determine if the statement completed successfully or failed.
+
+`sqlca` has the following structure:
+
+```text
+struct
+{
+    char sqlcaid[8];
+    long sqlabc;
+    long sqlcode;
+    struct
+    {
+        int  sqlerrml;
+        char sqlerrmc[SQLERRMC_LEN];
+    } sqlerrm;
+    char sqlerrp[8];
+    long sqlerrd[6];
+    char sqlwarn[8];
+    char sqlstate[5];
+} sqlca;
+```
+
+Use the following directive to implement `sqlca` functionality:
+
+```text
+EXEC SQL INCLUDE sqlca;
+```
+
+If you include this directive, you do not need to `#include` the `sqlca.h` file in the client application's header declarations.
+
+The Advanced Server `sqlca` structure contains the following members:
+
+`sqlcaid`
+
+  `sqlcaid` contains the string: `"SQLCA"`.
+
+`sqlabc`
+
+  `sqlabc` contains the size of the `sqlca` structure.
+
+`sqlcode`
+
+  The `sqlcode` member has been deprecated with SQL 92; Advanced Server supports `sqlcode` for backward compatibility, but you should use the `sqlstate` member when writing new code.
+
+  `sqlcode` is an integer value; a positive `sqlcode` value indicates that the client application has encountered a harmless processing condition, while a negative value indicates a warning or error.
+
+  If a statement processes without error, `sqlcode` will contain a value of `0`. If the client application encounters an error (or warning) during a statement's execution, `sqlcode` will contain the last code returned.
+
+  The SQL standard defines only a positive value of 100, which indicates that the most recent SQL statement processed returned/affected no rows. Since the SQL standard does not define other `sqlcode` values, please be aware that the values assigned to each condition may vary from database to database.
+
+`sqlerrm` is a structure embedded within `sqlca`, composed of two members:
+
+`sqlerrml`
+
+  `sqlerrml` contains the length of the error message currently stored in `sqlerrmc`.
+
+`sqlerrmc`
+
+  `sqlerrmc` contains the null-terminated message text associated with the code stored in `sqlstate`. If a message exceeds 149 characters in length, `ecpglib` will truncate the error message.
+
+`sqlerrp`
+
+  `sqlerrp` contains the string `"NOT SET"`.
+
+`sqlerrd` is an array that contains six elements:
+
+- `sqlerrd[1]` contains the OID of the processed row (if applicable).
+- `sqlerrd[2]` contains the number of processed or returned rows.
+- `sqlerrd[0]`, `sqlerrd[3]`, `sqlerrd[4]`, and `sqlerrd[5]` are unused.
+
+`sqlwarn` is an array that contains 8 characters:
+
+- `sqlwarn[0]` contains a value of `'W'` if any other element within `sqlwarn` is set to `'W'`.
+- `sqlwarn[1]` contains a value of `'W'` if a data value was truncated when it was stored in a host variable.
+- `sqlwarn[2]` contains a value of `'W'` if the client application encounters a non-fatal warning.
+- `sqlwarn[3]`, `sqlwarn[4]`, `sqlwarn[5]`, `sqlwarn[6]`, and `sqlwarn[7]` are unused.
+
+`sqlstate`
+
+  `sqlstate` is a five-character array that contains a SQL-compliant status code after the execution of a statement from the client application. If a statement processes without error, `sqlstate` will contain a value of `00000`. Please note that `sqlstate` is *not* a null-terminated string.
+
+  `sqlstate` codes are assigned in a hierarchical scheme:
+
+- The first two characters of `sqlstate` indicate the general class of the condition.
+- The last three characters of `sqlstate` indicate a specific status within the class.
+
+If the client application encounters multiple errors (or warnings) during an SQL statement's execution, `sqlstate` will contain the last code returned.
+
+The following table lists the `sqlstate` and `sqlcode` values, as well as the symbolic name and error description for the related condition:
+
+| sqlstate | sqlcode (Deprecated) | Symbolic Name | Description |
+| -------- | -------------------- | ------------- | ----------- |
+| `YE001` | `-12` | `ECPG_OUT_OF_MEMORY` | Virtual memory is exhausted. |
+| `YE002` | `-200` | `ECPG_UNSUPPORTED` | The preprocessor has generated an unrecognized item. This could indicate incompatibility between the preprocessor and the library. |
+| `07001` or `07002` | `-201` | `ECPG_TOO_MANY_ARGUMENTS` | The program specifies more variables than the command expects. |
+| `07001` or `07002` | `-202` | `ECPG_TOO_FEW_ARGUMENTS` | The program specifies fewer variables than the command expects. |
+| `21000` | `-203` | `ECPG_TOO_MANY_MATCHES` | The SQL command has returned multiple rows, but the statement was prepared to receive a single row. |
+| `42804` | `-204` | `ECPG_INT_FORMAT` | The host variable (defined in the C code) is of type INT, and the selected data is of a type that cannot be converted into an INT. `ecpglib` uses the `strtol()` function to convert string values into numeric form. |
+| `42804` | `-205` | `ECPG_UINT_FORMAT` | The host variable (defined in the C code) is an unsigned INT, and the selected data is of a type that cannot be converted into an unsigned INT. `ecpglib` uses the `strtoul()` function to convert string values into numeric form. |
+| `42804` | `-206` | `ECPG_FLOAT_FORMAT` | The host variable (defined in the C code) is of type FLOAT, and the selected data is of a type that cannot be converted into a FLOAT. `ecpglib` uses the `strtod()` function to convert string values into numeric form. |
+| `42804` | `-211` | `ECPG_CONVERT_BOOL` | The host variable (defined in the C code) is of type BOOL, and the selected data cannot be stored in a BOOL. |
+| `YE002` | `-212` | `ECPG_EMPTY` | The statement sent to the server was empty. |
+| `22002` | `-213` | `ECPG_MISSING_INDICATOR` | A NULL indicator variable has not been supplied for the NULL value returned by the server (the client application has received an unexpected NULL value). |
+| `42804` | `-214` | `ECPG_NO_ARRAY` | The server has returned an array, and the corresponding host variable is not capable of storing an array. |
+| `42804` | `-215` | `ECPG_DATA_NOT_ARRAY` | The server has returned a value that is not an array into a host variable that expects an array value. |
+| `08003` | `-220` | `ECPG_NO_CONN` | The client application has attempted to use a non-existent connection. |
+| `YE002` | `-221` | `ECPG_NOT_CONN` | The client application has attempted to use an allocated, but closed connection. |
+| `26000` | `-230` | `ECPG_INVALID_STMT` | The statement has not been prepared. |
+| `33000` | `-240` | `ECPG_UNKNOWN_DESCRIPTOR` | The specified descriptor is not found. |
+| `07009` | `-241` | `ECPG_INVALID_DESCRIPTOR_INDEX` | The descriptor index is out-of-range. |
+| `YE002` | `-242` | `ECPG_UNKNOWN_DESCRIPTOR_ITEM` | The client application has requested an invalid descriptor item (internal error). |
+| `07006` | `-243` | `ECPG_VAR_NOT_NUMERIC` | A dynamic statement has returned a numeric value for a non-numeric host variable. |
+| `07006` | `-244` | `ECPG_VAR_NOT_CHAR` | A dynamic SQL statement has returned a CHAR value, and the host variable is not a CHAR. |
+| | `-400` | `ECPG_PGSQL` | The server has returned an error message; the resulting message contains the error text. |
+| `08007` | `-401` | `ECPG_TRANS` | The server cannot start, commit, or roll back the specified transaction. |
+| `08001` | `-402` | `ECPG_CONNECT` | The client application's attempt to connect to the database has failed. |
+| `02000` | `100` | `ECPG_NOT_FOUND` | The last command retrieved or processed no rows, or you have reached the end of a cursor. |
+
+## EXEC SQL WHENEVER
+
+Use the `EXEC SQL WHENEVER` directive to implement simple error handling for client applications compiled with ECPGPlus.
The syntax of the directive is:
+
+```text
+EXEC SQL WHENEVER <condition> <action>;
+```
+
+This directive instructs the ECPG compiler to insert error-handling code into your program.
+
+The inserted code instructs the client application to perform a specified action when it detects a given condition. The *condition* may be one of the following:
+
+`SQLERROR`
+
+  A `SQLERROR` condition exists when `sqlca.sqlcode` is less than zero.
+
+`SQLWARNING`
+
+  A `SQLWARNING` condition exists when `sqlca.sqlwarn[0]` contains a `'W'`.
+
+`NOT FOUND`
+
+  A `NOT FOUND` condition exists when `sqlca.sqlcode` is `ECPG_NOT_FOUND` (when a query returns no data).
+
+You can specify that the client application perform one of the following *actions* if it encounters one of the previous conditions:
+
+`CONTINUE`
+
+  Specify `CONTINUE` to instruct the client application to continue processing, ignoring the current `condition`. `CONTINUE` is the default action.
+
+`DO CONTINUE`
+
+  An action of `DO CONTINUE` generates a C `continue` statement in the emitted code; if the client application encounters the condition, it skips the rest of the code in the current loop iteration and continues with the next iteration. You can use this action only within a loop.
+
+`GOTO label`
+
+  or
+
+`GO TO label`
+
+  Use a C `goto` statement to jump to the specified `label`.
+
+`SQLPRINT`
+
+  Print an error message to `stderr` (standard error), using the `sqlprint()` function. The `sqlprint()` function prints `sql error`, followed by the contents of `sqlca.sqlerrm.sqlerrmc`.
+
+`STOP`
+
+  Call `exit(1)` to signal an error, and terminate the program.
+
+`DO BREAK`
+
+  Execute the C `break` statement. Use this action within loops or `switch` statements.
+
+`CALL name(args)`
+
+or
+
+`DO name(args)`
+
+  Invoke the C function specified by the `name` parameter, using the arguments specified in the `args` parameter.
+
+**Example:**
+
+The following code fragment prints a message if the client application encounters a warning, and aborts the application if it encounters an error:
+
+```text
+EXEC SQL WHENEVER SQLWARNING SQLPRINT;
+EXEC SQL WHENEVER SQLERROR STOP;
+```
+
+!!! Note
+    The ECPGPlus compiler processes your program from top to bottom, even though the client application may not *execute* from top to bottom. A compiler directive is applied to each line in order, and remains in effect until the compiler encounters another directive. If the flow of control within your program is not top-to-bottom, you should consider adding error-handling directives to any parts of the program that might otherwise be missed during compilation.
diff --git a/product_docs/docs/epas/11/ecpgplus_guide/07_reference.mdx b/product_docs/docs/epas/11/ecpgplus_guide/07_reference.mdx
new file mode 100644
index 00000000000..11ca76d8800
--- /dev/null
+++ b/product_docs/docs/epas/11/ecpgplus_guide/07_reference.mdx
@@ -0,0 +1,1593 @@
+---
+title: "Reference"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+ - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/reference.html" +--- + + + +The sections that follow describe ecpgPlus language elements: + +- C-Preprocessor Directives +- Supported C Data Types +- Type Codes +- The SQLDA Structure +- ECPGPlus Statements + +## C-preprocessor Directives + +The ECPGPlus C-preprocessor enforces two behaviors that are dependent on the mode in which you invoke ECPGPlus: + +- `PROC` mode +- non-`PROC` mode + +**Compiling in PROC Mode** + +In `PROC` mode, ECPGPlus allows you to: + +- Declare host variables outside of an `EXEC SQL BEGIN/END DECLARE SECTION`. +- Use any C variable as a host variable as long as it is of a data type compatible with ECPG. + +When you invoke ECPGPlus in `PROC` mode (by including the `-C PROC` keywords), the ECPG compiler honors the following C-preprocessor directives: + +```text +#include +#if expression +#ifdef symbolName +#ifndef symbolName +#else +#elif expression +#endif +#define symbolName expansion +#define symbolName([macro arguments]) expansion +#undef symbolName +#defined(symbolName) +``` + +Pre-processor directives are used to effect or direct the code that is received by the compiler. For example, using the following code sample: + +```text +#if HAVE_LONG_LONG == 1 +#define BALANCE_TYPE long long +#else +#define BALANCE_TYPE double +#endif +... +BALANCE_TYPE customerBalance; +``` + +If you invoke ECPGPlus with the following command-line arguments: + +```text +ecpg –C PROC –DHAVE_LONG_LONG=1 +``` + +ECPGPlus will copy the entire fragment (without change) to the output file, but will only send the following tokens to the ECPG parser: + +```text +long long customerBalance; +``` + +On the other hand, if you invoke ECPGPlus with the following command-line arguments: + +```text +ecpg –C PROC –DHAVE_LONG_LONG=0 +``` + +The ECPG parser will receive the following tokens: + +```text +double customerBalance; +``` + +If your code uses preprocessor directives to filter the code that is sent to the compiler, the complete code is retained in the original code, while the ECPG parser sees only the processed token stream. + +You can also use compatible syntax when executing the following preprocessor directives with an `EXEC` directive: + +```text +EXEC ORACLE DEFINE +EXEC ORACLE UNDEF +EXEC ORACLE INCLUDE +EXEC ORACLE IFDEF +EXEC ORACLE IFNDEF +EXEC ORACLE ELIF +EXEC ORACLE ELSE +EXEC ORACLE ENDIF +EXEC ORACLE OPTION +``` + +For example, if your code includes the following: + +```text +EXEC ORACLE IFDEF HAVE_LONG_LONG; +#define BALANCE_TYPE long long +EXEC ORACLE ENDIF; +BALANCE_TYPE customerBalance; +``` + +If you invoke ECPGPlus with the following command-line arguments: + +```text +ecpg –C PROC DEFINE=HAVE_LONG_LONG=1 +``` + +ECPGPlus will send the following tokens to the output file, and the ECPG parser: + +```text +long long customerBalance; +``` + +!!! Note + The `EXEC ORACLE` pre-processor directives only work if you specify `-C PROC` on the ECPG command line. + +**Using the SELECT_ERROR Precompiler Option** + +When using ECPGPlus in compatible mode, you can use the `SELECT_ERROR` precompiler option to instruct your program how to handle result sets that contain more rows than the host variable can accommodate. The syntax is: + +```text +SELECT_ERROR={YES|NO} +``` + +The default value is `YES`; a `SELECT` statement will return an error message if the result set exceeds the capacity of the host variable. 
Specify `NO` to instruct the program to suppress error messages when a `SELECT` statement returns more rows than a host variable can accommodate.
+
+Use `SELECT_ERROR` with the `EXEC ORACLE OPTION` directive.
+
+**Compiling in non-PROC Mode**
+
+If you do not include the `-C PROC` command-line option:
+
+- C preprocessor directives are copied to the output file without change.
+- You must declare the type and name of each C variable that you intend to use as a host variable within an `EXEC SQL BEGIN/END DECLARE` section.
+
+When invoked in non-`PROC` mode, ECPG implements the behavior described in the PostgreSQL Core documentation.
+
+
+
+## Supported C Data Types
+
+An ECPGPlus application must deal with two sets of data types: SQL data types (such as `SMALLINT`, `DOUBLE PRECISION`, and `CHARACTER VARYING`) and C data types (like `short`, `double`, and `varchar[n]`). When an application fetches data from the server, ECPGPlus will map each SQL data type to the type of the C variable into which the data is returned.
+
+In general, ECPGPlus can convert most SQL server types into similar C types, but not all combinations are valid. For example, ECPGPlus will try to convert a SQL character value into a C integer value, but the conversion may fail (at execution time) if the SQL character value contains non-numeric characters. The reverse is also true; when an application sends a value to the server, ECPGPlus will try to convert the C data type into the required SQL type. Again, the conversion may fail (at execution time) if the C value cannot be converted into the required SQL type.
+
+ECPGPlus can convert any SQL type into C character values (`char[n]` or `varchar[n]`). Although it is safe to convert any SQL type to/from `char[n]` or `varchar[n]`, it is often convenient to use more natural C types such as `int`, `double`, or `float`.
+
+The supported C data types are:
+
+- `short`
+- `int`
+- `unsigned int`
+- `long long int`
+- `float`
+- `double`
+- `char[n+1]`
+- `varchar[n+1]`
+- `bool`
+- and any equivalent created by a `typedef`
+
+In addition to the numeric and character types supported by C, the `pgtypeslib` run-time library offers custom data types (and functions to operate on those types) for dealing with date/time and exact numeric values:
+
+- `timestamp`
+- `interval`
+- `date`
+- `decimal`
+- `numeric`
+
+To use a data type supplied by `pgtypeslib`, you must `#include` the proper header file.
+
+## Type Codes
+
+The following table contains the type codes for *external* data types. An external data type is used to indicate the type of a C host variable. When an application binds a value to a parameter or binds a buffer to a `SELECT`-list item, the type code in the corresponding SQLDA descriptor `(descriptor->T[column])` should be set to one of the following values:
+
+| **Type Code** | **Host Variable Type (C Data Type)** |
+| ------------- | ------------------------------------ |
+| `1, 2, 8, 11, 12, 15, 23, 24, 91, 94, 95, 96, 97` | `char[]` |
+| `3` | `int` |
+| `4, 7, 21` | `float` |
+| `5, 6` | `null-terminated string (char[length+1])` |
+| `9` | `varchar` |
+| `22` | `double` |
+| `68` | `unsigned int` |
+
+The following table contains the type codes for *internal* data types. An internal type code is used to indicate the type of a value as it resides in the database. The `DESCRIBE SELECT LIST` statement populates the data type array `(descriptor->T[column])` using the following values.
+
+| **Internal Type Code** | **Server Type** |
+| ---------------------- | --------------- |
+| `1` | `VARCHAR2` |
+| `2` | `NUMBER` |
+| `8` | `LONG` |
+| `11` | `ROWID` |
+| `12` | `DATE` |
+| `23` | `RAW` |
+| `24` | `LONG RAW` |
+| `96` | `CHAR` |
+| `100` | `BINARY FLOAT` |
+| `101` | `BINARY DOUBLE` |
+| `104` | `UROWID` |
+| `187` | `TIMESTAMP` |
+| `188` | `TIMESTAMP W/TIMEZONE` |
+| `189` | `INTERVAL YEAR TO MONTH` |
+| `190` | `INTERVAL DAY TO SECOND` |
+| `232` | `TIMESTAMP LOCAL_TZ` |
+
+
+
+## The SQLDA Structure
+
+Oracle Dynamic SQL method 4 uses the SQLDA data structure to hold the data and metadata for a dynamic SQL statement. A SQLDA structure can describe a set of input parameters corresponding to the parameter markers found in the text of a dynamic statement, or the result set of a dynamic statement. The layout of the SQLDA structure is:
+
+```text
+struct SQLDA
+{
+    int    N;  /* Number of entries                          */
+    char **V;  /* Variables                                  */
+    int   *L;  /* Variable lengths                           */
+    short *T;  /* Variable types                             */
+    short **I; /* Indicators                                 */
+    int    F;  /* Count of variables discovered by DESCRIBE  */
+    char **S;  /* Variable names                             */
+    short *M;  /* Variable name maximum lengths              */
+    short *C;  /* Variable name actual lengths               */
+    char **X;  /* Indicator names                            */
+    short *Y;  /* Indicator name maximum lengths             */
+    short *Z;  /* Indicator name actual lengths              */
+};
+```
+
+**Parameters**
+
+`N - maximum number of entries`
+
+  The `N` structure member contains the maximum number of entries that the SQLDA may describe. This member is populated by the `sqlald()` function when you allocate the SQLDA structure. Before using a descriptor in an `OPEN` or `FETCH` statement, you must set `N` to the *actual* number of values described.
+
+`V - data values`
+
+  The `V` structure member is a pointer to an array of data values.
+
+- For a `SELECT`-list descriptor, `V` points to an array of values returned by a `FETCH` statement (each member in the array corresponds to a column in the result set).
+- For a bind descriptor, `V` points to an array of parameter values (you must populate the values in this array before opening a cursor that uses the descriptor).
+
+Your application must allocate the space required to hold each value. Refer to the [displayResultSet()](05_building_executing_dynamic_sql_statements/#executing_query_with_unknown_number_of_variables) function for an example of how to allocate space for `SELECT`-list values.
+
+`L - length of each data value`
+
+  The `L` structure member is a pointer to an array of lengths. Each member of this array must indicate the amount of memory available in the corresponding member of the `V` array. For example, if `V[5]` points to a buffer large enough to hold a 20-byte NULL-terminated string, `L[5]` should contain the value 21 (20 bytes for the characters in the string plus 1 byte for the NULL-terminator). Your application must set each member of the `L` array.
+
+`T - data types`
+
+  The `T` structure member points to an array of data types, one for each column (or parameter) described by the descriptor.
+
+- For a bind descriptor, you must set each member of the `T` array to tell ECPGPlus the data type of each parameter.
+- For a `SELECT`-list descriptor, the `DESCRIBE SELECT LIST` statement sets each member of the `T` array to reflect the type of data found in the corresponding column.
+
+You may change any member of the `T` array before executing a `FETCH` statement to force ECPGPlus to convert the corresponding value to a specific data type.
For example, if the `DESCRIBE SELECT LIST` statement indicates that a given column is of type `DATE`, you may change the corresponding `T` member to request that the next `FETCH` statement return that value in the form of a NULL-terminated string. Each member of the `T` array is a numeric type code (see [Type Codes](#type-codes) for a list of type codes). The type codes returned by a `DESCRIBE SELECT LIST` statement differ from those expected by a `FETCH` statement. After executing a `DESCRIBE SELECT LIST` statement, each member of `T` encodes a data type *and* a flag indicating whether the corresponding column is nullable. You can use the `sqlnul()` function to extract the type code and nullable flag from a member of the `T` array. The signature of the `sqlnul()` function is as follows:
+
+```text
+void sqlnul(unsigned short *valType,
+            unsigned short *typeCode,
+            int *isNull)
+```
+
+For example, to find the type code and nullable flag for the third column of a descriptor named `results`, you would invoke `sqlnul()` as follows:
+
+```text
+sqlnul(&results->T[2], &typeCode, &isNull);
+```
+
+`I - indicator variables`
+
+  The `I` structure member points to an array of indicator variables. This array is allocated for you when your application calls the `sqlald()` function to allocate the descriptor.
+
+- For a `SELECT`-list descriptor, each member of the `I` array indicates whether the corresponding column contains a NULL (non-zero) or non-NULL (zero) value.
+- For a bind parameter, your application must set each member of the `I` array to indicate whether the corresponding parameter value is NULL.
+
+`F - number of entries`
+
+  The `F` structure member indicates how many values are described by the descriptor (the `N` structure member indicates the *maximum* number of values which may be described by the descriptor; `F` indicates the actual number of values). The value of the `F` member is set by ECPGPlus when you execute a `DESCRIBE` statement. `F` may be positive, negative, or zero.
+
+- For a `SELECT`-list descriptor, `F` will contain a positive value if the number of columns in the result set is equal to or less than the maximum number of values permitted by the descriptor (as determined by the `N` structure member); 0 if the statement is *not* a `SELECT` statement, or a negative value if the query returns more columns than allowed by the `N` structure member.
+- For a bind descriptor, `F` will contain a positive number if the number of parameters found in the statement is less than or equal to the maximum number of values permitted by the descriptor (as determined by the `N` structure member); 0 if the statement contains no parameter markers, or a negative value if the statement contains more parameter markers than allowed by the `N` structure member.
+
+If `F` contains a positive number (after executing a `DESCRIBE` statement), that number reflects the count of columns in the result set (for a `SELECT`-list descriptor) or the number of parameter markers found in the statement (for a bind descriptor). If `F` contains a negative value, you may compute the absolute value of `F` to discover how many values (or parameter markers) are required. For example, if `F` contains `-24` after describing a `SELECT` list, you know that the query returns 24 columns.
+
+`S - column/parameter names`
+
+  The `S` structure member points to an array of NULL-terminated strings.
+
+- For a `SELECT`-list descriptor, the `DESCRIBE SELECT LIST` statement sets each member of this array to the name of the corresponding column in the result set.
+- For a bind descriptor, the `DESCRIBE BIND VARIABLES` statement sets each member of this array to the name of the corresponding bind variable.
+
+In this release, the name of each bind variable is determined by the left-to-right order of the parameter marker within the query - for example, the name of the first parameter is always `?0`, the name of the second parameter is always `?1`, and so on.
+
+`M - maximum column/parameter name length`
+
+  The `M` structure member points to an array of lengths. Each member in this array specifies the *maximum* length of the corresponding member of the `S` array (that is, `M[0]` specifies the maximum length of the column/parameter name found at `S[0]`). This array is populated by the `sqlald()` function.
+
+`C - actual column/parameter name length`
+
+  The `C` structure member points to an array of lengths. Each member in this array specifies the *actual* length of the corresponding member of the `S` array (that is, `C[0]` specifies the actual length of the column/parameter name found at `S[0]`).
+
+  This array is populated by the `DESCRIBE` statement.
+
+`X - indicator variable names`
+
+  The `X` structure member points to an array of NULL-terminated strings - each string represents the name of a NULL indicator for the corresponding value.
+
+  This array is not used by ECPGPlus, but is provided for compatibility with Pro\*C applications.
+
+`Y - maximum indicator name length`
+
+  The `Y` structure member points to an array of lengths. Each member in this array specifies the *maximum* length of the corresponding member of the `X` array (that is, `Y[0]` specifies the maximum length of the indicator name found at `X[0]`).
+
+  This array is not used by ECPGPlus, but is provided for compatibility with Pro\*C applications.
+
+`Z - actual indicator name length`
+
+  The `Z` structure member points to an array of lengths. Each member in this array specifies the *actual* length of the corresponding member of the `X` array (that is, `Z[0]` specifies the actual length of the indicator name found at `X[0]`).
+
+  This array is not used by ECPGPlus, but is provided for compatibility with Pro\*C applications.
+
+## ECPGPlus Statements
+
+An embedded SQL statement allows your client application to interact with the server, while an embedded directive is an instruction to the ECPGPlus compiler.
+
+You can embed any Advanced Server SQL statement in a C program. Each statement should begin with the keywords `EXEC SQL`, and must be terminated with a semi-colon (;). Within the C program, a SQL statement takes the form:
+
+```text
+EXEC SQL <sql_command_body>;
+```
+
+Where `sql_command_body` represents a standard SQL statement. You can use a host variable anywhere that the SQL statement expects a value expression (see the example below). For more information about substituting host variables for value expressions, refer to [Declaring Host Variables](03_using_embedded_sql/#declaring-host-variables).
+
+ECPGPlus extends the PostgreSQL server-side syntax for some statements; for those statements, syntax differences are outlined in the following reference sections.
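+
+For example, the following fragment is a hypothetical sketch (it assumes an open connection and host variables `empno` and `ename` declared with appropriate types) that substitutes host variables for the value expressions in an `INSERT` statement:
+
+```text
+EXEC SQL INSERT INTO emp(empno, ename) VALUES(:empno, :ename);
+```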
For a complete reference to the supported syntax of other SQL commands, refer to the *PostgreSQL* *Core* *Documentation* available at:
+
+<https://www.postgresql.org/docs/>
+
+
+
+### ALLOCATE DESCRIPTOR
+
+Use the `ALLOCATE DESCRIPTOR` statement to allocate an SQL descriptor area:
+
+```text
+EXEC SQL [FOR <array_size>] ALLOCATE DESCRIPTOR <descriptor_name>
+    [WITH MAX <variable_count>];
+```
+
+Where:
+
+`array_size` is a variable that specifies the number of array elements to allocate for the descriptor. `array_size` may be an `INTEGER` value or a host variable.
+
+`descriptor_name` is the host variable that contains the name of the descriptor, or the name of the descriptor. This value may take the form of an identifier, a quoted string literal, or a host variable.
+
+`variable_count` specifies the maximum number of host variables in the descriptor. The default value of `variable_count` is `100`.
+
+The following code fragment allocates a descriptor named `emp_query` that may be processed as an array `(emp_array)`:
+
+```text
+EXEC SQL FOR :emp_array ALLOCATE DESCRIPTOR emp_query;
+```
+
+### CALL
+
+Use the `CALL` statement to invoke a procedure or function on the server. The `CALL` statement works only on Advanced Server. The `CALL` statement comes in two forms; the first form is used to call a *function*:
+
+```text
+EXEC SQL CALL <program_name> '('[<actual_arguments>]')'
+    INTO [[:<ret_variable>][:<ret_indicator>]];
+```
+
+The second form is used to call a *procedure*:
+
+```text
+EXEC SQL CALL <program_name> '('[<actual_arguments>]')';
+```
+
+Where:
+
+`program_name` is the name of the stored procedure or function that the `CALL` statement invokes. The program name may be schema-qualified or package-qualified (or both); if you do not specify the schema or package in which the program resides, ECPGPlus will use the value of `search_path` to locate the program.
+
+`actual_arguments` specifies a comma-separated list of arguments required by the program. Note that each `actual_argument` corresponds to a formal argument expected by the program. Each formal argument may be an `IN` parameter, an `OUT` parameter, or an `INOUT` parameter.
+
+`:ret_variable` specifies a host variable that will receive the value returned if the program is a function.
+
+`:ret_indicator` specifies a host variable that will receive the indicator value returned, if the program is a function.
+
+For example, the following statement invokes the `get_job_desc` function with the value contained in the `:ename` host variable, and captures the value returned by that function in the `:job` host variable:
+
+```text
+EXEC SQL CALL get_job_desc(:ename)
+    INTO :job;
+```
+
+### CLOSE
+
+Use the `CLOSE` statement to close a cursor, and free any resources currently in use by the cursor. A client application cannot fetch rows from a closed cursor. The syntax of the `CLOSE` statement is:
+
+```text
+EXEC SQL CLOSE [<cursor_name>];
+```
+
+Where:
+
+`cursor_name` is the name of the cursor closed by the statement. The cursor name may take the form of an identifier or of a host variable.
+
+The `OPEN` statement initializes a cursor. Once initialized, a cursor result set will remain unchanged unless the cursor is re-opened. You do not need to `CLOSE` a cursor before re-opening it.
+
+To manually close a cursor named `emp_cursor`, use the command:
+
+```text
+EXEC SQL CLOSE emp_cursor;
+```
+
+A cursor is automatically closed when an application terminates.
+
+### COMMIT
+
+Use the `COMMIT` statement to complete the current transaction, making all changes permanent and visible to other users.
The syntax is: + +```text +EXEC SQL [AT <database_name>] COMMIT [WORK] + [COMMENT <'text'>] [COMMENT <'text'> RELEASE]; +``` + +Where: + +`database_name` is the name of the database (or host variable that contains the name of the database) in which the work resides. This value may take the form of an unquoted string literal, or of a host variable. + +For compatibility, ECPGPlus accepts the `COMMENT` clause without error but does *not* store any text included with the `COMMENT` clause. + +Include the `RELEASE` clause to close the current connection after performing the commit. + +For example, the following command commits all work performed on the `dept` database and closes the current connection: + +```text +EXEC SQL AT dept COMMIT RELEASE; +``` + +By default, statements are committed only when a client application performs a `COMMIT` statement. Include the `-t` option when invoking ECPGPlus to specify that a client application should invoke `AUTOCOMMIT` functionality. You can also control `AUTOCOMMIT` functionality in a client application with the following statements: + +```text +EXEC SQL SET AUTOCOMMIT TO ON +``` + +and + +```text +EXEC SQL SET AUTOCOMMIT TO OFF +``` + +### CONNECT + +Use the `CONNECT` statement to establish a connection to a database. The `CONNECT` statement is available in two forms - one form is compatible with Oracle databases, the other is not. + +The first form is compatible with Oracle databases: + +```text +EXEC SQL CONNECT + {{:<user_name> IDENTIFIED BY :<password>} | :<connection_id>} + [AT <database_name>] + [USING :database_string] + [ALTER AUTHORIZATION :new_password]; +``` + +Where: + +`user_name` is a host variable that contains the role that the client application will use to connect to the server. + +`password` is a host variable that contains the password associated with that role. + +`connection_id` is a host variable that contains a slash-delimited user name and password used to connect to the database. + +Include the `AT` clause to specify the database to which the connection is established. `database_name` is the name of the database to which the client is connecting; specify the value in the form of a variable, or as a string literal. + +Include the `USING` clause to specify a host variable that contains a null-terminated string identifying the database to which the connection will be established. + +The `ALTER AUTHORIZATION` clause is supported for syntax compatibility only; ECPGPlus parses the `ALTER AUTHORIZATION` clause, and reports a warning. + +Using the first form of the `CONNECT` statement, a client application might establish a connection with a host variable named `user` that contains the identity of the connecting role, and a host variable named `password` that contains the associated password using the following command: + +```text +EXEC SQL CONNECT :user IDENTIFIED BY :password; +``` + +A client application could also use the first form of the `CONNECT` statement to establish a connection using a single host variable named `:connection_id`. In the following example, `connection_id` contains the slash-delimited role name and associated password for the user: + +```text +EXEC SQL CONNECT :connection_id; +``` + +The syntax of the second form of the `CONNECT` statement is: + +```text +EXEC SQL CONNECT TO <database_name> +[AS <connection_name>] [<credentials>]; +``` + +Where `credentials` is one of the following: + +```text +USER user_name password +USER user_name IDENTIFIED BY password +USER user_name USING password +``` + +In the second form: + +`database_name` is the name or identity of the database to which the client is connecting.
Specify `database_name` as a variable, or as a string literal, in one of the following forms: + +```text +<database_name>[@<hostname>][:<port>] + +tcp:postgresql://<hostname>[:<port>][/<database_name>][options] + +unix:postgresql://<hostname>[:<port>][/<database_name>][options] +``` + +Where: + + `hostname` is the name or IP address of the server on which the database resides. + + `port` is the port on which the server listens. + + You can also specify a value of `DEFAULT` to establish a connection with the default database, using the default role name. If you specify `DEFAULT` as the target database, do not include a `connection_name` or `credentials`. + +`connection_name` is the name of the connection to the database. `connection_name` should take the form of an identifier (that is, not a string literal or a variable). You can open multiple connections, by providing a unique `connection_name` for each connection. + + If you do not specify a name for a connection, `ecpglib` assigns a name of `DEFAULT` to the connection. You can refer to the connection by name (`DEFAULT`) in any `EXEC SQL` statement. + + `CURRENT` is the most recently opened connection, or the connection named in the most recent `SET CONNECTION TO` statement. If you do not refer to a connection by name in an `EXEC SQL` statement, ECPG assumes the name of the connection to be `CURRENT`. + +`user_name` is the role used to establish the connection with the Advanced Server database. The privileges of the specified role will be applied to all commands performed through the connection. + +`password` is the password associated with the specified `user_name`. + +The following code fragment uses the second form of the `CONNECT` statement to establish a connection to a database named `edb`, using the role `alice` and the password associated with that role, `1safepwd`: + +```text +EXEC SQL CONNECT TO edb AS acctg_conn + USER 'alice' IDENTIFIED BY '1safepwd'; +``` + +The name of the connection is `acctg_conn`; you can use the connection name when changing the current connection with the `SET CONNECTION` statement. + +### DEALLOCATE DESCRIPTOR + +Use the `DEALLOCATE DESCRIPTOR` statement to free memory in use by an allocated descriptor. The syntax of the statement is: + +```text +EXEC SQL DEALLOCATE DESCRIPTOR <descriptor_name> +``` + +Where: + +`descriptor_name` is the name of the descriptor. This value may take the form of a quoted string literal, or of a host variable. + +The following example deallocates a descriptor named `emp_query`: + +```text +EXEC SQL DEALLOCATE DESCRIPTOR emp_query; +``` + +### DECLARE CURSOR + +Use the `DECLARE CURSOR` statement to define a cursor. The syntax of the statement is: + +```text +EXEC SQL [AT <database_name>] DECLARE <cursor_name> CURSOR FOR +(<select_statement> | <statement_name>); +``` + +Where: + +`database_name` is the name of the database on which the cursor operates. This value may take the form of an identifier or of a host variable. If you do not specify a database name, the default value of `database_name` is the default database. + +`cursor_name` is the name of the cursor. + +`select_statement` is the text of the `SELECT` statement that defines the cursor result set; the `SELECT` statement cannot contain an `INTO` clause. + +`statement_name` is the name of a SQL statement or block that defines the cursor result set. + +The following example declares a cursor named `employees`: + +```text +EXEC SQL DECLARE employees CURSOR FOR + SELECT + empno, ename, sal, comm + FROM + emp; +``` + +The cursor generates a result set that contains the employee number, employee name, salary and commission for each employee record that is stored in the `emp` table.
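+ +The following sketch shows how the `employees` cursor might then be consumed, using the `OPEN`, `FETCH`, `CLOSE`, and `WHENEVER` statements described in this chapter (the `:emp_no`, `:emp_name`, `:emp_sal`, and `:emp_comm` host variables are assumed to be declared elsewhere in the client application): + +```text +EXEC SQL OPEN employees; +EXEC SQL WHENEVER NOT FOUND DO BREAK; +for (;;) +{ + EXEC SQL FETCH employees + INTO :emp_no, :emp_name, :emp_sal, :emp_comm; +} +EXEC SQL CLOSE employees; +```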
+ +### DECLARE DATABASE + +Use the `DECLARE DATABASE` statement to declare a database identifier for use in subsequent SQL statements (for example, in a `CONNECT` statement). The syntax is: + +```text +EXEC SQL DECLARE <database_name> DATABASE; +``` + +Where: + +`database_name` specifies the name of the database. + +The following example demonstrates declaring an identifier for the `acctg` database: + +```text +EXEC SQL DECLARE acctg DATABASE; +``` + +After invoking the command declaring `acctg` as a database identifier, the `acctg` database can be referenced by name when establishing a connection or in `AT` clauses. + +This statement has no effect and is provided for Pro\*C compatibility only. + +### DECLARE STATEMENT + +Use the `DECLARE STATEMENT` directive to declare an identifier for an SQL statement. Advanced Server supports two versions of the `DECLARE STATEMENT` directive: + +```text +EXEC SQL [<database_name>] DECLARE <statement_name> STATEMENT; +``` + +and + +```text +EXEC SQL DECLARE STATEMENT <statement_name>; +``` + +Where: + +`statement_name` specifies the identifier associated with the statement. + +`database_name` specifies the name of the database. This value may take the form of an identifier or of a host variable that contains the identifier. + +A typical usage sequence that includes the `DECLARE STATEMENT` directive might be: + +```text +EXEC SQL DECLARE give_raise STATEMENT;      // give_raise is now a statement handle (not prepared) +EXEC SQL PREPARE give_raise FROM :stmtText; // give_raise is now associated with a statement +EXEC SQL EXECUTE give_raise; +``` + +This statement has no effect and is provided for Pro\*C compatibility only. + +### DELETE + +Use the `DELETE` statement to delete one or more rows from a table. The syntax for the ECPGPlus `DELETE` statement is the same as the syntax for the SQL statement, but you can use parameter markers and host variables any place that an expression is allowed. The syntax is: + +```text +[FOR <exec_count>] DELETE FROM [ONLY] <table> [[AS] <alias>] + [USING <using_list>] + [WHERE <condition> | WHERE CURRENT OF <cursor_name>] + [{RETURNING|RETURN} * | <output_expression> [[AS] <output_name>] +[, ...] INTO <host_variable_list>] +``` + +Where: + +Include the `FOR exec_count` clause to specify the number of times the statement will execute; this clause is valid only if the `VALUES` clause references an array or a pointer to an array. + +`table` is the name (optionally schema-qualified) of an existing table. Include the `ONLY` clause to limit processing to the specified table; if you do not include the `ONLY` clause, any tables inheriting from the named table are also processed. + +`alias` is a substitute name for the target table. + +`using_list` is a list of table expressions, allowing columns from other tables to appear in the `WHERE` condition. + +Include the `WHERE` clause to specify which rows should be deleted. If you do not include a `WHERE` clause in the statement, `DELETE` will delete all rows from the table, leaving the table definition intact. + +`condition` is an expression, host variable or parameter marker that returns a value of type `BOOLEAN`. Those rows for which `condition` returns true will be deleted. + +`cursor_name` is the name of the cursor to use in the `WHERE CURRENT OF` clause; the row to be deleted will be the one most recently fetched from this cursor. The cursor must be a non-grouping query on the `DELETE` statement's target table. You cannot specify `WHERE CURRENT OF` in a `DELETE` statement that includes a Boolean condition.
+ +The `RETURN/RETURNING` clause specifies an `output_expression` or `host_variable_list` that is returned by the `DELETE` command after each row is deleted: + +- `output_expression` is an expression to be computed and returned by the `DELETE` command after each row is deleted. `output_name` is the name of the returned column; include \* to return all columns. + +- `host_variable_list` is a comma-separated list of host variables and optional indicator variables. Each host variable receives a corresponding value from the `RETURNING` clause. + +For example, the following statement deletes all rows from the `emp` table where the `sal` column contains a value greater than the value specified in the host variable, `:max_sal`: + +```text +DELETE FROM emp WHERE sal > :max_sal; +``` + +For more information about using the `DELETE` statement, see the PostgreSQL Core documentation available at: + + + +### DESCRIBE + +Use the `DESCRIBE` statement to find the number of input values required by a prepared statement or the number of output values returned by a prepared statement. The `DESCRIBE` statement is used to analyze a SQL statement whose shape is unknown at the time you write your application. + +The `DESCRIBE` statement populates an `SQLDA` descriptor; to populate a SQL descriptor, use the `ALLOCATE DESCRIPTOR` and `DESCRIBE...DESCRIPTOR` statements. + +```text +EXEC SQL DESCRIBE BIND VARIABLES FOR <statement_name> INTO <descriptor>; +``` + +or + +```text +EXEC SQL DESCRIBE SELECT LIST FOR <statement_name> INTO <descriptor>; +``` + +Where: + +`statement_name` is the identifier associated with a prepared SQL statement or PL/SQL block. + +`descriptor` is the name of a C variable of type `SQLDA*`. You must allocate the space for the descriptor by calling `sqlald()` (and initialize the descriptor) before executing the `DESCRIBE` statement. + +When you execute the first form of the `DESCRIBE` statement, ECPG populates the given descriptor with a description of each input variable *required* by the statement. For example, given two descriptors: + +```text +SQLDA *query_values_in; +SQLDA *query_values_out; +``` + +You might prepare a query that returns information from the `emp` table: + +```text +EXEC SQL PREPARE get_emp FROM + "SELECT ename, empno, sal FROM emp WHERE empno = ?"; +``` + +The command requires one input variable (for the parameter marker (?)). + +```text +EXEC SQL DESCRIBE BIND VARIABLES + FOR get_emp INTO query_values_in; +``` + +After describing the bind variables for this statement, you can examine the descriptor to find the number of variables required and the type of each variable. + +When you execute the second form, ECPG populates the given descriptor with a description of each value *returned* by the statement. For example, the following statement returns three values: + +```text +EXEC SQL DESCRIBE SELECT LIST + FOR get_emp INTO query_values_out; +``` + +After describing the select list for this statement, you can examine the descriptor to find the number of returned values and the name and type of each value. + +Before *executing* the statement, you must bind a variable for each input value and a variable for each output value. The variables that you bind for the input values specify the actual values used by the statement. The variables that you bind for the output values tell ECPGPlus where to put the values when you execute the statement. + +This is an alternate, Pro\*C-compatible syntax for the `DESCRIBE DESCRIPTOR` statement.
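+ +As an illustration, the following sketch examines the output descriptor after the `DESCRIBE SELECT LIST` statement, using the `F` (values found), `S` (names), `C` (name lengths), and `T` (types) SQLDA members described earlier; it assumes that `query_values_out` was allocated with `sqlald()` as shown above: + +```text +int i; + +for (i = 0; i < query_values_out->F; i++)   /* F = number of values found */ + printf("value %d: name = %.*s, type = %d\n", + i, query_values_out->C[i], query_values_out->S[i], + query_values_out->T[i]); +```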
+ +### DESCRIBE DESCRIPTOR + +Use the `DESCRIBE DESCRIPTOR` statement to retrieve information about a SQL statement, and store that information in a SQL descriptor. Before using `DESCRIBE DESCRIPTOR`, you must allocate the descriptor with the `ALLOCATE DESCRIPTOR` statement. The syntax is: + +```text +EXEC SQL DESCRIBE [INPUT | OUTPUT] <statement_name> + USING [SQL] DESCRIPTOR <descriptor_name>; +``` + +Where: + +`statement_name` is the name of a prepared SQL statement. + +`descriptor_name` is the name of the descriptor. `descriptor_name` can be a quoted string value or a host variable that contains the name of the descriptor. + +If you include the `INPUT` clause, ECPGPlus populates the given descriptor with a description of each input variable *required* by the statement. + +For example, given two descriptors: + +```text +EXEC SQL ALLOCATE DESCRIPTOR query_values_in; +EXEC SQL ALLOCATE DESCRIPTOR query_values_out; +``` + +You might prepare a query that returns information from the `emp` table: + +```text +EXEC SQL PREPARE get_emp FROM + "SELECT ename, empno, sal FROM emp WHERE empno = ?"; +``` + +The command requires one input variable (for the parameter marker (?)). + +```text +EXEC SQL DESCRIBE INPUT get_emp USING 'query_values_in'; +``` + +After describing the bind variables for this statement, you can examine the descriptor to find the number of variables required and the type of each variable. + +If you do not specify the `INPUT` clause, `DESCRIBE DESCRIPTOR` populates the specified descriptor with the values returned by the statement. + +If you include the `OUTPUT` clause, ECPGPlus populates the given descriptor with a description of each value *returned* by the statement. + +For example, the following statement returns three values: + +```text +EXEC SQL DESCRIBE OUTPUT get_emp USING 'query_values_out'; +``` + +After describing the select list for this statement, you can examine the descriptor to find the number of returned values and the name and type of each value. + +### DISCONNECT + +Use the `DISCONNECT` statement to close the connection to the server. The syntax is: + +```text +EXEC SQL DISCONNECT [<connection_name>][CURRENT][DEFAULT][ALL]; +``` + +Where: + +`connection_name` is the connection name specified in the `CONNECT` statement used to establish the connection. If you do not specify a connection name, the current connection is closed. + +Include the `CURRENT` keyword to specify that ECPGPlus should close the most-recently used connection. + +Include the `DEFAULT` keyword to specify that ECPGPlus should close the connection named `DEFAULT`. If you do not specify a name when opening a connection, ECPGPlus assigns the name, `DEFAULT`, to the connection. + +Include the `ALL` keyword to instruct ECPGPlus to close all active connections. + +The following example creates a connection (named `hr_connection`) that connects to the `hr` database, and then disconnects from the connection: + +```text +/* client.pgc */ +int main() +{ + EXEC SQL CONNECT TO hr AS hr_connection; + EXEC SQL DISCONNECT hr_connection; + return(0); +} +``` + +### EXECUTE + +Use the `EXECUTE` statement to execute a statement previously prepared using an `EXEC SQL PREPARE` statement. The syntax is: + +```text +EXEC SQL [FOR <array_size>] EXECUTE <statement_name> + [USING {DESCRIPTOR <SQLDA_descriptor> + |:<host_variable> [[INDICATOR] :<indicator_variable>]}]; +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value that specifies the number of rows to be processed. If you omit the `FOR` clause, the statement is executed once for each member of the array.
+ +`statement_name` specifies the name assigned to the statement when the statement was created (using the `EXEC SQL PREPARE` statement). + +Include the `USING` clause to supply values for parameters within the prepared statement: + +- Include the `DESCRIPTOR` `SQLDA_descriptor` clause to provide an SQLDA descriptor value for a parameter. + +- Use a `host_variable` (and an optional `indicator_variable`) to provide a user-specified value for a parameter. + +The following example creates a prepared statement that inserts a record into the `emp` table: + +```text +EXEC SQL PREPARE add_emp (numeric, text, text, numeric) AS + INSERT INTO emp VALUES($1, $2, $3, $4); +``` + +Each time you invoke the prepared statement, provide fresh parameter values for the statement: + +```text +EXEC SQL EXECUTE add_emp USING 8000, 'DAWSON', 'CLERK', 7788; +EXEC SQL EXECUTE add_emp USING 8001, 'EDWARDS', 'ANALYST', 7698; +``` + +### EXECUTE DESCRIPTOR + +Use the `EXECUTE` statement to execute a statement previously prepared by an `EXEC SQL PREPARE` statement, using an SQL descriptor. The syntax is: + +```text +EXEC SQL [FOR <array_size>] EXECUTE <statement_identifier> + [USING [SQL] DESCRIPTOR <descriptor_name>] + [INTO [SQL] DESCRIPTOR <descriptor_name>]; +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value that specifies the number of rows to be processed. If you omit the `FOR` clause, the statement is executed once for each member of the array. + +`statement_identifier` specifies the identifier assigned to the statement with the `EXEC SQL PREPARE` statement. + +Include the `USING` clause to specify values for any input parameters required by the prepared statement. + +Include the `INTO` clause to specify a descriptor into which the `EXECUTE` statement will write the results returned by the prepared statement. + +`descriptor_name` specifies the name of a descriptor (as a single-quoted string literal), or a host variable that contains the name of a descriptor. + +The following example executes the prepared statement, `give_raise`, using the values contained in the descriptor `:stmtText`: + +```text +EXEC SQL PREPARE give_raise FROM :stmtText; +EXEC SQL EXECUTE give_raise USING DESCRIPTOR :stmtText; +``` + +### EXECUTE...END EXEC + +Use the `EXECUTE…END-EXEC` statement to embed an anonymous block into a client application. The syntax is: + +```text +EXEC SQL [AT <database_name>] EXECUTE <anonymous_block> END-EXEC; +``` + +Where: + +`database_name` is the database identifier or a host variable that contains the database identifier. If you omit the `AT` clause, the statement will be executed on the current default database. + +`anonymous_block` is an inline sequence of PL/pgSQL or SPL statements and declarations. You may include host variables and optional indicator variables within the block; each such variable is treated as an `IN/OUT` value. + +The following example executes an anonymous block: + +```text +EXEC SQL EXECUTE + BEGIN + IF (current_user = :admin_user_name) THEN + DBMS_OUTPUT.PUT_LINE('You are an administrator'); + END IF; +END-EXEC; +``` + +!!! Note + The `EXECUTE…END EXEC` statement is supported only by Advanced Server. + +### EXECUTE IMMEDIATE + +Use the `EXECUTE IMMEDIATE` statement to execute a string that contains a SQL command. The syntax is: + +```text +EXEC SQL [AT <database_name>] EXECUTE IMMEDIATE <command_text>; +``` + +Where: + +`database_name` is the database identifier or a host variable that contains the database identifier. If you omit the `AT` clause, the statement will be executed on the current default database.
+ +`command_text` is the command executed by the `EXECUTE IMMEDIATE` statement. + +This dynamic SQL statement is useful when you don't know the text of an SQL statement at the time you write your client application. For example, a client application may prompt a (trusted) user for a statement to execute. After the user provides the text of the statement as a string value, the statement is then executed with an `EXECUTE IMMEDIATE` command. + +The statement text may not contain references to host variables. If the statement contains parameter markers or returns one or more values, you must use the `PREPARE` and `DESCRIBE` statements. + +The following example executes the command contained in the `:command_text` host variable: + +```text +EXEC SQL EXECUTE IMMEDIATE :command_text; +``` + +### FETCH + +Use the `FETCH` statement to return rows from a cursor into an SQLDA descriptor or a target list of host variables. Before using a `FETCH` statement to retrieve information from a cursor, you must prepare the cursor using `DECLARE` and `OPEN` statements. The statement syntax is: + +```text +EXEC SQL [FOR <array_size>] FETCH <cursor> + { USING DESCRIPTOR <SQLDA_descriptor> }|{ INTO <target_list> }; +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array. + +`cursor` is the name of the cursor from which rows are being fetched, or a host variable that contains the name of the cursor. + +If you include a `USING` clause, the `FETCH` statement will populate the specified SQLDA descriptor with the values returned by the server. + +If you include an `INTO` clause, the `FETCH` statement will populate the host variables (and optional indicator variables) specified in the `target_list`. + +The following code fragment declares a cursor named `employees` that retrieves the employee number, name and salary from the `emp` table: + +```text +EXEC SQL DECLARE employees CURSOR FOR + SELECT empno, ename, sal FROM emp; +EXEC SQL OPEN employees; +EXEC SQL FETCH employees INTO :emp_no, :emp_name, :emp_sal; +``` + +### FETCH DESCRIPTOR + +Use the `FETCH DESCRIPTOR` statement to retrieve rows from a cursor into an SQL descriptor. The syntax is: + +```text +EXEC SQL [FOR <array_size>] FETCH <cursor> + INTO [SQL] DESCRIPTOR <descriptor_name>; +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array. + +`cursor` is the name of the cursor from which rows are fetched, or a host variable that contains the name of the cursor. The client must `DECLARE` and `OPEN` the cursor before calling the `FETCH DESCRIPTOR` statement. + +Include the `INTO` clause to specify an SQL descriptor into which the `EXECUTE` statement will write the results returned by the prepared statement. `descriptor_name` specifies the name of a descriptor (as a single-quoted string literal), or a host variable that contains the name of a descriptor. Prior to use, the descriptor must be allocated using an `ALLOCATE DESCRIPTOR` statement. + +The following example allocates a descriptor named `row_desc` that will hold the description and the values of a specific row in the result set.
It then declares and opens a cursor (`my_cursor`) for a prepared statement, before looping through the rows in the result set, using a `FETCH` to retrieve the next row from the cursor into the descriptor: + +```text +EXEC SQL ALLOCATE DESCRIPTOR 'row_desc'; +EXEC SQL DECLARE my_cursor CURSOR FOR query; +EXEC SQL OPEN my_cursor; + +for( row = 0; ; row++ ) +{ + EXEC SQL BEGIN DECLARE SECTION; + int col; + EXEC SQL END DECLARE SECTION; + EXEC SQL FETCH my_cursor INTO SQL DESCRIPTOR 'row_desc'; +``` + +### GET DESCRIPTOR + +Use the `GET DESCRIPTOR` statement to retrieve information from a descriptor. The `GET DESCRIPTOR` statement comes in two forms. The first form returns the number of values (or columns) in the descriptor. + +```text +EXEC SQL GET DESCRIPTOR <descriptor_name> + :<host_variable> = COUNT; +``` + +The second form returns information about a specific value (specified by the `VALUE column_number` clause). + +```text +EXEC SQL [FOR <array_size>] GET DESCRIPTOR <descriptor_name> + VALUE <column_number> {:<host_variable> = <descriptor_item> {,…}}; +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value that specifies the number of rows to be processed. If you specify an `array_size`, the `host_variable` must be an array of that size; for example, if `array_size` is `10`, `:host_variable` must be a 10-member array of `host_variables`. If you omit the `FOR` clause, the statement is executed once for each member of the array. + +`descriptor_name` specifies the name of a descriptor (as a single-quoted string literal), or a host variable that contains the name of a descriptor. + +Include the `VALUE` clause to specify the information retrieved from the descriptor. + +- `column_number` identifies the position of the variable within the descriptor. + +- `host_variable` specifies the name of the host variable that will receive the value of the item. + +- `descriptor_item` specifies the type of the retrieved descriptor item. + +ECPGPlus implements the following `descriptor_item` types: + +- `TYPE` +- `LENGTH` +- `OCTET_LENGTH` +- `RETURNED_LENGTH` +- `RETURNED_OCTET_LENGTH` +- `PRECISION` +- `SCALE` +- `NULLABLE` +- `INDICATOR` +- `DATA` +- `NAME` + +The following code fragment demonstrates using a `GET DESCRIPTOR` statement to obtain the number of columns entered in a user-provided string: + +```text +EXEC SQL ALLOCATE DESCRIPTOR parse_desc; +EXEC SQL PREPARE query FROM :stmt; +EXEC SQL DESCRIBE query INTO SQL DESCRIPTOR parse_desc; +EXEC SQL GET DESCRIPTOR parse_desc :col_count = COUNT; +``` + +The example allocates an SQL descriptor (named `parse_desc`), before using a `PREPARE` statement to syntax check the string provided by the user `(:stmt)`. A `DESCRIBE` statement moves a description of the user-provided string into the descriptor, `parse_desc`. The call to `EXEC SQL GET DESCRIPTOR` interrogates the descriptor to discover the number of columns `(:col_count)` in the result set. + +### INSERT + +Use the `INSERT` statement to add one or more rows to a table. The syntax for the ECPGPlus `INSERT` statement is the same as the syntax for the SQL statement, but you can use parameter markers and host variables any place that a value is allowed. The syntax is: + +```text +[FOR <exec_count>] INSERT INTO <table>
[(<column> [, ...])] + {DEFAULT VALUES | + VALUES ({<expression> | DEFAULT} [, ...])[, ...] | <query>} + [RETURNING * | <output_expression> [[ AS ] <output_name>] [, ...]] +``` + +Where: + +Include the `FOR exec_count` clause to specify the number of times the statement will execute; this clause is valid only if the `VALUES` clause references an array or a pointer to an array. + +`table` specifies the (optionally schema-qualified) name of an existing table. + +`column` is the name of a column in the table. The column name may be qualified with a subfield name or array subscript. Specify the `DEFAULT VALUES` clause to use default values for all columns. + +`expression` is the expression, value, host variable or parameter marker that will be assigned to the corresponding column. Specify `DEFAULT` to fill the corresponding column with its default value. + +`query` specifies a `SELECT` statement that supplies the row(s) to be inserted. + +`output_expression` is an expression that will be computed and returned by the `INSERT` command after each row is inserted. The expression can refer to any column within the table. Specify \* to return all columns of the inserted row(s). + +`output_name` specifies a name to use for a returned column. + +The following example adds a row to the `emp` table: + +```text +INSERT INTO emp (empno, ename, job, hiredate) + VALUES ('8400', :ename, 'CLERK', '2011-10-31'); +``` + +!!! Note + The `INSERT` statement uses a host variable `(:ename)` to specify the value of the `ename` column. + +For more information about using the `INSERT` statement, see the PostgreSQL Core documentation available at: + + + +### OPEN + +Use the `OPEN` statement to open a cursor. The syntax is: + +```text +EXEC SQL [FOR <array_size>] OPEN <cursor> [USING <parameters>]; +``` + +Where `parameters` is one of the following: + +```text +DESCRIPTOR <SQLDA_descriptor> +``` + +or + +```text +:<host_variable> [ [ INDICATOR ] :<indicator_variable>, … ] +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array. + +`cursor` is the name of the cursor being opened. + +`parameters` is either `DESCRIPTOR SQLDA_descriptor` or a comma-separated list of host variables (and optional indicator variables) that initialize the cursor. If specifying an `SQLDA_descriptor`, the descriptor must be initialized with a `DESCRIBE` statement. + +The `OPEN` statement initializes a cursor using the values provided in `parameters`. Once initialized, the cursor result set will remain unchanged unless the cursor is closed and re-opened. A cursor is automatically closed when an application terminates. + +The following example declares a cursor named `employees` that queries the `emp` table, returning the employee number, name, salary and commission of an employee whose name matches a user-supplied value (stored in the host variable, `:emp_name`). + +```text + EXEC SQL DECLARE employees CURSOR FOR + SELECT + empno, ename, sal, comm + FROM + emp + WHERE ename = :emp_name; + EXEC SQL OPEN employees; +... +``` + +After declaring the cursor, the example uses an `OPEN` statement to make the contents of the cursor available to a client application. + +### OPEN DESCRIPTOR + +Use the `OPEN DESCRIPTOR` statement to open a cursor with a SQL descriptor. The syntax is: + +```text +EXEC SQL [FOR <array_size>] OPEN <cursor> + [USING [SQL] DESCRIPTOR <descriptor_name>] + [INTO [SQL] DESCRIPTOR <descriptor_name>]; +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch.
If you omit the `FOR` clause, the statement is executed once for each member of the array. + +`cursor` is the name of the cursor being opened. + +`descriptor_name` specifies the name of an SQL descriptor (in the form of a single-quoted string literal) or a host variable that contains the name of an SQL descriptor that contains the query that initializes the cursor. + +For example, the following statement opens a cursor (named `emp_cursor`), using the host variable, `:employees`: + +```text +EXEC SQL OPEN emp_cursor USING DESCRIPTOR :employees; +``` + +### PREPARE + +Prepared statements are useful when a client application must perform a task multiple times; the statement is parsed, rewritten and planned only once, rather than each time the statement is executed, saving repetitive processing time. + +Use the `PREPARE` statement to prepare an SQL statement or PL/pgSQL block for execution. The statement is available in two forms; the first form is: + +```text +EXEC SQL [AT <database_name>] PREPARE <statement_name> + FROM <sql_statement>; +``` + +The second form is: + +```text +EXEC SQL [AT <database_name>] PREPARE <statement_name> + AS <sql_statement>; +``` + +Where: + +`database_name` is the database identifier or a host variable that contains the database identifier against which the statement will execute. If you omit the `AT` clause, the statement will execute against the current default database. + +`statement_name` is the identifier associated with a prepared SQL statement or PL/SQL block. + +`sql_statement` may take the form of a `SELECT` statement, a single-quoted string literal or host variable that contains the text of an SQL statement. + +To include variables within a prepared statement, substitute placeholders (`$1, $2, $3`, etc.) for statement values that might change when you `PREPARE` the statement. When you `EXECUTE` the statement, provide a value for each parameter. The values must be provided in the order in which they will replace placeholders. + +The following example creates a prepared statement (named `add_emp`) that inserts a record into the `emp` table: + +```text +EXEC SQL PREPARE add_emp (int, text, text, numeric) AS + INSERT INTO emp VALUES($1, $2, $3, $4); +``` + +Each time you invoke the statement, provide fresh parameter values for the statement: + +```text +EXEC SQL EXECUTE add_emp(8003, 'Davis', 'CLERK', 2000.00); +EXEC SQL EXECUTE add_emp(8004, 'Myer', 'CLERK', 2000.00); +``` + +!!! Note + A client application must issue a `PREPARE` statement within each session in which a statement will be executed; prepared statements persist only for the duration of the current session. + +### ROLLBACK + +Use the `ROLLBACK` statement to abort the current transaction, and discard any updates made by the transaction. The syntax is: + +```text +EXEC SQL [AT <database_name>] ROLLBACK [WORK] + [ { TO [SAVEPOINT] <savepoint> } | RELEASE ] +``` + +Where: + +`database_name` is the database identifier or a host variable that contains the database identifier against which the statement will execute. If you omit the `AT` clause, the statement will execute against the current default database. + +Include the `TO` clause to abort any commands that were executed after the specified `savepoint`; use the `SAVEPOINT` statement to define the `savepoint`. If you omit the `TO` clause, the `ROLLBACK` statement will abort the transaction, discarding all updates. + +Include the `RELEASE` clause to cause the application to execute an `EXEC SQL COMMIT RELEASE` and close the connection.
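+ +For example, a minimal sketch that discards the current transaction's changes and closes the connection in a single statement: + +```text +EXEC SQL ROLLBACK RELEASE; +```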
+ +Use the following statement to roll back a complete transaction: + +```text +EXEC SQL ROLLBACK; +``` + +Invoking this statement will abort the transaction, undoing all changes, erasing any savepoints, and releasing all transaction locks. If you include a savepoint (`my_savepoint` in the following example): + +```text +EXEC SQL ROLLBACK TO SAVEPOINT my_savepoint; +``` + +Only the portion of the transaction that occurred after `my_savepoint` is rolled back; `my_savepoint` is retained, but any savepoints created after `my_savepoint` will be erased. + +Rolling back to a specified savepoint releases all locks acquired after the savepoint. + +### SAVEPOINT + +Use the `SAVEPOINT` statement to define a `savepoint`; a savepoint is a marker within a transaction. You can use a `ROLLBACK` statement to abort the current transaction, returning the state of the server to its condition prior to the specified savepoint. The syntax of a `SAVEPOINT` statement is: + +```text +EXEC SQL [AT <database_name>] SAVEPOINT <savepoint_name> +``` + +Where: + +`database_name` is the database identifier or a host variable that contains the database identifier in which the savepoint resides. If you omit the `AT` clause, the statement will execute against the current default database. + +`savepoint_name` is the name of the savepoint. If you re-use a `savepoint_name`, the original savepoint is discarded. + +Savepoints can only be established within a transaction block. A transaction block may contain multiple savepoints. + +To create a savepoint named `my_savepoint`, include the statement: + +```text +EXEC SQL SAVEPOINT my_savepoint; +``` + +### SELECT + +ECPGPlus extends support of the SQL `SELECT` statement by providing the `INTO host_variables` clause. The clause allows you to select specified information from an Advanced Server database into a host variable. The syntax for the `SELECT` statement is: + +```text +EXEC SQL [AT <database_name>] +SELECT + [ <optimizer_hint> ] + [ ALL | DISTINCT [ ON( <expression>, ...) ]] + select_list INTO + <host_variables> + [ FROM from_item [, from_item ]...] + [ WHERE condition ] + [ hierarchical_query_clause ] + [ GROUP BY expression [, ...]] + [ HAVING condition ] + [ { UNION [ ALL ] | INTERSECT | MINUS } (subquery) ] + [ ORDER BY expression [order_by_options]] + [ LIMIT { count | ALL }] + [ OFFSET start [ ROW | ROWS ] ] + [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ] + [ FOR { UPDATE | SHARE } [OF table_name [, ...]][NOWAIT ][...]] +``` + +Where: + +`database_name` is the name of the database (or host variable that contains the name of the database) in which the table resides. This value may take the form of an unquoted string literal, or of a host variable. + +`host_variables` is a list of host variables that will be populated by the `SELECT` statement. If the `SELECT` statement returns more than a single row, `host_variables` must be an array. + +ECPGPlus provides support for the additional clauses of the SQL `SELECT` statement as documented in the PostgreSQL Core documentation available at: + + + +To use the `INTO` `host_variables` clause, include the names of defined host variables when specifying the `SELECT` statement. For example, the following `SELECT` statement populates the `:emp_name` and `:emp_sal` host variables with the name and salary of a specific employee: + +```text +EXEC SQL SELECT ename, sal + INTO :emp_name, :emp_sal + FROM emp + WHERE empno = 7988; +``` + +The enhanced `SELECT` statement also allows you to include parameter markers (question marks) in any clause where a value would be permitted.
For example, the following query contains a parameter marker in the `WHERE` clause: + +```text +SELECT * FROM emp WHERE dept_no = ?; +``` + +This `SELECT` statement allows you to provide a value at run-time for the `dept_no` parameter marker. + +### SET CONNECTION + +There are (at least) three reasons you may need more than one connection in a given client application: + +- You may want different privileges for different statements. +- You may need to interact with multiple databases within the same client. +- Multiple threads of execution (within a client application) cannot share a connection concurrently. + +The syntax for the `SET CONNECTION` statement is: + +```text +EXEC SQL SET CONNECTION <connection_name>; +``` + +Where: + +`connection_name` is the name of the connection to the database. + +To use the `SET CONNECTION` statement, you should open the connection to the database using the second form of the `CONNECT` statement; include the `AS` clause to specify a `connection_name`. + +By default, the current thread uses the current connection; use the `SET CONNECTION` statement to specify a default connection for the current thread to use. The default connection is only used when you execute an `EXEC SQL` statement that does not explicitly specify a connection name. For example, the following statement will use the default connection because it does not include an `AT` `connection_name` clause: + +```text +EXEC SQL DELETE FROM emp; +``` + +This statement will not use the default connection because it specifies a connection name using the `AT` `connection_name` clause: + +```text +EXEC SQL AT acctg_conn DELETE FROM emp; +``` + +For example, a client application that creates and maintains multiple connections (such as): + +```text +EXEC SQL CONNECT TO edb AS acctg_conn + USER 'alice' IDENTIFIED BY 'acctpwd'; +``` + +and + +```text +EXEC SQL CONNECT TO edb AS hr_conn + USER 'bob' IDENTIFIED BY 'hrpwd'; +``` + +can change between the connections with the `SET CONNECTION` statement: + +```text +SET CONNECTION acctg_conn; +``` + +or + +```text +SET CONNECTION hr_conn; +``` + +The server will use the privileges associated with the connection when determining the privileges available to the connecting client. When using the `acctg_conn` connection, the client will have the privileges associated with the role, `alice`; when connected using `hr_conn`, the client will have the privileges associated with `bob`. + +### SET DESCRIPTOR + +Use the `SET DESCRIPTOR` statement to assign a value to a descriptor area using information provided by the client application in the form of a host variable or an integer value. The statement comes in two forms; the first form is: + +```text +EXEC SQL [FOR <array_size>] SET DESCRIPTOR <descriptor_name> + VALUE <column_number> <descriptor_item> = <host_variable>; +``` + +The second form is: + +```text +EXEC SQL [FOR <array_size>] SET DESCRIPTOR <descriptor_name> + COUNT = integer; +``` + +Where: + +`array_size` is an integer value or a host variable that contains an integer value specifying the number of rows to fetch. If you omit the `FOR` clause, the statement is executed once for each member of the array. + +`descriptor_name` specifies the name of a descriptor (as a single-quoted string literal), or a host variable that contains the name of a descriptor. + +Include the `VALUE` clause to describe the information stored in the descriptor. + +- `column_number` identifies the position of the variable within the descriptor. + +- `descriptor_item` specifies the type of the descriptor item. + +- `host_variable` specifies the name of the host variable that contains the value of the item.
+ +ECPGPlus implements the following `descriptor_item` types: + +- `TYPE` +- `LENGTH` +- `[REF] INDICATOR` +- `[REF] DATA` +- `[REF] RETURNED LENGTH` + +For example, a client application might prompt a user for a dynamically created query: + +```text +query_text = promptUser("Enter a query"); +``` + +To execute a dynamically created query, you must first `prepare` the query (parsing and validating the syntax of the query), and then `describe` the `input` parameters found in the query using the `EXEC SQL DESCRIBE INPUT` statement. + +```text +EXEC SQL ALLOCATE DESCRIPTOR query_params; +EXEC SQL PREPARE emp_query FROM :query_text; + +EXEC SQL DESCRIBE INPUT emp_query + USING SQL DESCRIPTOR 'query_params'; +``` + +After describing the query, the `query_params` descriptor contains information about each parameter required by the query. + +For this example, we'll assume that the user has entered: + +```text +SELECT ename FROM emp WHERE sal > ? AND job = ?; +``` + +In this case, the descriptor describes two parameters, one for `sal > ?` and one for `job = ?`. + +To discover the number of parameter markers (question marks) in the query (and therefore, the number of values you must provide before executing the query), use: + +```text +EXEC SQL GET DESCRIPTOR … :host_variable = COUNT; +``` + +Then, you can use `EXEC SQL GET DESCRIPTOR` to retrieve the name of each parameter. You can also use `EXEC SQL GET DESCRIPTOR` to retrieve the type of each parameter (along with the number of parameters) from the descriptor, or you can supply each `value` in the form of a character string and ECPG will convert that string into the required data type. + +The data type of the first parameter is `numeric`; the type of the second parameter is `varchar`. The name of the first parameter is `sal`; the name of the second parameter is `job`. + +Next, loop through each parameter, prompting the user for a value, and store those values in host variables. You can use `GET DESCRIPTOR … COUNT` to find the number of parameters in the query. + +```text +EXEC SQL GET DESCRIPTOR 'query_params' + :param_count = COUNT; + +for(param_number = 1; + param_number <= param_count; + param_number++) +{ +``` + +Use `GET DESCRIPTOR` to copy the name of the parameter into the `param_name` host variable: + +```text +EXEC SQL GET DESCRIPTOR 'query_params' + VALUE :param_number :param_name = NAME; + +reply = promptUser(param_name); +if (reply == NULL) + reply_ind = 1; /* NULL */ +else + reply_ind = 0; /* NOT NULL */ +``` + +To associate a `value` with each parameter, you use the `EXEC SQL SET DESCRIPTOR` statement. For example: + +```text +EXEC SQL SET DESCRIPTOR 'query_params' + VALUE :param_number DATA = :reply; +EXEC SQL SET DESCRIPTOR 'query_params' + VALUE :param_number INDICATOR = :reply_ind; +} +``` + +Now, you can use the `EXEC SQL EXECUTE DESCRIPTOR` statement to execute the prepared statement on the server. + +### UPDATE + +Use an `UPDATE` statement to modify the data stored in a table. The syntax is: + +```text +EXEC SQL [AT <database_name>][FOR <exec_count>] + UPDATE [ ONLY ] table [ [ AS ] alias ] + SET {column = { expression | DEFAULT } | + (column [, ...]) = ({ expression|DEFAULT } [, ...])} [, ...] + [ FROM from_list ] + [ WHERE condition | WHERE CURRENT OF cursor_name ] + [ RETURNING * | output_expression [[ AS ] output_name] [, ...] ] +``` + +Where: + +`database_name` is the name of the database (or host variable that contains the name of the database) in which the table resides.
This value may take the form of an unquoted string literal, or of a host variable. + +Include the `FOR exec_count` clause to specify the number of times the statement will execute; this clause is valid only if the `SET` or `WHERE` clause contains an array. + +ECPGPlus provides support for the additional clauses of the SQL `UPDATE` statement as documented in the PostgreSQL Core documentation available at: + + + +A host variable can be used in any clause that specifies a value. To use a host variable, simply substitute a defined variable for any value associated with any of the documented `UPDATE` clauses. + +The following `UPDATE` statement changes the job description of an employee (identified by the `:ename` host variable) to the value contained in the `:new_job` host variable, and increases the employee's salary by multiplying the current salary by the value in the `:increase` host variable: + +```text +EXEC SQL UPDATE emp + SET job = :new_job, sal = sal * :increase + WHERE ename = :ename; +``` + +The enhanced `UPDATE` statement also allows you to include parameter markers (question marks) in any clause where an input value would be permitted. For example, we can write the same update statement with a parameter marker in the `WHERE` clause: + +```text +EXEC SQL UPDATE emp + SET job = ?, sal = sal * ? + WHERE ename = :ename; +``` + +This `UPDATE` statement could allow you to prompt the user for a new value for the `job` column and for the amount by which the `sal` column is multiplied for the employee specified by `:ename`. + +### WHENEVER + +Use the `WHENEVER` statement to specify the action taken by a client application when it encounters an SQL error or warning. The syntax is: + +```text +EXEC SQL WHENEVER <condition> <action>; +``` + +The following table describes the different conditions that might trigger an `action`: + +| **Condition** | **Description** | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `NOT FOUND` | The server returns a `NOT FOUND` condition when it encounters a `SELECT` that returns no rows, or when a `FETCH` reaches the end of a result set. | +| `SQLERROR` | The server returns an `SQLERROR` condition when it encounters a serious error returned by an SQL statement. | +| `SQLWARNING` | The server returns an `SQLWARNING` condition when it encounters a non-fatal warning returned by an SQL statement. | + +The following table describes the actions that result from a client encountering a `condition`: + +| **Action** | **Description** | +| ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CALL function [([args])]` | Instructs the client application to call the named `function`. | +| `CONTINUE` | Instructs the client application to proceed to the next statement. | +| `DO BREAK` | Instructs the client application to execute a C `break` statement. A `break` statement may appear in a loop or a `switch` statement. If executed, the `break` statement terminates the loop or the `switch` statement. | +| `DO CONTINUE` | Instructs the client application to execute a C `continue` statement. A `continue` statement may only exist within a loop, and if executed, will cause the flow of control to return to the top of the loop. |
| +| `DO function ([args])` | Instructs the client application to call the named `function`. | +| `GOTO label` or `GO TO label` | Instructs the client application to proceed to the statement that contains the `label`. | +| `SQLPRINT` | Instructs the client application to print a message to standard error. | +| `STOP` | Instructs the client application to stop execution. | + +The following code fragment prints a message if the client application encounters a warning, and aborts the application if it encounters an error: + +```text +EXEC SQL WHENEVER SQLWARNING SQLPRINT; +EXEC SQL WHENEVER SQLERROR STOP; +``` + +Include the following code to specify that a client should continue processing after warning a user of a problem: + +```text +EXEC SQL WHENEVER SQLWARNING SQLPRINT; +``` + +Include the following code to call a function if a query returns no rows, or when a cursor reaches the end of a result set: + +```text +EXEC SQL WHENEVER NOT FOUND CALL error_handler(__LINE__); +``` diff --git a/product_docs/docs/epas/11/ecpgplus_guide/images/ecpg_path.png b/product_docs/docs/epas/11/ecpgplus_guide/images/ecpg_path.png new file mode 100755 index 00000000000..c08335bc0f8 --- /dev/null +++ b/product_docs/docs/epas/11/ecpgplus_guide/images/ecpg_path.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:848c6389145c5062fe2fac5b6235b381616c6b8a99311de5ee91e09b036ea205 +size 73046 diff --git a/product_docs/docs/migration_toolkit/53.0.2/images/EDB_logo.png b/product_docs/docs/epas/11/ecpgplus_guide/images/edb_logo.png old mode 100755 new mode 100644 similarity index 100% rename from product_docs/docs/migration_toolkit/53.0.2/images/EDB_logo.png rename to product_docs/docs/epas/11/ecpgplus_guide/images/edb_logo.png diff --git a/product_docs/docs/migration_toolkit/53.0.2/images/edb_logo.svg b/product_docs/docs/epas/11/ecpgplus_guide/images/edb_logo.svg old mode 100755 new mode 100644 similarity index 100% rename from product_docs/docs/migration_toolkit/53.0.2/images/edb_logo.svg rename to product_docs/docs/epas/11/ecpgplus_guide/images/edb_logo.svg diff --git a/product_docs/docs/epas/11/ecpgplus_guide/index.mdx b/product_docs/docs/epas/11/ecpgplus_guide/index.mdx new file mode 100644 index 00000000000..89311ae0fa3 --- /dev/null +++ b/product_docs/docs/epas/11/ecpgplus_guide/index.mdx @@ -0,0 +1,30 @@ +--- +navTitle: ECPGPlus +title: "ECPGPlus Guide" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/index.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/conclusion.html" + - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/13/introduction.html" +--- + +EnterpriseDB has enhanced ECPG (the PostgreSQL pre-compiler) to create ECPGPlus. ECPGPlus allows you to include Pro\*C compatible embedded SQL commands in C applications when connected to an EDB Postgres Advanced Server (Advanced Server) database. When you use ECPGPlus to compile an application, the SQL code is syntax-checked and translated into C. + +ECPGPlus supports: + +- Oracle Dynamic SQL – Method 4 (ODS-M4). +- Pro\*C compatible anonymous blocks. +- A `CALL` statement compatible with Oracle databases. + +As part of ECPGPlus's Pro\*C compatibility, you do not need to include the `BEGIN DECLARE SECTION` and `END DECLARE SECTION` directives. 
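+ +For example, the following hypothetical fragment declares its host variables as ordinary C variables, with no surrounding declare section: + +```text +int  emp_id; +char emp_name[41]; + +EXEC SQL SELECT ename INTO :emp_name + FROM emp WHERE empno = :emp_id; +```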
+ +**PostgreSQL Compatibility** + +While most ECPGPlus statements will work with community PostgreSQL, the `CALL` statement and the `EXECUTE…END EXEC` statement work only when the client application is connected to EDB Postgres Advanced Server. + +
+ +introduction overview using_embedded_sql using_descriptors building_executing_dynamic_sql_statements error_handling reference conclusion + +
diff --git a/product_docs/docs/epas/11/edb_plus/02_edb_plus.mdx b/product_docs/docs/epas/11/edb_plus/02_edb_plus.mdx new file mode 100644 index 00000000000..60998c1669b --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/02_edb_plus.mdx @@ -0,0 +1,18 @@ +--- +title: "EDB*Plus" +legacyRedirects: + - "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/39/edb_plus.html" +--- + + + +EDB\*Plus is a utility program that provides a command line user interface to EDB Postgres Advanced Server. EDB\*Plus accepts SQL commands, SPL anonymous blocks, and EDB\*Plus commands. + +EDB\*Plus commands are compatible with Oracle SQL\*Plus commands and provide various capabilities including: + +- Querying certain database objects +- Executing stored procedures +- Formatting output from SQL commands +- Executing batch scripts +- Executing OS commands +- Recording output diff --git a/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/01_installing_prereq.mdx b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/01_installing_prereq.mdx new file mode 100644 index 00000000000..b59f6c29470 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/01_installing_prereq.mdx @@ -0,0 +1,36 @@ +--- +title: "Installation Prerequisites" + +legacyRedirects: + - "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/38/01_installing_prereq.html" +--- + + + +Before installing EDB\*Plus, you must first install Java (version 1.7 or later). On a Linux system, you can use the `yum` package manager to install Java. Open a terminal window, assume superuser privileges, and enter: + + ```text + # yum install java + ``` + +If you are using Windows, Java installers and instructions are available online at: + + + + +You must also have credentials that allow access to the EDB repository. For information about requesting credentials, visit: + + + + +After receiving your repository credentials: + +1. Create the repository configuration file. +2. Modify the file, providing your user name and password. +3. Install EDB\*Plus. + + + + + + diff --git a/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/02_rpm_installation.mdx b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/02_rpm_installation.mdx new file mode 100644 index 00000000000..347f951394e --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/02_rpm_installation.mdx @@ -0,0 +1,129 @@ +--- +title: "Performing an RPM Installation" +legacyRedirects: + - "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/38/rpm_installation" +--- + + + +For detailed information about creating and using EDB repositories to install Advanced Server or its supporting components, see the *EDB Postgres Advanced Server Installation Guide*, available at: + + + +**Creating a Repository Configuration File** + +To create the repository configuration file, assume superuser privileges, and invoke the following command: + +- On RHEL or CentOS 7: + + ```text + yum -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +- On RHEL or CentOS 8: + + ```text + dnf -y install https://yum.enterprisedb.com/edbrepos/edb-repo-latest.noarch.rpm + ``` + +The repository configuration file is named `edb.repo`. The file resides in `/etc/yum.repos.d`.
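+ +Open the file with your choice of editor and replace the credential placeholders in the repository entry with the user name and password you received from EDB. The entry typically looks roughly like the following sketch (an illustration only; the exact section name and `baseurl` in your generated file may differ): + +```text +[edb] +name=EnterpriseDB RPMs $releasever - $basearch +baseurl=https://<username>:<password>@yum.enterprisedb.com/edb/redhat/rhel-$releasever-$basearch +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/ENTERPRISEDB-GPG-KEY +```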
After saving your changes to the configuration file, you can use the following command to install EDB*Plus: + +- On RHEL or CentOS 7: + + ``` + yum install edb-asxx-edbplus + ``` + +- On RHEL or CentOS 8: + + ``` + dnf install edb-asxx-edbplus + ``` + +Where `xx` is the Advanced Server version. + +When you install an RPM package that is signed by a source that is not recognized by your system, yum may ask for your permission to import the key to your local server. If prompted, and you are satisfied that the packages come from a trustworthy source, enter `y`, and press `Return` to continue. + +During the installation, yum may encounter a dependency that it cannot resolve. If it does, it will provide a list of the required dependencies that you must manually resolve. + + +## Configuring an RPM Installation + +After performing an RPM installation of EDB\*Plus, you must set the values of environment variables that allow +EDB\*Plus to locate your Java installation. Use the following commands to set variable values: + +```text +export JAVA_HOME=<path_to_java> +export PATH=<path_to_java>/bin:$PATH +``` + +By default, the `pg_hba.conf` file for the RPM installer enforces `IDENT` authentication. Before invoking EDB*Plus, you must either modify the `pg_hba.conf` file, changing the authentication method to a form other than `IDENT` (and restarting the server), or perform the following steps to ensure that an `IDENT` server is accessible: + +You must confirm that an `identd` server is installed and running. You can use the `yum` package manager to install an `identd` server by invoking the command: + +- On RHEL or CentOS 7: + + ```text + yum install xinetd authd + ``` + +- On RHEL or CentOS 8: + + ```text + dnf install xinetd authd + ``` + +The command should create a file named `/etc/xinetd.d/auth` that contains: + +```text +service auth +{ + disable = yes + socket_type = stream + wait = no + user = ident + cps = 4096 10 + instances = UNLIMITED + server = /usr/sbin/in.authd + server_args = -t60 --xerror -os +} +``` + +!!! Note + If the file includes a `-E` argument at the end of the server arguments, please erase `-E`. + +Then, to start the `identd` server, invoke the following commands: + +```text +systemctl enable xinetd +systemctl start xinetd +``` + +Open the `pg_ident.conf` file and create a user mapping: + +``` +# map_name system_username postgres_username + edbas enterprisedb enterprisedb +``` + +Where: + +- The name specified in the `map_name` column is a user-defined name that will identify the mapping in the `pg_hba.conf` file. +- The name specified in the `system_username` column is `enterprisedb`. +- The name specified in the `postgres_username` column is `enterprisedb`. + +Then, open the `pg_hba.conf` file and modify the `IDENT` entries: + +- If you are using an IPv4 local connection, modify the file entry to read: + + `host all all 127.0.0.0/0 ident map=edbas` + +- If you are using an IPv6 local connection, modify the file entry to read: + + `host all all ::1/128 ident map=edbas` + +You must restart the Advanced Server service before invoking EDB\*Plus.
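+ +For example, on a systemd-based host the restart might look like the following sketch (it assumes the default Advanced Server 11 service name, `edb-as-11`; substitute the service name used by your installation): + +```text +systemctl restart edb-as-11 +```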
For detailed information about controlling the Advanced Server service, see the *EDB Postgres Advanced Server Installation Guide*, available at: + + + + + diff --git a/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/03_using_gui.mdx b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/03_using_gui.mdx new file mode 100644 index 00000000000..9c9e77a7871 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/03_using_gui.mdx @@ -0,0 +1,49 @@ +--- +title: "Using the Graphical Installer" +legacyRedirects: + - "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/39/installing_edb_plus.html" +--- + + + +Graphical installers for EDB\*Plus are available via StackBuilder Plus; you can access StackBuilder Plus through your Windows or Linux start menu. After opening StackBuilder Plus and selecting the installation for which you wish to install EDB\*Plus, expand the component selection screen tree control to select and download the EDB\*Plus installer. + +![The EDB\*Plus Welcome window](../images/edb_plus_welcome_1.png) + +
Fig. 1: The EDB*Plus Welcome window
+ + +The EDB\*Plus installer welcomes you to the setup wizard, as shown in the figure above. + +![The Installation Directory window](../images/installation_directory.png) + +
Fig. 2: The Installation Directory window
+ + +Use the `Installation Directory` field to specify the directory in which you wish to install the EDB\*Plus software. Then, click `Next` to continue. + +![The Advanced Server Installation Details window](../images/advanced_server_installation_details.png) + +
Fig. 3: The Advanced Server Installation Details window
+ + +Use fields on the `EDB Postgres Advanced Server Installation Details` window to identify the location of the Advanced Server host: + +- Use the `Host` field to identify the system on which Advanced Server resides. +- Use the `Port` field to identify the listener port that Advanced Server monitors for client connections. + +Then, click `Next` to continue. + +![The Ready to Install window](../images/ready_to_install.png) + +
Fig. 4: The Ready to Install window
+ + +The `Ready to Install` window notifies you when the installer has all of the information needed to install EDB\*Plus on your system. Click `Next` to install EDB\*Plus. + +![The installation is complete](../images/installation_complete.png) + +
Fig. 5: The installation is complete
+ + +The installer notifies you when the setup wizard has completed the EDB\*Plus installation. Click `Finish` to exit the installer. diff --git a/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/index.mdx b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/index.mdx new file mode 100644 index 00000000000..9f951cd7871 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/03_installing_edb_plus.mdx/index.mdx @@ -0,0 +1,15 @@ +--- +title: "Installing EDB*Plus" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/installation-getting-started/edb-plus/12/installing_edb_plus.html" +--- + +You can use an RPM installer or a graphical installer to add EDB\*Plus to your Advanced Server installation. + +
+ +installing_prereq rpm_installation using_gui + +
\ No newline at end of file diff --git a/product_docs/docs/epas/11/edb_plus/04_using_edb_plus.mdx b/product_docs/docs/epas/11/edb_plus/04_using_edb_plus.mdx new file mode 100644 index 00000000000..bab4690fb20 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/04_using_edb_plus.mdx @@ -0,0 +1,147 @@ +--- +title: "Using EDB*Plus" +legacyRedirects: + - "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/39/using_edb_plus.html" +--- + + + + + +To open an EDB\*Plus command line, navigate through the `Applications` or `Start` menu to the Advanced Server menu, to the `Run SQL Command Line` menu, and select the EDB\*Plus option. You can also invoke EDB\*Plus from the operating system command line with the following command: + +```text +edbplus [ -S[ILENT ] ] [ <login> | /NOLOG ] [ @<scriptfile>[.<ext> ] ] +``` + +`SILENT` + + If specified, the EDB\*Plus sign-on banner is suppressed along with all prompts. + +`login` + + Login information for connecting to the database server and database. `login` takes the following form; there must be no white space within the login information. + +```text +<username>[/<password>][@{<connectstring> | <variable> } ] +``` + + Where: + + `username` is a database username with which to connect to the database. + + `password` is the password associated with the specified `username`. If a `password` is not provided, but a password is required for authentication, a password file is used if available. If there is no password file or no entry in the password file with the matching connection parameters, then EDB\*Plus will prompt for the password. + + `connectstring` is the database connection string with the following format: + +```text +<host>[:<port>][/<dbname>][?ssl={true | false}] +``` + + Where: + +- `host` is the hostname or IP address on which the database server resides. If neither `@connectstring` nor `@variable` nor `/NOLOG` is specified, the default host is assumed to be the localhost. + +- `port` is the port number receiving connections on the database server. If not specified, the default is `5444`. + +- `dbname` is the name of the database to connect to. If not specified, the default is `edb`. + +- If `Internet Protocol version 6` (IPv6) is used for the connection instead of IPv4, then the IP address must be enclosed within square brackets (that is, `[ipv6_address]`). The following is an example using an IPv6 connection: + +```text +edbplus.sh enterprisedb/password@[fe80::20c:29ff:fe7c:78b2]:5444/edb +``` + + The `pg_hba.conf` file for the database server must contain an appropriate entry for the IPv6 connection. The following example shows an entry that allows all addresses: + +```text +# TYPE DATABASE USER ADDRESS METHOD +host all all ::0/0 md5 +``` + +For more information about the `pg_hba.conf` file, see the PostgreSQL core documentation at: + + + + If an SSL connection is desired, then include the `?ssl=true` parameter in the connection string. In such a case, the connection string must minimally include `host:port`, with or without `/dbname`. If the `ssl` parameter is not specified, the default is `false`. See [Using a Secure Sockets Layer (SSL) Connection](05_using_edb_plus_with_ssl/#using_edb_plus_with_ssl) for instructions on setting up an SSL connection. + + `variable` is a variable defined in the `login.sql` file that contains a database connection string. The `login.sql` file can be found in the `edbplus` subdirectory of the Advanced Server home directory. + +`/NOLOG` + + Specify `/NOLOG` to start EDB\*Plus without establishing a database connection.
SQL commands and EDB\*Plus commands that require a database connection cannot be used in this mode. The `CONNECT` command can subsequently be given to connect to a database after starting EDB\*Plus with the `/NOLOG` option. + +`scriptfile[.ext ]` + + `scriptfile` is the name of a file residing in the current working directory, containing SQL and/or EDB\*Plus commands that will be automatically executed after startup of EDB\*Plus. `ext` is the filename extension. If the filename extension is `sql`, then the `.sql` extension may be omitted when specifying `scriptfile`. When creating a script file, always name the file with an extension, otherwise it will not be accessible by EDB\*Plus. (EDB\*Plus will always assume a `.sql` extension on filenames that are specified with no extension.) + +The following example shows user `enterprisedb` with password `password`, connecting to database `edb` running on a database server on the `localhost` at port `5444`. + +```text +C:\Program Files\edb\as11\edbplus>edbplus enterprisedb/password +Connected to EnterpriseDB 11.0.1 (localhost:5444/edb) AS enterprisedb + +EDB*Plus: Release 11 (Build 37.0.0) +Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. + +SQL> +``` + +The following example shows user `enterprisedb` with password `password`, connecting to database `edb` running on a database server on the `localhost` at port `5445`. + +```text +C:\Program Files\edb\as11\edbplus>edbplus enterprisedb/password@localhost:5445/edb +Connected to EnterpriseDB 11.0.1 (localhost:5445/edb) AS enterprisedb + +EDB*Plus: Release 11 (Build 37.0.0) +Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. + +SQL> +``` + +The following example uses the variable `hr_5445`, defined in the `login.sql` file, to connect to database `hr` on the localhost at port `5445`. + +```text +C:\Program Files\edb\as11\edbplus>edbplus enterprisedb/password@hr_5445 +Connected to EnterpriseDB 11.0.1 (localhost:5445/hr) AS enterprisedb + +EDB*Plus: Release 11 (Build 37.0.0) +Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. + +SQL> +``` + +The following is the content of the `login.sql` file used in the previous example. + +```text +define edb="localhost:5445/edb" +define hr_5445="localhost:5445/hr" +``` + +The following example executes a script file, `dept_query.sql`, after connecting to database `edb` on server localhost at port `5444`. + +```text +C:\Program Files\edb\as11\edbplus>edbplus enterprisedb/password @dept_query +Connected to EnterpriseDB 11.0.1 (localhost:5444/edb) AS enterprisedb + +SQL> SELECT * FROM dept; + +DEPTNO DNAME LOC +------ -------------- ------------- +10 ACCOUNTING NEW YORK +20 RESEARCH DALLAS +30 SALES CHICAGO +40 OPERATIONS BOSTON + +SQL> EXIT +Disconnected from EnterpriseDB Database. +``` + +The following is the content of file `dept_query.sql` used in the previous example.
+ +```text +SET PAGESIZE 9999 +SET ECHO ON +SELECT * FROM dept; +EXIT +``` diff --git a/product_docs/docs/epas/11/edb_plus/05_using_edb_plus_with_ssl.mdx b/product_docs/docs/epas/11/edb_plus/05_using_edb_plus_with_ssl.mdx new file mode 100644 index 00000000000..e9a0cc4544d --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/05_using_edb_plus_with_ssl.mdx @@ -0,0 +1,326 @@ +--- +title: "Using a Secure Sockets Layer (SSL) Connection" +legacyRedirects: + - "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/39/using_edb_plus_with_ssl.html" +--- + + + + + +An EDB\*Plus connection to the Advanced Server database can be accomplished using secure sockets layer (SSL) connectivity. + +Using SSL requires various prerequisite configuration steps performed on the database server involved with the SSL connection, as well as creation of the Java truststore and keystore on the host that will run EDB\*Plus. + +The Java *truststore* is the file containing the Certificate Authority (CA) certificates that the Java client (EDB\*Plus) uses to verify the authenticity of the server to which it is initiating an SSL connection. + +The Java *keystore* is the file containing private and public keys and their corresponding certificates. The keystore is required for client authentication to the server, which is used for the EDB\*Plus connection. + +The following material provides guidance for setting up the SSL connections: + +- For information on setting up SSL connectivity to the Advanced Server database, see the section about secure TCP connections with SSL in Chapter 18 “Server Setup and Operation” in the PostgreSQL Core Documentation located at: + + + +- For information on JDBC client connectivity using SSL, see the section on configuring the client in Chapter 4 “Using SSL” in The PostgreSQL JDBC Interface located at: + + + +The following sections provide information for the configuration steps of using SSL. + +- Configuring SSL on Advanced Server +- Configuring SSL for the EDB\*Plus client +- Requesting SSL connection to the Advanced Server database + +## Configuring SSL on Advanced Server + +This section provides an example of configuring SSL on a database server to demonstrate the use of SSL with EDB\*Plus. A self-signed certificate is used for this purpose. + +**Step 1:** Create the certificate signing request (CSR). + +In the following example, the generated certificate signing request file is `server.csr`. The private key is generated as file `server.key`. + +```text +$ openssl req -new -nodes -text -out server.csr \ +> -keyout server.key -subj "/CN=enterprisedb" +Generating a 2048 bit RSA private key +.............................+++ +....................................................................+++ +writing new private key to 'server.key' +----- +``` + +!!! Note + When creating the certificate, the value specified for the common name field (designated as `CN=enterprisedb` in this example) must be the database user name that is specified when connecting to EDB\*Plus. + +In addition, user name maps can be used as defined in the `pg_ident.conf` file to permit more flexibility for the common name and database user name. Steps 8 and 9 describe the use of user name maps. + +**Step 2:** Generate the self-signed certificate. + +The following generates a self-signed certificate to file `server.crt` using the certificate signing request file, `server.csr`, and the private key, `server.key`, as input.
+ +```text +$ openssl x509 -req -days 365 -in server.csr -signkey server.key \ +> -out server.crt +Signature ok +subject=/CN=enterprisedb +Getting Private key +``` + +**Step 3:** Make a copy of the server certificate (`server.crt`) to be used as the root Certificate Authority (CA) file (`root.crt`). + +```text +$ cp server.crt root.crt +``` + +**Step 4:** Delete the now redundant certificate signing request (`server.csr`). + +```text +$ rm -f server.csr +``` + +**Step 5:** Move or copy the certificate and private key files to the Advanced Server data directory (for example, `/opt/edb/as11/data`). + +```text +$ mv root.crt /opt/edb/as11/data +$ mv server.crt /opt/edb/as11/data +$ mv server.key /opt/edb/as11/data +``` + +**Step 6:** Set the file ownership and permissions on the certificate files and private key file. + +Set the ownership to the operating system account that owns the data sub-directory of the database server. Set the permissions so that no group or account other than the owner can access these files. + +```text +$ chown enterprisedb root.crt server.crt server.key +$ chgrp enterprisedb root.crt server.crt server.key +$ chmod 600 root.crt server.crt server.key +$ ls -l +total 152 + . + . + . +-rw------- 1 enterprisedb enterprisedb 985 Aug 22 11:00 root.crt +-rw------- 1 enterprisedb enterprisedb 985 Aug 22 10:59 server.crt +-rw------- 1 enterprisedb enterprisedb 1704 Aug 22 10:58 server.key +``` + +**Step 7:** In the `postgresql.conf` file, make the following modifications. + +```text +ssl = on +ssl_cert_file = 'server.crt' +ssl_key_file = 'server.key' +ssl_ca_file = 'root.crt' +``` + +**Step 8:** Modify the `pg_hba.conf` file to enable SSL usage on the desired database to which EDB\*Plus is to make the SSL connection. + +In the `pg_hba.conf` file, the `hostssl` type indicates the entry is used to validate SSL connection attempts from the client (EDB\*Plus). + +The authentication method is set to `cert` with the option `clientcert=1` in order to require an SSL certificate from the client against which authentication is performed using the common name of the certificate (`enterprisedb` in this example). + +The `map=sslusers` option specifies that a mapping named `sslusers` defined in the `pg_ident.conf` file is to be used for authentication. This mapping allows a connection to the database if the common name from the certificate and the database user name attempting the connection match the `SYSTEM-USERNAME/PG-USERNAME` pair listed in the `pg_ident.conf` file. + +The following is an example of the settings in the `pg_hba.conf` file if the database (`edb`) must use SSL connections. + +```text +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all md5 +# IPv4 local connections: +hostssl edb all 192.168.2.0/24 cert clientcert=1 map=sslusers +``` + +**Step 9:** The following shows the user name maps in the `pg_ident.conf` file related to the `pg_hba.conf` file by the `map=sslusers` option. These user name maps permit you to specify database user names `edbuser`, `postgres`, or `enterprisedb` when connecting with EDB\*Plus. + +```text +# MAPNAME SYSTEM-USERNAME PG-USERNAME + sslusers enterprisedb edbuser + sslusers enterprisedb postgres + sslusers enterprisedb enterprisedb +``` + +**Step 10:** Restart the database server after you have made the changes to the configuration files.
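As a quick sanity check after the restart (a sketch; the service unit name `edb-as-11` and the connection options are assumptions that depend on your installation), you can confirm that SSL is enabled:

```text
$ sudo systemctl restart edb-as-11
$ psql -d edb -U enterprisedb -c "SHOW ssl;"
 ssl
-----
 on
(1 row)
```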
+ +## Configuring SSL for the EDB\*Plus Client + +After you have configured SSL on the database server, the following steps provide an example of generating certificate and keystore files for EDB\*Plus (the JDBC client). + +**Step 1:** Using files `server.crt` and `server.key` located under the database server data sub-directory, create copies of these files and move them to the host where EDB\*Plus is to be running. + +Store these files in the desired directory to contain the trusted certificate and keystore files to be generated in the following steps. The suggested location is to create a `.postgresql` sub-directory under the home directory of the user account that will invoke EDB\*Plus. Thus, these files will be under the `~/.postgresql` directory of the user account that will run EDB\*Plus. + +For this example, assume file `edb.crt` is a copy of `server.crt` and `edb.key` is a copy of `server.key`. + +**Step 2:** Create an additional copy of `edb.crt`. + +```text +$ cp edb.crt edb_root.crt +$ ls -l +total 12 +-rw-r--r-- 1 user user 985 Aug 22 14:17 edb.crt +-rw-r--r-- 1 user user 1704 Aug 22 14:18 edb.key +-rw-r--r-- 1 user user 985 Aug 22 14:19 edb_root.crt +``` + +**Step 3:** Create a Distinguished Encoding Rules (DER) format of file `edb_root.crt`. The generated DER format of this file is `edb_root.crt.der`. The DER format of the file is required for the `keytool` program used in Step 4. + +```text +$ openssl x509 -in edb_root.crt -out edb_root.crt.der -outform der +$ ls -l +total 16 +-rw-r--r-- 1 user user 985 Aug 22 14:17 edb.crt +-rw-r--r-- 1 user user 1704 Aug 22 14:18 edb.key +-rw-r--r-- 1 user user 985 Aug 22 14:19 edb_root.crt +-rw-rw-r-- 1 user user 686 Aug 22 14:21 edb_root.crt.der +``` + +**Step 4:** Use the `keytool` program to create a keystore file (`postgresql.keystore`) using `edb_root.crt.der` as the input. This process adds the certificate of the Postgres database server to the keystore file. + +!!! Note + The file name `postgresql.keystore` is recommended so that it can be accessed in its default directory location `~/.postgresql/postgresql.keystore`, which is under the home directory of the user account invoking EDB\*Plus. Also note that the file name suffix can be `.jks` instead of `.keystore` (thus, file name `postgresql.jks`). However, in the remainder of these examples, file name `postgresql.keystore` is used. + +**For Windows only:** The path is `%APPDATA%\.postgresql\postgresql.keystore` + +The `keytool` program can be found under the `bin` subdirectory of the Java Runtime Environment installation. + +You will be prompted for a new password. Save this password, as it must be specified with the `PGSSLCERTPASS` environment variable. + +```text +$ /usr/java/jdk1.8.0_131/jre/bin/keytool -keystore postgresql.keystore \ +> -alias postgresqlstore -import -file edb_root.crt.der +Enter keystore password: +Re-enter new password: +Owner: CN=enterprisedb +Issuer: CN=enterprisedb +Serial number: c60f40256b0e8d53 +Valid from: Tue Aug 22 10:59:25 EDT 2017 until: Wed Aug 22 10:59:25 EDT 2018 +Certificate fingerprints: + MD5: 85:0B:E9:A7:6E:4F:7C:B0:9B:D6:3A:44:55:E2:E9:8E + SHA1: DD:A6:71:24:0B:6C:F8:BC:7A:4C:89:9B:DC:22:6A:6C:B0:F5:3F:7C + SHA256: +DC:02:64:E2:B0:E9:6F:1C:FC:4F:AE:E6:18:85:0B:79:57:43:C3:C5:AE:43:0D:37 +:49:53:6D:11:69:06:46:48 + Signature algorithm name: SHA1withRSA + Version: 1 +Trust this certificate?
[no]: yes +Certificate was added to keystore +``` + +**Step 5:** Create a `PKCS #12` format of the keystore file (`postgresql.p12`) using files `edb.crt` and `edb.key` as input. + +!!! Note + The file name `postgresql.p12` is recommended so that it can be accessed in its default directory location `~/.postgresql/postgresql.p12`, which is under the home directory of the user account invoking EDB\*Plus. + +**For Windows only:** The path is `%APPDATA%\.postgresql\postgresql.p12` + +You will be prompted for a new password. Save this password, as it must be specified with the `PGSSLKEYPASS` environment variable. + +```text +$ openssl pkcs12 -export -in edb.crt -inkey edb.key -out postgresql.p12 +Enter Export Password: +Verifying - Enter Export Password: +$ ls -l +total 24 +-rw-rw-r-- 1 user user 985 Aug 24 12:18 edb.crt +-rw-rw-r-- 1 user user 1704 Aug 24 12:18 edb.key +-rw-rw-r-- 1 user user 985 Aug 24 12:20 edb_root.crt +-rw-rw-r-- 1 user user 686 Aug 24 12:20 edb_root.crt.der +-rw-rw-r-- 1 user user 758 Aug 24 12:26 postgresql.keystore +-rw-rw-r-- 1 user user 2285 Aug 24 12:28 postgresql.p12 +``` + +**Step 6:** If the `postgresql.keystore` and `postgresql.p12` files are not already in the `~/.postgresql` directory, move or copy them to that location. + +**For Windows only:** The directory is `%APPDATA%\.postgresql` + +**Step 7:** If the default location `~/.postgresql` is not used, then the full path (including the file name) to the `postgresql.keystore` file must be set with the `PGSSLCERT` environment variable, and the full path (including the file name) to file `postgresql.p12` must be set with the `PGSSLKEY` environment variable before invoking EDB\*Plus. + +In addition, if the generated file from Step 4 was not named `postgresql.keystore` or `postgresql.jks`, then use the `PGSSLCERT` environment variable to designate the file name and its location. Similarly, if the generated file from Step 5 was not named `postgresql.p12`, then use the `PGSSLKEY` environment variable to designate the file name and its location. + +## Requesting an SSL Connection between EDB\*Plus and the Advanced Server Database + +Be sure the following topics have been addressed in order to perform an SSL connection: + +- The trusted certificate and keystore files have been generated for both the database server and the client host to be invoking EDB\*Plus. +- The `postgresql.conf` file for the database server contains the updated configuration parameters. +- The `pg_hba.conf` file for the database server contains the required entry for permitting the SSL connection. +- For the client host, either the client’s certificate and keystore files have been placed in the user account’s `~/.postgresql` directory or the environment variables `PGSSLCERT` and `PGSSLKEY` are set before invoking EDB\*Plus. +- The `PGSSLCERTPASS` environment variable is set with a password. +- The `PGSSLKEYPASS` environment variable is set with a password. + +When invoking EDB\*Plus, include the `?ssl=true` parameter in the database connection string as shown for the `connectstring` option in [Using EDB\*Plus](04_using_edb_plus/#using_edb_plus). + +The following is an example where EDB\*Plus is invoked from a host that is remote to the database server.
+ +The `postgresql.conf` file of the database server contains the following modified parameters: + +```text +ssl = on +ssl_cert_file = 'server.crt' +ssl_key_file = 'server.key' +ssl_ca_file = 'root.crt' +``` + +The `pg_hba.conf` file of the database server contains the following entry for connecting from EDB\*Plus on the remote host: + +```text +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all md5 +# IPv4 local connections: +hostssl edb all 192.168.2.24/32 cert clientcert=1 +``` + +On the remote host where EDB\*Plus is to be invoked, the Linux user account named `user` contains the certificate and keystore files in its `~/.postgresql` directory: + +```text +[user@localhost ~]$ whoami +user +[user@localhost ~]$ cd .postgresql +[user@localhost .postgresql]$ pwd +/home/user/.postgresql +[user@localhost .postgresql]$ ls -l +total 8 +-rw-rw-r-- 1 user user 758 Aug 24 12:37 postgresql.keystore +-rw-rw-r-- 1 user user 2285 Aug 24 12:37 postgresql.p12 +``` + +Logged into Linux with the account named `user`, EDB\*Plus is successfully invoked with the `ssl=true` parameter: + +```text +$ export PGSSLCERTPASS=keypass +$ export PGSSLKEYPASS=exppass +$ cd /opt/edb/as11/edbplus +$ ./edbplus.sh enterprisedb/password@192.168.2.22:5444/edb?ssl=true +Connected to EnterpriseDB 11.0.1 (192.168.2.22:5444/edb) AS enterprisedb + +EDB*Plus: Release 11 (Build 37.0.0) +Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. + +SQL> +``` + +Alternatively, if the certificate and keystore files are placed in a directory other than `~/.postgresql`, EDB\*Plus can be invoked in the following manner: + +```text +$ export PGSSLCERT=/home/user/ssl/postgresql.keystore +$ export PGSSLKEY=/home/user/ssl/postgresql.p12 +$ export PGSSLCERTPASS=keypass +$ export PGSSLKEYPASS=exppass +$ cd /opt/edb/as11/edbplus +$ ./edbplus.sh enterprisedb/password@192.168.2.22:5444/edb?ssl=true +Connected to EnterpriseDB 11.0.1 (192.168.2.22:5444/edb) AS enterprisedb + +EDB*Plus: Release 11 (Build 37.0.0) +Copyright (c) 2008-2021, EnterpriseDB Corporation. All rights reserved. + +SQL> +``` + +Note that in both cases the database user name used to log into EDB\*Plus is `enterprisedb`, as this is the user specified for the common name field when creating the certificate in Step 1 of [Configuring SSL on Advanced Server](#using_ssl_connection). + +Other database user names can be used if the `map` option in the `pg_hba.conf` file and the `pg_ident.conf` file are used. diff --git a/product_docs/docs/epas/11/edb_plus/06_command_summary.mdx b/product_docs/docs/epas/11/edb_plus/06_command_summary.mdx new file mode 100644 index 00000000000..2a73c94dad7 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/06_command_summary.mdx @@ -0,0 +1,999 @@ +--- +title: "Command Summary" +legacyRedirects: + - "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/39/command_summary.html" +--- + + + +The following sections contain a summary of EDB\*Plus commands. + +## ACCEPT + +The `ACCEPT` command displays a prompt and waits for the user’s keyboard input. The value input by the user is placed in the specified variable. + +```text +ACC[EPT ] variable +``` + +The following example creates a new variable named `my_name`, accepts a value of `John Smith`, then displays the value using the `DEFINE` command.
+ +```text +SQL> ACCEPT my_name +Enter value for my_name: John Smith +SQL> DEFINE my_name +DEFINE MY_NAME = "John Smith" +``` + +## APPEND + +`APPEND` is a line editor command that appends the given text to the end of the current line in the SQL buffer. + +```text +A[PPEND ] text +``` + +In the following example, a `SELECT` command is built in the SQL buffer using the `APPEND` command. Note that two spaces are placed between the `APPEND` command and the `WHERE` clause in order to separate `dept` and `WHERE` by one space in the SQL buffer. + +```text +SQL> APPEND SELECT * FROM dept +SQL> LIST + 1 SELECT * FROM dept +SQL> APPEND WHERE deptno = 10 +SQL> LIST + 1 SELECT * FROM dept WHERE deptno = 10 +``` + +## CHANGE + +`CHANGE` is a line editor command that performs a search-and-replace on the current line in the SQL buffer. + +```text +C[HANGE ] /FROM/[ TO/ ] +``` + +If `TO/` is specified, the first occurrence of text `FROM` in the current line is changed to text `TO`. If `TO/` is omitted, the first occurrence of text `FROM` in the current line is deleted. + +The following sequence of commands makes line 3 the current line, then changes the department number in the `WHERE` clause from 20 to 30. + +```text +SQL> LIST + 1 SELECT empno, ename, job, sal, comm + 2 FROM emp + 3 WHERE deptno = 20 + 4* ORDER BY empno +SQL> 3 + 3* WHERE deptno = 20 +SQL> CHANGE /20/30/ + 3* WHERE deptno = 30 +SQL> LIST + 1 SELECT empno, ename, job, sal, comm + 2 FROM emp + 3 WHERE deptno = 30 + 4* ORDER BY empno +``` + +## CLEAR + +The `CLEAR` command removes the contents of the SQL buffer, deletes all column definitions set with the `COLUMN` command, or clears the screen. + +```text +CL[EAR ] [ BUFF[ER ] | SQL | COL[UMNS ] | SCR[EEN ] ] +``` + +`BUFFER | SQL` + + Clears the SQL buffer. + +`COLUMNS` + + Removes column definitions. + +`SCREEN` + + Clears the screen. This is the default if no options are specified. + +## COLUMN + +The `COLUMN` command controls output formatting. The formatting attributes set by using the `COLUMN` command remain in effect only for the duration of the current session. + +```text +COL[UMN ] + [ column + { CLE[AR ] | + { FOR[MAT ] spec | + HEA[DING ] text | + { OFF | ON } + } [...] + } + ] +``` + +If the `COLUMN` command is specified with no subsequent options, formatting options for current columns in effect for the session are displayed. + +If the `COLUMN` command is followed by a column name, then the column name may be followed by one of the following: + +1. No other options +2. `CLEAR` +3. Any combination of `FORMAT`, `HEADING`, and one of `OFF` or `ON` + +`column` + + Name of a column in a table to which subsequent column formatting options are to apply. If no other options follow `column`, then the current column formatting options, if any, of `column` are displayed. + +`CLEAR` + + The `CLEAR` option reverts all formatting options back to their defaults for `column`. If the `CLEAR` option is specified, it must be the only option specified. + +`spec` + + Format specification to be applied to `column`. For character columns, `spec` takes the following format: + +`An` + + `n` is a positive integer that specifies the column width in characters within which to display the data. Data in excess of `n` will wrap around within the specified column width. + + For numeric columns, `spec` consists of the following elements. + + Table - Numeric Column Format Elements + +| Element | Description | +| ------- | ------------------------------------------ | +| `$` | Display a leading dollar sign. |
| `,` | Display a comma in the indicated position. | +| `.` | Marks the location of the decimal point. | +| `0` | Display leading zeros. | +| `9` | Number of significant digits to display. | + + If loss of significant digits occurs due to overflow of the format, then all #’s are displayed. + +`text` + + Text to be used for the column heading of `column`. + +`OFF | ON` + + If `OFF` is specified, formatting options are reverted to their defaults, but are still available within the session. If `ON` is specified, the formatting options specified by previous `COLUMN` commands for `column` within the session are reactivated. + +The following example shows the effect of changing the display width of the `job` column. + +```text +SQL> SET PAGESIZE 9999 +SQL> COLUMN job FORMAT A5 +SQL> COLUMN job +COLUMN JOB ON +FORMAT A5 +wrapped +SQL> SELECT empno, ename, job FROM emp; + +EMPNO ENAME JOB +----- ---------- ----- + 7369 SMITH CLERK + 7499 ALLEN SALES + MAN + + 7521 WARD SALES + MAN + + 7566 JONES MANAG + ER + + 7654 MARTIN SALES + MAN + + 7698 BLAKE MANAG + ER + + 7782 CLARK MANAG + ER + + 7788 SCOTT ANALY + ST + + 7839 KING PRESI + DENT + + 7844 TURNER SALES + MAN + + 7876 ADAMS CLERK + 7900 JAMES CLERK + 7902 FORD ANALY + ST + + 7934 MILLER CLERK + +14 rows retrieved. +``` + +The following example applies a format to the `sal` column. + +```text +SQL> COLUMN sal FORMAT $99,999.00 +SQL> COLUMN +COLUMN JOB ON +FORMAT A5 +wrapped + +COLUMN SAL ON +FORMAT $99,999.00 +wrapped +SQL> SELECT empno, ename, job, sal FROM emp; + +EMPNO ENAME JOB SAL +----- ---------- ----- ----------- + 7369 SMITH CLERK $800.00 + 7499 ALLEN SALES $1,600.00 + MAN + + 7521 WARD SALES $1,250.00 + MAN + + 7566 JONES MANAG $2,975.00 + ER + + 7654 MARTIN SALES $1,250.00 + MAN + + 7698 BLAKE MANAG $2,850.00 + ER + + 7782 CLARK MANAG $2,450.00 + ER + + 7788 SCOTT ANALY $3,000.00 + ST + + 7839 KING PRESI $5,000.00 + DENT + + 7844 TURNER SALES $1,500.00 + MAN + + 7876 ADAMS CLERK $1,100.00 + 7900 JAMES CLERK $950.00 + 7902 FORD ANALY $3,000.00 + ST + + 7934 MILLER CLERK $1,300.00 + +14 rows retrieved. +``` + +## CONNECT + +Change the database connection to a different user and/or connect to a different database. There must be no white space between any of the parameters following the `CONNECT` command. + +```text +CON[NECT] <username>[/<password>][@{<connectstring> | <variable> } ] +``` + +Where: + + `username` is a database username with which to connect to the database. + + `password` is the password associated with the specified `username`. If a `password` is not provided, but a password is required for authentication, a search is made for a password file, first in the home directory of the Linux operating system account invoking EDB\*Plus (or in the `%APPDATA%\postgresql\` directory for Windows) and then at the location specified by the `PGPASSFILE` environment variable. The password file is `.pgpass` on Linux hosts and `pgpass.conf` on Windows hosts. The following is an example on a Windows host: + +```text +C:\Users\Administrator\AppData\Roaming\postgresql\pgpass.conf +``` + + If a password file cannot be located, or it does not have an entry matching the EDB\*Plus connection parameters, then EDB\*Plus will prompt for the password. For more information about password files, see the PostgreSQL core documentation at: + + + +!!! Note + When a password is not required, EDB\*Plus does not prompt for a password, such as when the `trust` authentication method is specified in the `pg_hba.conf` file.
+ +For more information about the `pg_hba.conf` file and authentication methods, see the PostgreSQL core documentation at: + + `connectstring` is the database connection string. See [Using EDB\*Plus](04_using_edb_plus/#using_edb_plus) for further information on the database connection string. + + `variable` is a variable defined in the `login.sql` file that contains a database connection string. The `login.sql` file can be found in the `edbplus` subdirectory of the Advanced Server home directory. + +In the following example, the database connection is changed to database `edb` on the localhost at port `5445` with username `smith`. + +```text +SQL> CONNECT smith/mypassword@localhost:5445/edb +Disconnected from EnterpriseDB Database. +Connected to EnterpriseDB 11.0.1 (localhost:5445/edb) AS smith +``` + +From within the session shown above, the connection is changed to username `enterprisedb`. Also note that the host defaults to the localhost, the port defaults to `5444` (which is not the same as the port previously used), and the database defaults to `edb`. + +```text +SQL> CONNECT enterprisedb/password +Disconnected from EnterpriseDB Database. +Connected to EnterpriseDB 11.0.1 (localhost:5444/edb) AS enterprisedb +``` + +## DEFINE + +The `DEFINE` command creates or replaces the value of a *user variable* (also called a *substitution variable*). + +```text +DEF[INE ] [ variable [ = text ] ] +``` + +If the `DEFINE` command is given without any parameters, all current variables and their values are displayed. + +If `DEFINE variable` is given, only `variable` is displayed with its value. + +`DEFINE variable = text` assigns `text` to `variable`. `text` may optionally be enclosed within single or double quotation marks. Quotation marks must be used if `text` contains space characters. + +The following example defines two variables, `dept` and `name`. + +```text +SQL> DEFINE dept = 20 +SQL> DEFINE name = 'John Smith' +SQL> DEFINE +DEFINE EDB = "localhost:5445/edb" +DEFINE DEPT = "20" +DEFINE NAME = "John Smith" +``` + +!!! Note + The variable `EDB` is read from the `login.sql` file located in the `edbplus` subdirectory of the Advanced Server home directory. + +## DEL + +`DEL` is a line editor command that deletes one or more lines from the SQL buffer. + +```text +DEL [ n | n m | n * | n L[AST ] | * | * n | * L[AST ] | L[AST ] ] +``` + +The parameters specify which lines are to be deleted from the SQL buffer. Two parameters specify the start and end of a range of lines to be deleted. If the `DEL` command is given with no parameters, the current line is deleted. + +`n` + + `n` is an integer representing the `n`th line + +`n m` + + `n` and `m` are integers, where `m` is greater than `n`, representing the `n`th through the `m`th lines + +`*` + + Current line + +`LAST` + + Last line + +In the following example, the fifth and sixth lines containing columns `sal` and `comm`, respectively, are deleted from the `SELECT` command in the SQL buffer.
+ +```text +SQL> LIST + 1 SELECT + 2 empno + 3 ,ename + 4 ,job + 5 ,sal + 6 ,comm + 7 ,deptno + 8* FROM emp +SQL> DEL 5 6 +SQL> LIST + 1 SELECT + 2 empno + 3 ,ename + 4 ,job + 5 ,deptno + 6* FROM emp +``` + +## DESCRIBE + +The `DESCRIBE` command displays: + +- A list of columns, column data types, and column lengths for a table or view +- A list of parameters for a procedure or function +- A list of procedures and functions and their respective parameters for a package + +The `DESCRIBE` command will also display the structure of the database object referred to by a synonym. The syntax is: + +```text +DESC[RIBE] [ schema.]object +``` + +`schema` + + Name of the schema containing the object to be described. + +`object` + + Name of the table, view, procedure, function, or package to be displayed, or the synonym of an object. + +## DISCONNECT + +The `DISCONNECT` command closes the current database connection, but does not terminate EDB\*Plus. + +```text +DISC[ONNECT ] +``` + +## EDIT + +The `EDIT` command invokes an external editor to edit the contents of an operating system file or the SQL buffer. + +```text +ED[IT ] [ filename[.ext ] ] +``` + +`filename[.ext ]` + + `filename` is the name of the file to open with an external editor. `ext` is the filename extension. If the filename extension is `sql`, then the `.sql` extension may be omitted when specifying `filename`. `EDIT` always assumes a `.sql` extension on filenames that are specified with no extension. If the filename parameter is omitted from the `EDIT` command, the contents of the SQL buffer are brought into the editor. + +## EXECUTE + +The `EXECUTE` command executes an SPL procedure from EDB\*Plus. + +```text +EXEC[UTE ] spl_procedure [ ([ parameters ]) ] +``` + +`spl_procedure` + + The name of the SPL procedure to be executed. + +`parameters` + + Comma-delimited list of parameters. If there are no parameters, then a pair of empty parentheses may optionally be specified. + +## EXIT + +The `EXIT` command terminates the EDB\*Plus session and returns control to the operating system. `QUIT` is a synonym for `EXIT`. Specifying no parameters is equivalent to `EXIT SUCCESS COMMIT`. + +```text +{ EXIT | QUIT } +[ SUCCESS | FAILURE | WARNING | value | variable ] +[ COMMIT | ROLLBACK ] +``` + +`SUCCESS | FAILURE | WARNING` + + Returns an operating system dependent return code indicating successful operation, failure, or warning for `SUCCESS`, `FAILURE`, and `WARNING`, respectively. The default is `SUCCESS`. + +`value` + + An integer value that is returned as the return code. + +`variable` + + A variable created with the `DEFINE` command whose value is returned as the return code. + +`COMMIT | ROLLBACK` + + If `COMMIT` is specified, uncommitted updates are committed upon exit. If `ROLLBACK` is specified, uncommitted updates are rolled back upon exit. The default is `COMMIT`. + +## GET + +The `GET` command loads the contents of the given file to the SQL buffer. + +```text +GET filename[.ext ] [ LIS[T ] | NOL[IST ] ] +``` + +`filename[.ext ]` + + `filename` is the name of the file to load into the SQL buffer. `ext` is the filename extension. If the filename extension is `sql`, then the `.sql` extension may be omitted when specifying `filename`. `GET` always assumes a `.sql` extension on filenames that are specified with no extension. + +`LIST | NOLIST` + + If `LIST` is specified, the content of the SQL buffer is displayed after the file is loaded. If `NOLIST` is specified, no listing is displayed. The default is `LIST`.
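For example, loading the `dept_query.sql` script shown in [Using EDB\*Plus](04_using_edb_plus/#using_edb_plus) might look like the following (a sketch; the numbered listing format is assumed to follow the `LIST` examples above):

```text
SQL> GET dept_query
  1  SET PAGESIZE 9999
  2  SET ECHO ON
  3  SELECT * FROM dept;
  4* EXIT
```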
+ +## HELP + +The `HELP` command displays an index of topics or help on a specific topic. The question mark (`?`) is synonymous with specifying `HELP`. + +```text +{ HELP | ? } { INDEX | topic } +``` + +`INDEX` + + Displays an index of available topics. + +`topic` + + The name of a specific topic (for example, an EDB\*Plus command) for which help is desired. + +## HOST + +The `HOST` command executes an operating system command from EDB\*Plus. + +```text +HO[ST ] [os_command] +``` + +`os_command` + + The operating system command to be executed. If you do not provide an operating system command, EDB\*Plus pauses execution and opens a new shell prompt. When the shell exits, EDB\*Plus resumes execution. + +## INPUT + +The `INPUT` line editor command adds a line of text to the SQL buffer after the current line. + +```text +I[NPUT ] text +``` + +The following sequence of `INPUT` commands constructs a `SELECT` command. + +```text +SQL> INPUT SELECT empno, ename, job, sal, comm +SQL> INPUT FROM emp +SQL> INPUT WHERE deptno = 20 +SQL> INPUT ORDER BY empno +SQL> LIST + 1 SELECT empno, ename, job, sal, comm + 2 FROM emp + 3 WHERE deptno = 20 + 4* ORDER BY empno +``` + +## LIST + +`LIST` is a line editor command that displays the contents of the SQL buffer. + +```text +L[IST] [ n | n m | n * | n L[AST] | * | * n | * L[AST] | L[AST] ] +``` + +The buffer does not include a history of the EDB\*Plus commands. + +`n` + + `n` represents the buffer line number. + +`n m` + + `n m` displays a list of lines between `n` and `m`. + +`n *` + + `n *` displays a list of lines that range between line `n` and the current line. + +`n L[AST]` + + `n L[AST]` displays a list of lines that range from line `n` through the last line in the buffer. + +`*` + + `*` displays the current line. + +`* n` + + `* n` displays a list of lines that range from the current line through line `n`. + +`* L[AST]` + + `* L[AST]` displays a list of lines that range from the current line through the last line. + +`L[AST]` + + `L[AST]` displays the last line. + +## PASSWORD + +Use the `PASSWORD` command to change your database password. + +```text +PASSW[ORD] [user_name] +``` + +You must have sufficient privileges to use the `PASSWORD` command to change another user's password. The following example demonstrates using the `PASSWORD` command to change the password for a user named `acctg`: + +```text +SQL> PASSWORD acctg +Changing password for acctg + New password: + New password again: +Password successfully changed. +``` + +## PAUSE + +The `PAUSE` command displays a message and waits for the user to press `ENTER`. + +```text +PAU[SE] [optional_text] +``` + +`optional_text` specifies the text that will be displayed to the user. If `optional_text` is omitted, Advanced Server will display two blank lines. If you double quote the `optional_text` string, the quotes will be included in the output. + +## PROMPT + +The `PROMPT` command displays a message to the user before continuing. + +```text +PRO[MPT] [message_text] +``` + +`message_text` specifies the text displayed to the user. Double quote the string to include quotes in the output. + +## QUIT + +The `QUIT` command terminates the session and returns control to the operating system. `QUIT` is a synonym for `EXIT`. + +```text +QUIT +[SUCCESS | FAILURE | WARNING | value | sub_variable] +[COMMIT | ROLLBACK] +``` + +The default value is `QUIT SUCCESS COMMIT`. + +## REMARK + +Use `REMARK` to include comments in a script.
+ +```text +REM[ARK] [optional_text] +``` + +You may also use the following convention to include a comment: + +```text +/* + * This is an example of a three line comment. + */ +``` + +## SAVE + +Use the `SAVE` command to write the SQL Buffer to an operating system file. + +```text +SAV[E] file_name +[CRE[ATE] | REP[LACE] | APP[END]] +``` + +`file_name` + + `file_name` specifies the name of the file (including the path) where the buffer contents are written. If you do not provide a file extension, `.sql` is appended to the end of the file name. + +`CREATE` + + Include the `CREATE` keyword to create a new file. A new file is created *only* if a file with the specified name does not already exist. This is the default. + +`REPLACE` + + Include the `REPLACE` keyword to specify that Advanced Server should overwrite an existing file. + +`APPEND` + + Include the `APPEND` keyword to specify that Advanced Server should append the contents of the SQL buffer to the end of the specified file. + +The following example saves the contents of the SQL buffer to a file named `example.sql`, located in the root directory of the `C:` drive: + +```text +SQL> SAVE C:\example.sql CREATE +File "example.sql" written. +``` + +## SET + +Use the `SET` command to specify a value for a session level variable that controls EDB\*Plus behavior. The following forms of the `SET` command are valid: + +**SET AUTOCOMMIT** + +Use the `SET AUTOCOMMIT` command to specify commit behavior for Advanced Server transactions. + +```text +SET AUTO[COMMIT] +{ON | OFF | IMMEDIATE | statement_count} +``` + +Please note that EDB\*Plus always automatically commits DDL statements. + +`ON` + + Specify `ON` to turn `AUTOCOMMIT` behavior on. + +`OFF` + + Specify `OFF` to turn `AUTOCOMMIT` behavior off. + +`IMMEDIATE` + + `IMMEDIATE` has the same effect as `ON`. + +`statement_count` + + Include a value for `statement_count` to instruct EDB\*Plus to issue a commit after the specified count of successful SQL statements. + +**SET COLUMN SEPARATOR** + +Use the `SET COLUMN SEPARATOR` command to specify the text that Advanced Server displays between columns. + +```text +SET COLSEP column_separator +``` + +The default value of `column_separator` is a single space. + +**SET ECHO** + +Use the `SET ECHO` command to specify if SQL and EDB\*Plus script statements should be displayed onscreen as they are executed. + +```text +SET ECHO {ON | OFF} +``` + +The default value is `OFF`. + +**SET FEEDBACK** + +The `SET FEEDBACK` command controls the display of interactive information after a SQL statement executes. + +```text +SET FEED[BACK] {ON | OFF | row_threshold} +``` + +`row_threshold` + + Specify an integer value for `row_threshold`. Setting `row_threshold` to `0` is the same as setting `FEEDBACK` to `OFF`. Setting `row_threshold` equal to `1` effectively sets `FEEDBACK` to `ON`. + +**SET FLUSH** + +Use the `SET FLUSH` command to control display buffering. + +```text +SET FLU[SH] {ON | OFF} +``` + +Set `FLUSH` to `OFF` to enable display buffering. If you enable buffering, messages bound for the screen may not appear until the script completes. Please note that setting `FLUSH` to `OFF` will offer better performance. + +Set `FLUSH` to `ON` to disable display buffering. If you disable buffering, messages bound for the screen appear immediately. + +**SET HEADING** + +Use the `SET HEADING` command to specify if Advanced Server should display column headings for `SELECT` statements.
+ +```text +SET HEA[DING] {ON | OFF} +``` + +**SET HEAD SEPARATOR** + +The `SET HEADSEP` command sets the new heading separator character used by the `COLUMN HEADING` command. The default is `'|'`. + +```text +SET HEADS[EP] +``` + +**SET LINESIZE** + +Use the `SET LINESIZE` command to specify the width of a line in characters. + +```text +SET LIN[ESIZE] width_of_line +``` + +`width_of_line` + + The default value of `width_of_line` is `132`. + +**SET NEWPAGE** + +Use the `SET NEWPAGE` command to specify how many blank lines are printed after a page break. + +```text +SET NEWP[AGE] lines_per_page +``` + +`lines_per_page` + + The default value of `lines_per_page` is `1`. + +**SET NULL** + +Use the `SET NULL` command to specify a string that is displayed to the user when a `NULL` column value is displayed in the output buffer. + +```text +SET NULL null_string +``` + +**SET PAGESIZE** + +Use the `SET PAGESIZE` command to specify the number of printed lines that fit on a page. + +```text +SET PAGES[IZE] line_count +``` + +Use the `line_count` parameter to specify the number of lines per page. + +**SET SQLCASE** + +The `SET SQLCASE` command specifies if SQL statements transmitted to the server should be converted to upper or lower case. + +```text +SET SQLC[ASE] {MIX[ED] | UP[PER] | LO[WER]} +``` + +`UPPER` + + Specify `UPPER` to convert the command text to uppercase. + +`LOWER` + + Specify `LOWER` to convert the command text to lowercase. + +`MIXED` + + Specify `MIXED` to leave the case of SQL commands unchanged. The default is `MIXED`. + +**SET PAUSE** + +The `SET PAUSE` command is most useful when included in a script; the command displays a prompt and waits for the user to press `Return`. + +```text +SET PAU[SE] {ON | OFF} +``` + +If `SET PAUSE` is `ON`, the message `Hit ENTER to continue…` will be displayed before each command is executed. + +**SET SPACE** + +Use the `SET SPACE` command to specify the number of spaces to display between columns: + +```text +SET SPACE number_of_spaces +``` + +**SET SQLPROMPT** + +Use `SET SQLPROMPT` to set a value for a user-interactive prompt: + +```text +SET SQLP[ROMPT] "prompt" +``` + +By default, `SQLPROMPT` is set to `"SQL> "` + +**SET TERMOUT** + +Use the `SET TERMOUT` command to specify if command output should be displayed onscreen. + +```text +SET TERM[OUT] {ON | OFF} +``` + +**SET TIMING** + +The `SET TIMING` command specifies if Advanced Server should display the execution time for each SQL statement after it is executed. + +```text +SET TIMI[NG] {ON | OFF} +``` + +**SET TRIMSPOOL** + +Use the `SET TRIMSPOOL` command to remove trailing spaces from each line in the output file specified by the `SPOOL` command. + +```text +SET TRIMS[POOL] {ON | OFF} +``` + +The default value is `OFF`. + +**SET VERIFY** + +Specifies if both the old and new values of a SQL statement are displayed when a substitution variable is encountered. + +```text +SET VER[IFY] { ON | OFF } +``` + +## SHOW + +Use the `SHOW` command to display current parameter values. + +```text +SHO[W] {ALL | parameter_name} +``` + +Display the current parameter settings by including the `ALL` keyword: + +```Text +SQL> SHOW ALL +autocommit OFF +colsep " " +define "&" +echo OFF +FEEDBACK ON for 6 row(s). 
flush ON +heading ON +headsep "|" +linesize 78 +newpage 1 +null " " +pagesize 14 +pause OFF +serveroutput OFF +spool OFF +sqlcase MIXED +sqlprompt "SQL> " +sqlterminator ";" +suffix ".sql" +termout ON +timing OFF +verify ON +USER is "enterprisedb" +HOST is "localhost" +PORT is "5444" +DATABASE is "edb" +VERSION is "11.0.0" +``` + +Or display a specific parameter setting by including the `parameter_name` in the `SHOW` command: + +```text +SQL> SHOW VERSION +VERSION is "11.0.0" +``` + +## SPOOL + +The `SPOOL` command sends output from the display to a file. + +```text +SP[OOL] output_file | OFF +``` + +Use the `output_file` parameter to specify a path name for the output file. + +## START + +Use the `START` command to run an EDB\*Plus script file; `START` is an alias for the `@` command. + +```text +STA[RT] script_file +``` + +Specify the name of a script file in the `script_file` parameter. + +## UNDEFINE + +The `UNDEFINE` command erases a user variable created by the `DEFINE` command. + +```text +UNDEF[INE] variable_name [ variable_name...] +``` + +Use the `variable_name` parameter to specify the name of a variable or variables. + +## WHENEVER SQLERROR + +The `WHENEVER SQLERROR` command provides error handling for SQL errors or PL/SQL block errors. The syntax is: + +```text +WHENEVER SQLERROR + { CONTINUE [ COMMIT | ROLLBACK | NONE ] + | EXIT [ SUCCESS | FAILURE | WARNING | n | sub_variable ] + [ COMMIT | ROLLBACK ] } +``` + +If Advanced Server encounters an error during the execution of a SQL command or PL/SQL block, EDB\*Plus performs the action specified in the `WHENEVER SQLERROR` command: + + Include the `CONTINUE` clause to instruct EDB\*Plus to perform the specified action before continuing. + + Include the `COMMIT` clause to instruct EDB\*Plus to `COMMIT` the current transaction before exiting or continuing. + + Include the `ROLLBACK` clause to instruct EDB\*Plus to `ROLLBACK` the current transaction before exiting or continuing. + + Include the `NONE` clause to instruct EDB\*Plus to continue without committing or rolling back the transaction. + + Include the `EXIT` clause to instruct EDB\*Plus to perform the specified action and exit if it encounters an error. + + Use the following options to specify a status code that EDB\*Plus will return before exiting: + +```text +[ SUCCESS | FAILURE | WARNING | n | sub_variable ] +``` + + Please note that EDB\*Plus supports substitution variables, but does not support bind variables.
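For example, per the syntax above, a batch script might begin with the following line so that any SQL error rolls back the current transaction and exits with a failure status (a minimal sketch):

```text
SQL> WHENEVER SQLERROR EXIT FAILURE ROLLBACK
```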
diff --git a/product_docs/docs/epas/11/edb_plus/images/advanced_server_installation_details.png b/product_docs/docs/epas/11/edb_plus/images/advanced_server_installation_details.png new file mode 100755 index 00000000000..3638e7d551f --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/images/advanced_server_installation_details.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9c152dfd15132f11750917e10b0a8dd3646bfcf5f4d9a3525066f68ebd807d3 +size 19239 diff --git a/product_docs/docs/epas/11/edb_plus/images/edb_logo.png b/product_docs/docs/epas/11/edb_plus/images/edb_logo.png new file mode 100644 index 00000000000..f4a93cf57f5 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/images/edb_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07423b012a855204780fe5a2a5a1e33607304a5c3020ae4acbf3d575691dedd6 +size 12136 diff --git a/product_docs/docs/pem/7.16/pem_online_help/images/edb_logo.svg b/product_docs/docs/epas/11/edb_plus/images/edb_logo.svg similarity index 100% rename from product_docs/docs/pem/7.16/pem_online_help/images/edb_logo.svg rename to product_docs/docs/epas/11/edb_plus/images/edb_logo.svg diff --git a/product_docs/docs/epas/11/edb_plus/images/edb_plus_welcome.png b/product_docs/docs/epas/11/edb_plus/images/edb_plus_welcome.png new file mode 100755 index 00000000000..c526c78eabf --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/images/edb_plus_welcome.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f557131b686992e3d8a9f7d0f40ddd14dcdde8a6ee7fb90af3ef57860194024 +size 56911 diff --git a/product_docs/docs/epas/11/edb_plus/images/edb_plus_welcome_1.png b/product_docs/docs/epas/11/edb_plus/images/edb_plus_welcome_1.png new file mode 100755 index 00000000000..ea2a26270ab --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/images/edb_plus_welcome_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b1c56ab042cb3f2ea94a6fc8177bb9176fdd0bc42ca36e23c43594993dd6637 +size 34974 diff --git a/product_docs/docs/epas/11/edb_plus/images/installation_complete.png b/product_docs/docs/epas/11/edb_plus/images/installation_complete.png new file mode 100755 index 00000000000..30281ba5583 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/images/installation_complete.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3836a996fa7052af22f4a23e24903594d4edf866c3e2696a6cbc0de8f1db4e03 +size 37674 diff --git a/product_docs/docs/epas/11/edb_plus/images/installation_directory.png b/product_docs/docs/epas/11/edb_plus/images/installation_directory.png new file mode 100755 index 00000000000..0e4ab726341 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/images/installation_directory.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c9a3d397f6c345f80a95362c407c1b72b0414c5934a8e8ef20d6c04a4980469 +size 71722 diff --git a/product_docs/docs/epas/11/edb_plus/images/ready_to_install.png b/product_docs/docs/epas/11/edb_plus/images/ready_to_install.png new file mode 100755 index 00000000000..a73f573adc5 --- /dev/null +++ b/product_docs/docs/epas/11/edb_plus/images/ready_to_install.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddd7dc4e8a4a8e40f9194e5f7971d3fc01c9c6130de1d3a62fc528a2a018c666 +size 15785 diff --git a/product_docs/docs/epas/11/edb_plus/index.mdx b/product_docs/docs/epas/11/edb_plus/index.mdx new file mode 100644 index 00000000000..f70ddcd686d --- /dev/null +++ 
b/product_docs/docs/epas/11/edb_plus/index.mdx @@ -0,0 +1,31 @@ +--- +navTitle: EDB*Plus — CLI +title: "EDB*Plus User's Guide" + +#legacyRedirects: + #- "/edb-docs/p/edbplus/37" + #- "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/37/introduction.html" + #- "/edb-docs/d/edbplus/user-guides/edbplus-users-guide/37/conclusion.html" + +directoryDefaults: + description: "EDB*Plus Documentation and release notes." +--- + +This guide describes how to connect to an Advanced Server database using EDB\*Plus. EDB\*Plus provides a command line user interface to EDB Postgres Advanced Server that accepts SQL commands that allow you to: + +- Query certain database objects +- Execute stored procedures +- Format output from SQL commands +- Execute batch scripts +- Execute OS commands +- Record output + +For detailed information about the features supported by Advanced Server, consult the complete library of Advanced Server guides available at: + + + +
+ +introduction edb_plus installing_edb_plus using_edb_plus using_edb_plus_with_ssl command_summary conclusion + +
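For orientation, the following is a minimal sketch of an EDB\*Plus session; the connection values (user `enterprisedb`, port `5444`, database `edb`) and the sample `emp` table are illustrative assumptions, so substitute your own connection string and objects:

```text
$ edbplus enterprisedb/password@localhost:5444/edb
SQL> SPOOL report.out
SQL> SELECT ename, job FROM emp WHERE deptno = 10;
SQL> SPOOL OFF
SQL> EXIT
```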
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/01_package_components.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/01_package_components.mdx new file mode 100644 index 00000000000..c9121f99266 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/01_package_components.mdx @@ -0,0 +1,380 @@ +--- +title: "Package Components" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/package_components.html" +--- + +Packages consist of two main components: + +- The *package specification*: This is the public interface, (these are the elements which can be referenced outside the package). We declare all database objects that are to be a part of our package within the specification. +- The *package body*: This contains the actual implementation of all the database objects declared within the package specification. + +The package body implements the specifications in the package specification. It contains implementation details and private declarations which are invisible to the application. You can debug, enhance or replace a package body without changing the specifications. Similarly, you can change the body without recompiling the calling programs because the implementation details are invisible to the application. + +## Package Specification Syntax + +The package specification defines the user interface for a package (the API). The specification lists the functions, procedures, types, exceptions and cursors that are visible to a user of the package. + +The syntax used to define the interface for a package is: + +```text +CREATE [ OR REPLACE ] PACKAGE + [ ] + { IS | AS } + [ ; ] ... + [ ] ... +END [ ] ; +``` + +Where `authorization_clause` := + +```text +{ AUTHID DEFINER } | { AUTHID CURRENT_USER } +``` + +Where `procedure_or_function_declaration` := + +```text +procedure_declaration | function_declaration +``` + +Where `procedure_declaration` := + +```text +PROCEDURE proc_name [ argument_list ]; +[ restriction_pragma; ] +``` + +Where `function_declaration` := + +```text +FUNCTION func_name [ argument_list ] + RETURN rettype [ DETERMINISTIC ]; +[ restriction_pragma; ] +``` + +Where `argument_list` := + +```text +( argument_declaration [, ...] ) +``` + +Where `argument_declaration` := + +```text +argname [ IN | IN OUT | OUT ] argtype [ DEFAULT value ] +``` + +Where `restriction_pragma` := + +```text +PRAGMA RESTRICT_REFERENCES(name, restrictions) +``` + +Where `restrictions` := + +```text +restriction [, ... ] +``` + +**Parameters** + +`package_name` + + `package_name` is an identifier assigned to the package - each package must have a name unique within the schema. + +`AUTHID DEFINER` + + If you omit the `AUTHID` clause or specify `AUTHID DEFINER`, the privileges of the package owner are used to determine access privileges to database objects. + +`AUTHID CURRENT_USER` + + If you specify `AUTHID CURRENT_USER`, the privileges of the current user executing a program in the package are used to determine access privileges. + +`declaration` + + `declaration` is an identifier of a public variable. A public variable can be accessed from outside of the package using the syntax `package_name.variable`. There can be zero, one, or more public variables. Public variable definitions must come before procedure or function declarations. 
+ + `declaration` can be any of the following: + +- Variable Declaration +- Record Declaration +- Collection Declaration +- `REF CURSOR` and Cursor Variable Declaration +- `TYPE` Definitions for Records, Collections, and `REF CURSORs` +- Exception +- Object Variable Declaration + +`proc_name` + + The name of a public procedure. + +`argname` + + The name of an argument. The argument is referenced by this name within the function or procedure body. + +`IN | IN OUT | OUT` + + The argument mode. `IN` declares the argument for input only. This is the default. `IN OUT` allows the argument to receive a value as well as return a value. `OUT` specifies the argument is for output only. + +`argtype` + + The data type(s) of an argument. An argument type may be a base data type, a copy of the type of an existing column using `%TYPE`, or a user-defined type such as a nested table or an object type. A length must not be specified for any base type - for example, specify `VARCHAR2`, not `VARCHAR2(10`). + + The type of a column is referenced by writing `tablename.columnname` `%TYPE`; using this can sometimes help make a procedure independent from changes to the definition of a table. + +`DEFAULT value` + + The `DEFAULT` clause supplies a default value for an input argument if one is not supplied in the invocation. `DEFAULT` may not be specified for arguments with modes `IN OUT` or `OUT`. + +`func_name` + + The name of a public function. + +`rettype` + + The return data type. + +`DETERMINISTIC` + + `DETERMINISTIC` is a synonym for `IMMUTABLE`. A `DETERMINISTIC` function cannot modify the database and always reaches the same result when given the same argument values; it does not do database lookups or otherwise use information not directly present in its argument list. If you include this clause, any call of the function with all-constant arguments can be immediately replaced with the function value. + +`restriction` + + The following keywords are accepted for compatibility and ignored: + + `RNDS` + + `RNPS` + + `TRUST` + + `WNDS` + + `WNPS` + +## Package Body Syntax + +Package implementation details reside in the package body; the package body may contain objects that are not visible to the package user. Advanced Server supports the following syntax for the package body: + +```text +CREATE [ OR REPLACE ] PACKAGE BODY + { IS | AS } + [ ; ] ... + [ ] ... + [ ] +END [ ] ; +``` + +Where `procedure_or_function_definition` := + +```text +procedure_definition | function_definition +``` + +Where `procedure_definition` := + +```text +PROCEDURE proc_name[ argument_list ] + [ options_list ] + { IS | AS } + procedure_body + END [ proc_name ] ; +``` + +Where `procedure_body` := + +```text +[ PRAGMA AUTONOMOUS_TRANSACTION; ] +[ declaration; ] [, ...] +BEGIN + statement; [...] +[ EXCEPTION + { WHEN exception [OR exception] [...]] THEN statement; } + [...] +] +``` + +Where `function_definition` := + +```text +FUNCTION func_name [ argument_list ] + RETURN rettype [ DETERMINISTIC ] + [ options_list ] + { IS | AS } + function_body + END [ func_name ] ; +``` + +Where `function_body` := + +```text +[ PRAGMA AUTONOMOUS_TRANSACTION; ] +[ declaration; ] [, ...] +BEGIN + statement; [...] +[ EXCEPTION + { WHEN exception [ OR exception ] [...] THEN statement; } + [...] +] +``` + +Where `argument_list` := + +```text +( argument_declaration [, ...] ) +``` + +Where `argument_declaration` := + +```text +argname [ IN | IN OUT | OUT ] argtype [ DEFAULT value ] +``` + +Where `options_list` := + +```text +option [ ... 
] +``` + +Where `option` := + +```text +STRICT +LEAKPROOF +PARALLEL { UNSAFE | RESTRICTED | SAFE } +COST execution_cost +ROWS result_rows +SET config_param { TO value | = value | FROM CURRENT } +``` + +Where `package_initializer` := + +```text +BEGIN + statement; [...] +END; +``` + +**Parameters** + +`package_name` + + `package_name` is the name of the package for which this is the package body. There must be an existing package specification with this name. + +`private_declaration` + + `private_declaration` is an identifier of a private variable that can be accessed by any procedure or function within the package. There can be zero, one, or more private variables. `private_declaration` can be any of the following: + +- Variable Declaration +- Record Declaration +- Collection Declaration +- `REF CURSOR` and Cursor Variable Declaration +- `TYPE` Definitions for Records, Collections, and `REF CURSORs` +- Exception +- Object Variable Declaration + +`proc_name` + + The name of the procedure being created. + +`PRAGMA AUTONOMOUS_TRANSACTION` + + `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the procedure as an autonomous transaction. + +`declaration` + + A variable, type, `REF CURSOR`, or subprogram declaration. If subprogram declarations are included, they must be declared after all other variable, type, and `REF CURSOR` declarations. + +`statement` + + An SPL program statement. Note that a `DECLARE - BEGIN - END` block is considered an SPL statement unto itself. Thus, the function body may contain nested blocks. + +`exception` + + An exception condition name such as `NO_DATA_FOUND, OTHERS`, etc. + +`func_name` + + The name of the function being created. + +`rettype` + + The return data type, which may be any of the types listed for `argtype`. As for `argtype`, a length must not be specified for `rettype`. + +`DETERMINISTIC` + + Include `DETERMINISTIC` to specify that the function will always return the same result when given the same argument values. A `DETERMINISTIC` function must not modify the database. + + **Note**: The `DETERMINISTIC` keyword is equivalent to the PostgreSQL `IMMUTABLE` option. + + **Note**: If `DETERMINISTIC` is specified for a public function in the package body, it must also be specified for the function declaration in the package specification. (For private functions, there is no function declaration in the package specification.) + +`PRAGMA AUTONOMOUS_TRANSACTION` + + `PRAGMA AUTONOMOUS_TRANSACTION` is the directive that sets the function as an autonomous transaction. + +`argname` + + The name of a formal argument. The argument is referenced by this name within the procedure body. + +`IN | IN OUT | OUT` + + The argument mode. `IN` declares the argument for input only. This is the default. `IN OUT` allows the argument to receive a value as well as return a value. `OUT` specifies the argument is for output only. + +`argtype` + + The data type(s) of an argument. An argument type may be a base data type, a copy of the type of an existing column using `%TYPE`, or a user-defined type such as a nested table or an object type. A length must not be specified for any base type - for example, specify `VARCHAR2`, not `VARCHAR2(10)`. + + The type of a column is referenced by writing `tablename.columnname%TYPE`; using this can sometimes help make a procedure independent from changes to the definition of a table. + +`DEFAULT value` + + The `DEFAULT` clause supplies a default value for an input argument if one is not supplied in the procedure call. 
`DEFAULT` may not be specified for arguments with modes `IN OUT` or `OUT`. + + Please note: The following options are not compatible with Oracle databases; they are extensions to Oracle package syntax provided by Advanced Server only. + +`STRICT` + + The `STRICT` keyword specifies that the function will not be executed if called with a `NULL` argument; instead the function will return `NULL`. + +`LEAKPROOF` + + The `LEAKPROOF` keyword specifies that the function will not reveal any information about arguments, other than through a return value. + +`PARALLEL { UNSAFE | RESTRICTED | SAFE }` + + The `PARALLEL` clause enables the use of parallel sequential scans (parallel mode). A parallel sequential scan uses multiple workers to scan a relation in parallel during a query in contrast to a serial sequential scan. + + When set to `UNSAFE`, the procedure or function cannot be executed in parallel mode. The presence of such a procedure or function forces a serial execution plan. This is the default setting if the `PARALLEL` clause is omitted. + + When set to `RESTRICTED`, the procedure or function can be executed in parallel mode, but the execution is restricted to the parallel group leader. If the qualification for any particular relation has anything that is parallel restricted, that relation won't be chosen for parallelism. + + When set to `SAFE`, the procedure or function can be executed in parallel mode with no restriction. + +`execution_cost` + + `execution_cost` specifies a positive number giving the estimated execution cost for the function, in units of `cpu_operator_cost`. If the function returns a set, this is the cost per returned row. The default is `0.0025`. + +`result_rows` + + `result_rows` is the estimated number of rows that the query planner should expect the function to return. The default is `1000`. + +`SET` + + Use the `SET` clause to specify a parameter value for the duration of the function: + + `config_param` specifies the parameter name. + + `value` specifies the parameter value. + + `FROM CURRENT` guarantees that the parameter value is restored when the function ends. + +`package_initializer` + + The statements in the `package_initializer` are executed once per user’s session when the package is first referenced. + +!!! Note + The `STRICT, LEAKPROOF, PARALLEL, COST, ROWS` and `SET` keywords provide extended functionality for Advanced Server and are not supported by Oracle. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/02_creating_packages.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/02_creating_packages.mdx new file mode 100644 index 00000000000..d009f08287b --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/02_creating_packages.mdx @@ -0,0 +1,136 @@ +--- +title: "Creating Packages" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/creating_packages.html" +--- + +A package is not an executable piece of code; rather it is a repository of code. When you use a package, you actually execute or make reference to an element within a package. + +## Creating the Package Specification + +The package specification contains the definition of all the elements in the package that can be referenced from outside of the package. 
These are called the public elements of the package, and they act as the package interface. The following code sample is a package specification: + +```text +-- +-- Package specification for the 'emp_admin' package. +-- +CREATE OR REPLACE PACKAGE emp_admin +IS + FUNCTION get_dept_name ( + p_deptno NUMBER DEFAULT 10 + ) + RETURN VARCHAR2; + FUNCTION update_emp_sal ( + p_empno NUMBER, + p_raise NUMBER + ) + RETURN NUMBER; + PROCEDURE hire_emp ( + p_empno NUMBER, + p_ename VARCHAR2, + p_job VARCHAR2, + p_sal NUMBER, + p_hiredate DATE DEFAULT sysdate, + p_comm NUMBER DEFAULT 0, + p_mgr NUMBER, + p_deptno NUMBER DEFAULT 10 + ); + PROCEDURE fire_emp ( + p_empno NUMBER + ); +END emp_admin; +``` + +This code sample creates the `emp_admin` package specification. This package specification consists of two functions and two stored procedures. We can also add the `OR REPLACE` clause to the `CREATE PACKAGE` statement for convenience. + +## Creating the Package Body + +The body of the package contains the actual implementation behind the package specification. For the above `emp_admin` package specification, we shall now create a package body which will implement the specifications. The body will contain the implementation of the functions and stored procedures in the specification. + +```text +-- +-- Package body for the 'emp_admin' package. +-- +CREATE OR REPLACE PACKAGE BODY emp_admin +IS + -- + -- Function that queries the 'dept' table based on the department + -- number and returns the corresponding department name. + -- + FUNCTION get_dept_name ( + p_deptno IN NUMBER DEFAULT 10 + ) + RETURN VARCHAR2 + IS + v_dname VARCHAR2(14); + BEGIN + SELECT dname INTO v_dname FROM dept WHERE deptno = p_deptno; + RETURN v_dname; + EXCEPTION + WHEN NO_DATA_FOUND THEN + DBMS_OUTPUT.PUT_LINE('Invalid department number ' || p_deptno); + RETURN ''; + END; + -- + -- Function that updates an employee's salary based on the + -- employee number and salary increment/decrement passed + -- as IN parameters. Upon successful completion the function + -- returns the new updated salary. + -- + FUNCTION update_emp_sal ( + p_empno IN NUMBER, + p_raise IN NUMBER + ) + RETURN NUMBER + IS + v_sal NUMBER := 0; + BEGIN + SELECT sal INTO v_sal FROM emp WHERE empno = p_empno; + v_sal := v_sal + p_raise; + UPDATE emp SET sal = v_sal WHERE empno = p_empno; + RETURN v_sal; + EXCEPTION + WHEN NO_DATA_FOUND THEN + DBMS_OUTPUT.PUT_LINE('Employee ' || p_empno || ' not found'); + RETURN -1; + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('The following is SQLERRM:'); + DBMS_OUTPUT.PUT_LINE(SQLERRM); + DBMS_OUTPUT.PUT_LINE('The following is SQLCODE:'); + DBMS_OUTPUT.PUT_LINE(SQLCODE); + RETURN -1; + END; + -- + -- Procedure that inserts a new employee record into the 'emp' table. + -- + PROCEDURE hire_emp ( + p_empno NUMBER, + p_ename VARCHAR2, + p_job VARCHAR2, + p_sal NUMBER, + p_hiredate DATE DEFAULT sysdate, + p_comm NUMBER DEFAULT 0, + p_mgr NUMBER, + p_deptno NUMBER DEFAULT 10 + ) + AS + BEGIN + INSERT INTO emp(empno, ename, job, sal, hiredate, comm, mgr, deptno) + VALUES(p_empno, p_ename, p_job, p_sal, + p_hiredate, p_comm, p_mgr, p_deptno); + END; + -- + -- Procedure that deletes an employee record from the 'emp' table based + -- on the employee number. 
+ -- + PROCEDURE fire_emp ( + p_empno NUMBER + ) + AS + BEGIN + DELETE FROM emp WHERE empno = p_empno; + END; +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/03_referencing_a_package.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/03_referencing_a_package.mdx new file mode 100644 index 00000000000..c2f3c6b5106 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/03_referencing_a_package.mdx @@ -0,0 +1,23 @@ +--- +title: "Referencing a Package" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/referencing_a_package.html" +--- + +To reference the types, items and subprograms that are declared within a package specification, we use the dot notation. For example: + +`package_name.type_name` + +`package_name.item_name` + +`package_name.subprogram_name` + +To invoke a function from the `emp_admin` package specification, we will execute the following SQL command. + +```text +SELECT emp_admin.get_dept_name(10) FROM DUAL; +``` + +Here we are invoking the `get_dept_name` function declared within the package `emp_admin`. We are passing the department number as an argument to the function, which will return the name of the department. Here the value returned should be `ACCOUNTING`, which corresponds to department number `10`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/04_using_packages_with_user_defined_types.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/04_using_packages_with_user_defined_types.mdx new file mode 100644 index 00000000000..328388a8908 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/04_using_packages_with_user_defined_types.mdx @@ -0,0 +1,182 @@ +--- +title: "Using Packages With User Defined Types" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/using_packages_with_user_defined_types.html" +--- + +The following example incorporates the various user-defined types discussed in earlier chapters within the context of a package. + +The package specification of `emp_rpt` shows the declaration of a record type, `emprec_typ`, and a weakly-typed `REF CURSOR, emp_refcur`, as publicly accessible along with two functions and two procedures. Function, `open_emp_by_dept`, returns the `REF CURSOR` type, `EMP_REFCUR`. Procedures, `fetch_emp` and `close_refcur`, both declare a weakly-typed `REF CURSOR` as a formal parameter. + +```text +CREATE OR REPLACE PACKAGE emp_rpt +IS + TYPE emprec_typ IS RECORD ( + empno NUMBER(4), + ename VARCHAR(10) + ); + TYPE emp_refcur IS REF CURSOR; + + FUNCTION get_dept_name ( + p_deptno IN NUMBER + ) RETURN VARCHAR2; + FUNCTION open_emp_by_dept ( + p_deptno IN emp.deptno%TYPE + ) RETURN EMP_REFCUR; + PROCEDURE fetch_emp ( + p_refcur IN OUT SYS_REFCURSOR + ); + PROCEDURE close_refcur ( + p_refcur IN OUT SYS_REFCURSOR + ); +END emp_rpt; +``` + +The package body shows the declaration of several private variables - a static cursor, `dept_cur`, a table type, `depttab_typ`, a table variable, `t_dept`, an integer variable, `t_dept_max`, and a record variable, `r_emp`. 
+ +```text +CREATE OR REPLACE PACKAGE BODY emp_rpt +IS + CURSOR dept_cur IS SELECT * FROM dept; + TYPE depttab_typ IS TABLE of dept%ROWTYPE + INDEX BY BINARY_INTEGER; + t_dept DEPTTAB_TYP; + t_dept_max INTEGER := 1; + r_emp EMPREC_TYP; + + FUNCTION get_dept_name ( + p_deptno IN NUMBER + ) RETURN VARCHAR2 + IS + BEGIN + FOR i IN 1..t_dept_max LOOP + IF p_deptno = t_dept(i).deptno THEN + RETURN t_dept(i).dname; + END IF; + END LOOP; + RETURN 'Unknown'; + END; + + FUNCTION open_emp_by_dept( + p_deptno IN emp.deptno%TYPE + ) RETURN EMP_REFCUR + IS + emp_by_dept EMP_REFCUR; + BEGIN + OPEN emp_by_dept FOR SELECT empno, ename FROM emp + WHERE deptno = p_deptno; + RETURN emp_by_dept; + END; + + PROCEDURE fetch_emp ( + p_refcur IN OUT SYS_REFCURSOR + ) + IS + BEGIN + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH p_refcur INTO r_emp; + EXIT WHEN p_refcur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(r_emp.empno || ' ' || r_emp.ename); + END LOOP; + END; + + PROCEDURE close_refcur ( + p_refcur IN OUT SYS_REFCURSOR + ) + IS + BEGIN + CLOSE p_refcur; + END; +BEGIN + OPEN dept_cur; + LOOP + FETCH dept_cur INTO t_dept(t_dept_max); + EXIT WHEN dept_cur%NOTFOUND; + t_dept_max := t_dept_max + 1; + END LOOP; + CLOSE dept_cur; + t_dept_max := t_dept_max - 1; +END emp_rpt; +``` + +This package contains an initialization section that loads the private table variable, `t_dept`, using the private static cursor, `dept_cur.t_dept` serves as a department name lookup table in function, `get_dept_name`. + +Function, `open_emp_by_dept` returns a `REF CURSOR` variable for a result set of employee numbers and names for a given department. This `REF CURSOR` variable can then be passed to procedure, `fetch_emp`, to retrieve and list the individual rows of the result set. Finally, procedure, `close_refcur`, can be used to close the `REF CURSOR` variable associated with this result set. + +The following anonymous block runs the package function and procedures. In the anonymous block's declaration section, note the declaration of cursor variable, `v_emp_cur`, using the package’s public `REF CURSOR` type, `EMP_REFCUR. v_emp_cur` contains the pointer to the result set that is passed between the package function and procedures. + +```text +DECLARE + v_deptno dept.deptno%TYPE DEFAULT 30; + v_emp_cur emp_rpt.EMP_REFCUR; +BEGIN + v_emp_cur := emp_rpt.open_emp_by_dept(v_deptno); + DBMS_OUTPUT.PUT_LINE('EMPLOYEES IN DEPT #' || v_deptno || + ': ' || emp_rpt.get_dept_name(v_deptno)); + emp_rpt.fetch_emp(v_emp_cur); + DBMS_OUTPUT.PUT_LINE('**********************'); + DBMS_OUTPUT.PUT_LINE(v_emp_cur%ROWCOUNT || ' rows were retrieved'); + emp_rpt.close_refcur(v_emp_cur); +END; +``` + +The following is the result of this anonymous block. + +```text +EMPLOYEES IN DEPT #30: SALES +EMPNO ENAME +----- ------- +7499 ALLEN +7521 WARD +7654 MARTIN +7698 BLAKE +7844 TURNER +7900 JAMES +********************** +6 rows were retrieved +``` + +The following anonymous block illustrates another means of achieving the same result. Instead of using the package procedures, `fetch_emp` and `close_refcur`, the logic of these programs is coded directly into the anonymous block. In the anonymous block’s declaration section, note the addition of record variable, `r_emp`, declared using the package’s public record type, `EMPREC_TYP`. 
+ +```text +DECLARE + v_deptno dept.deptno%TYPE DEFAULT 30; + v_emp_cur emp_rpt.EMP_REFCUR; + r_emp emp_rpt.EMPREC_TYP; +BEGIN + v_emp_cur := emp_rpt.open_emp_by_dept(v_deptno); + DBMS_OUTPUT.PUT_LINE('EMPLOYEES IN DEPT #' || v_deptno || + ': ' || emp_rpt.get_dept_name(v_deptno)); + DBMS_OUTPUT.PUT_LINE('EMPNO ENAME'); + DBMS_OUTPUT.PUT_LINE('----- -------'); + LOOP + FETCH v_emp_cur INTO r_emp; + EXIT WHEN v_emp_cur%NOTFOUND; + DBMS_OUTPUT.PUT_LINE(r_emp.empno || ' ' || + r_emp.ename); + END LOOP; + DBMS_OUTPUT.PUT_LINE('**********************'); + DBMS_OUTPUT.PUT_LINE(v_emp_cur%ROWCOUNT || ' rows were retrieved'); + CLOSE v_emp_cur; +END; +``` + +The following is the result of this anonymous block. + +```text +EMPLOYEES IN DEPT #30: SALES +EMPNO ENAME +----- ------- +7499 ALLEN +7521 WARD +7654 MARTIN +7698 BLAKE +7844 TURNER +7900 JAMES +********************** +6 rows were retrieved +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/05_dropping_a_package.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/05_dropping_a_package.mdx new file mode 100644 index 00000000000..6713ac7641a --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/05_dropping_a_package.mdx @@ -0,0 +1,27 @@ +--- +title: "Dropping a Package" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dropping_a_package.html" +--- + +The syntax for deleting an entire package or just the package body is as follows: + +```text +DROP PACKAGE [ BODY ] package_name; +``` + +If the keyword, `BODY`, is omitted, both the package specification and the package body are deleted - i.e., the entire package is dropped. If the keyword, `BODY`, is specified, then only the package body is dropped. The package specification remains intact. `package_name` is the identifier of the package to be dropped. + +Following statement will destroy only the package body of `emp_admin`: + +```text +DROP PACKAGE BODY emp_admin; +``` + +The following statement will drop the entire `emp_admin` package: + +```text +DROP PACKAGE emp_admin; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/index.mdx new file mode 100644 index 00000000000..47d206de094 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/02_packages/index.mdx @@ -0,0 +1,20 @@ +--- +title: "Packages" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/packages.html" +--- + +This chapter discusses the concept of packages in Advanced Server. A *package* is a named collection of functions, procedures, variables, cursors, user-defined record types, and records that are referenced using a common qualifier – the package identifier. Packages have the following characteristics: + +- Packages provide a convenient means of organizing the functions and procedures that perform a related purpose. Permission to use the package functions and procedures is dependent upon one privilege granted to the entire package. All of the package programs must be referenced with a common name. +- Certain functions, procedures, variables, types, etc. 
in the package can be declared as *public*. Public entities are visible and can be referenced by other programs that are granted the `EXECUTE` privilege on the package. For public functions and procedures, only their signatures are visible - the program names, parameters (if any), and return types of functions. The SPL code of these functions and procedures is not accessible to others; therefore, applications that use a package depend only upon the information available in the signature – not upon the procedural logic itself. +- Other functions, procedures, variables, types, etc. in the package can be declared as *private*. Private entities can be referenced and used by functions and procedures within the package, but not by external applications; they are for use only by programs within the package. +- Function and procedure names can be overloaded within a package. Two or more functions or procedures can be defined with the same name but with different signatures, which makes it possible to create identically named programs that perform the same job on different types of input, as sketched below. + +
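A minimal sketch of such overloading in a package specification; the `pkg_overload` package and `log_value` procedures are hypothetical names used only for illustration:

```text
CREATE OR REPLACE PACKAGE pkg_overload
IS
    -- Three procedures share one name; the argument type determines
    -- which implementation is invoked.
    PROCEDURE log_value (p_value NUMBER);
    PROCEDURE log_value (p_value VARCHAR2);
    PROCEDURE log_value (p_value DATE);
END pkg_overload;
```

A call such as `pkg_overload.log_value(SYSDATE)` would resolve to the `DATE` variant.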
+ +package_components creating_packages referencing_a_package using_packages_with_user_defined_types dropping_a_package + +
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/01_dbms_alert.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/01_dbms_alert.mdx new file mode 100644 index 00000000000..9c6a8b84620 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/01_dbms_alert.mdx @@ -0,0 +1,395 @@ +--- +title: "DBMS_ALERT" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_alert.html" +--- + +The `DBMS_ALERT` package provides the capability to register for, send, and receive alerts. The following table lists the supported procedures: + +| Function/Procedure | Return Type | Description | +| ----------------------------------------------------- | ----------- | --------------------------------------------------- | +| `REGISTER(name)` | n/a | Register to be able to receive alerts named, `name` | +| `REMOVE(name)` | n/a | Remove registration for the alert named, `name` | +| `REMOVEALL` | n/a | Remove registration for all alerts. | +| `SIGNAL(name, message)` | n/a | Signals the alert named, `name`, with `message` | +| `WAITANY(name OUT, message OUT, status OUT, timeout)` | n/a | Wait for any registered alert to occur. | +| `WAITONE(name, message OUT, status OUT, timeout)` | n/a | Wait for the specified alert, `name`, to occur. | + +Advanced Server's implementation of `DBMS_ALERT` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported. + +Advanced Server allows a maximum of `500` concurrent alerts. You can use the `dbms_alert.max_alerts` GUC variable (located in the `postgresql.conf` file) to specify the maximum number of concurrent alerts allowed on a system. + +To set a value for the `dbms_alert.max_alerts` variable, open the `postgresql.conf` file (located by default in `/opt/PostgresPlus/11AS/data`) with your choice of editor, and edit the `dbms_alert.max_alerts` parameter as shown: + +```text +dbms_alert.max_alerts = alert_count +``` + +`alert_count` + +`alert_count` specifies the maximum number of concurrent alerts. By default, the value of `dbms_alert.max_alerts` is `100`. To disable this feature, set `dbms_alert.max_alerts` to `0`. + +For the `dbms_alert.max_alerts` GUC to function correctly, the `custom_variable_classes` parameter must contain `dbms_alerts`: + +```text +custom_variable_classes = 'dbms_alert, …' +``` + +After editing the `postgresql.conf` file parameters, you must restart the server for the changes to take effect. + +## REGISTER + +The `REGISTER` procedure enables the current session to be notified of the specified alert. + +```text +REGISTER( VARCHAR2) +``` + +**Parameters** + +`name` + + Name of the alert to be registered. + +**Examples** + +The following anonymous block registers for an alert named, `alert_test`, then waits for the signal. 
+ +```text +DECLARE + v_name VARCHAR2(30) := 'alert_test'; + v_msg VARCHAR2(80); + v_status INTEGER; + v_timeout NUMBER(3) := 120; +BEGIN + DBMS_ALERT.REGISTER(v_name); + DBMS_OUTPUT.PUT_LINE('Registered for alert ' || v_name); + DBMS_OUTPUT.PUT_LINE('Waiting for signal...'); + DBMS_ALERT.WAITONE(v_name,v_msg,v_status,v_timeout); + DBMS_OUTPUT.PUT_LINE('Alert name : ' || v_name); + DBMS_OUTPUT.PUT_LINE('Alert msg : ' || v_msg); + DBMS_OUTPUT.PUT_LINE('Alert status : ' || v_status); + DBMS_OUTPUT.PUT_LINE('Alert timeout: ' || v_timeout || ' seconds'); + DBMS_ALERT.REMOVE(v_name); +END; + +Registered for alert alert_test +Waiting for signal... +``` + +## REMOVE + +The `REMOVE` procedure unregisters the session for the named alert. + +```text +REMOVE( VARCHAR2) +``` + +**Parameters** + +`name` + + Name of the alert to be unregistered. + +## REMOVEALL + +The `REMOVEALL` procedure unregisters the session for all alerts. + +```text +REMOVEALL +``` + +## SIGNAL + +The `SIGNAL` procedure signals the occurrence of the named alert. + +```text +SIGNAL( VARCHAR2, VARCHAR2) +``` + +**Parameters** + +`name` + + Name of the alert. + +`message` + + Information to pass with this alert. + +**Examples** + +The following anonymous block signals an alert for `alert_test`. + +```text +DECLARE + v_name VARCHAR2(30) := 'alert_test'; +BEGIN + DBMS_ALERT.SIGNAL(v_name,'This is the message from ' || v_name); + DBMS_OUTPUT.PUT_LINE('Issued alert for ' || v_name); +END; +Issued alert for alert_test +``` + +## WAITANY + +The `WAITANY` procedure waits for any of the registered alerts to occur. + +```text +WAITANY( OUT VARCHAR2, OUT VARCHAR2, + OUT INTEGER, NUMBER) +``` + +**Parameters** + +`name` + + Variable receiving the name of the alert. + +`message` + + Variable receiving the message sent by the `SIGNAL` procedure. + +`status` + + Status code returned by the operation. Possible values are: 0 – alert occurred; 1 – timeout occurred. + +`timeout` + + Time to wait for an alert in seconds. + +**Examples** + +The following anonymous block uses the `WAITANY` procedure to receive an alert named, `alert_test` or `any_alert`: + +```text +DECLARE + v_name VARCHAR2(30); + v_msg VARCHAR2(80); + v_status INTEGER; + v_timeout NUMBER(3) := 120; +BEGIN + DBMS_ALERT.REGISTER('alert_test'); + DBMS_ALERT.REGISTER('any_alert'); + DBMS_OUTPUT.PUT_LINE('Registered for alert alert_test and any_alert'); + DBMS_OUTPUT.PUT_LINE('Waiting for signal...'); + DBMS_ALERT.WAITANY(v_name,v_msg,v_status,v_timeout); + DBMS_OUTPUT.PUT_LINE('Alert name : ' || v_name); + DBMS_OUTPUT.PUT_LINE('Alert msg : ' || v_msg); + DBMS_OUTPUT.PUT_LINE('Alert status : ' || v_status); + DBMS_OUTPUT.PUT_LINE('Alert timeout: ' || v_timeout || ' seconds'); + DBMS_ALERT.REMOVEALL; +END; + +Registered for alert alert_test and any_alert +Waiting for signal... +``` + +An anonymous block in a second session issues a signal for `any_alert`: + +```text +DECLARE + v_name VARCHAR2(30) := 'any_alert'; +BEGIN + DBMS_ALERT.SIGNAL(v_name,'This is the message from ' || v_name); + DBMS_OUTPUT.PUT_LINE('Issued alert for ' || v_name); +END; + +Issued alert for any_alert +``` + +Control returns to the first anonymous block and the remainder of the code is executed: + +```text +Registered for alert alert_test and any_alert +Waiting for signal... +Alert name : any_alert +Alert msg : This is the message from any_alert +Alert status : 0 +Alert timeout: 120 seconds +``` + +## WAITONE + +The `WAITONE` procedure waits for the specified registered alert to occur. 
+ +```text +WAITONE( VARCHAR2, OUT VARCHAR2, + OUT INTEGER, NUMBER) +``` + +**Parameters** + +`name` + + Name of the alert. + +`message` + + Variable receiving the message sent by the `SIGNAL` procedure. + +`status` + + Status code returned by the operation. Possible values are: 0 – alert occurred; 1 – timeout occurred. + +`timeout` + + Time to wait for an alert in seconds. + +**Examples** + +The following anonymous block is similar to the one used in the `WAITANY` example except the `WAITONE` procedure is used to receive the alert named, `alert_test`. + +```text +DECLARE + v_name VARCHAR2(30) := 'alert_test'; + v_msg VARCHAR2(80); + v_status INTEGER; + v_timeout NUMBER(3) := 120; +BEGIN + DBMS_ALERT.REGISTER(v_name); + DBMS_OUTPUT.PUT_LINE('Registered for alert ' || v_name); + DBMS_OUTPUT.PUT_LINE('Waiting for signal...'); + DBMS_ALERT.WAITONE(v_name,v_msg,v_status,v_timeout); + DBMS_OUTPUT.PUT_LINE('Alert name : ' || v_name); + DBMS_OUTPUT.PUT_LINE('Alert msg : ' || v_msg); + DBMS_OUTPUT.PUT_LINE('Alert status : ' || v_status); + DBMS_OUTPUT.PUT_LINE('Alert timeout: ' || v_timeout || ' seconds'); + DBMS_ALERT.REMOVE(v_name); +END; + +Registered for alert alert_test +Waiting for signal... +``` + +Signal sent for `alert_test` sent by an anonymous block in a second session: + +```text +DECLARE + v_name VARCHAR2(30) := 'alert_test'; +BEGIN + DBMS_ALERT.SIGNAL(v_name,'This is the message from ' || v_name); + DBMS_OUTPUT.PUT_LINE('Issued alert for ' || v_name); +END; + +Issued alert for alert_test +``` + +First session is alerted, control returns to the anonymous block, and the remainder of the code is executed: + +```text +Registered for alert alert_test +Waiting for signal... +Alert name : alert_test +Alert msg : This is the message from alert_test +Alert status : 0 +Alert timeout: 120 seconds +``` + +## Comprehensive Example + +The following example uses two triggers to send alerts when the `dept` table or the `emp` table is changed. An anonymous block listens for these alerts and displays messages when an alert is received. 
+ +The following are the triggers on the `dept` and `emp` tables: + +```text +CREATE OR REPLACE TRIGGER dept_alert_trig + AFTER INSERT OR UPDATE OR DELETE ON dept +DECLARE + v_action VARCHAR2(25); +BEGIN + IF INSERTING THEN + v_action := ' added department(s) '; + ELSIF UPDATING THEN + v_action := ' updated department(s) '; + ELSIF DELETING THEN + v_action := ' deleted department(s) '; + END IF; + DBMS_ALERT.SIGNAL('dept_alert',USER || v_action || 'on ' || + SYSDATE); +END; + +CREATE OR REPLACE TRIGGER emp_alert_trig + AFTER INSERT OR UPDATE OR DELETE ON emp +DECLARE + v_action VARCHAR2(25); +BEGIN + IF INSERTING THEN + v_action := ' added employee(s) '; + ELSIF UPDATING THEN + v_action := ' updated employee(s) '; + ELSIF DELETING THEN + v_action := ' deleted employee(s) '; + END IF; + DBMS_ALERT.SIGNAL('emp_alert',USER || v_action || 'on ' || + SYSDATE); +END; +``` + +The following anonymous block is executed in a session while updates to the `dept` and `emp` tables occur in other sessions: + +```text +DECLARE + v_dept_alert VARCHAR2(30) := 'dept_alert'; + v_emp_alert VARCHAR2(30) := 'emp_alert'; + v_name VARCHAR2(30); + v_msg VARCHAR2(80); + v_status INTEGER; + v_timeout NUMBER(3) := 60; +BEGIN + DBMS_ALERT.REGISTER(v_dept_alert); + DBMS_ALERT.REGISTER(v_emp_alert); + DBMS_OUTPUT.PUT_LINE('Registered for alerts dept_alert and emp_alert'); + DBMS_OUTPUT.PUT_LINE('Waiting for signal...'); + LOOP + DBMS_ALERT.WAITANY(v_name,v_msg,v_status,v_timeout); + EXIT WHEN v_status != 0; + DBMS_OUTPUT.PUT_LINE('Alert name : ' || v_name); + DBMS_OUTPUT.PUT_LINE('Alert msg : ' || v_msg); + DBMS_OUTPUT.PUT_LINE('Alert status : ' || v_status); + DBMS_OUTPUT.PUT_LINE('------------------------------------' || + '-------------------------'); + END LOOP; + DBMS_OUTPUT.PUT_LINE('Alert status : ' || v_status); + DBMS_ALERT.REMOVEALL; +END; + +Registered for alerts dept_alert and emp_alert +Waiting for signal... +``` + +The following changes are made by user, mary: + +```text +INSERT INTO dept VALUES (50,'FINANCE','CHICAGO'); +INSERT INTO emp (empno,ename,deptno) VALUES (9001,'JONES',50); +INSERT INTO emp (empno,ename,deptno) VALUES (9002,'ALICE',50); +``` + +The following change is made by user, john: + +```text +INSERT INTO dept VALUES (60,'HR','LOS ANGELES'); +``` + +The following is the output displayed by the anonymous block receiving the signals from the triggers: + +```text +Registered for alerts dept_alert and emp_alert +Waiting for signal... +Alert name : dept_alert +Alert msg : mary added department(s) on 25-OCT-07 16:41:01 +Alert status : 0 +------------------------------------------------------------- +Alert name : emp_alert +Alert msg : mary added employee(s) on 25-OCT-07 16:41:02 +Alert status : 0 +------------------------------------------------------------- +Alert name : dept_alert +Alert msg : john added department(s) on 25-OCT-07 16:41:22 +Alert status : 0 +------------------------------------------------------------- +Alert status : 1 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/01_enqueue.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/01_enqueue.mdx new file mode 100644 index 00000000000..7487f87c635 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/01_enqueue.mdx @@ -0,0 +1,120 @@ +--- +title: "ENQUEUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/enqueue.html" +--- + +The `ENQUEUE` procedure adds an entry to a queue. The signature is: + +```text +ENQUEUE( + IN VARCHAR2, + IN DBMS_AQ.ENQUEUE_OPTIONS_T, + IN DBMS_AQ.MESSAGE_PROPERTIES_T, + IN , + OUT RAW) +``` + +**Parameters** + +`queue_name` + + The name (optionally schema-qualified) of an existing queue. If you omit the schema name, the server will use the schema specified in the `SEARCH_PATH`. Please note that unlike Oracle, unquoted identifiers are converted to lower case before storing. To include special characters or use a case-sensitive name, enclose the name in double quotes. + + For detailed information about creating a queue, see `DBMS_AQADM.CREATE_QUEUE`. + +`enqueue_options` + + `enqueue_options` is a value of the type, `enqueue_options_t`: + +```text +DBMS_AQ.ENQUEUE_OPTIONS_T IS RECORD( + visibility BINARY_INTEGER DEFAULT ON_COMMIT, + relative_msgid RAW(16) DEFAULT NULL, + sequence_deviation BINARY INTEGER DEFAULT NULL, + transformation VARCHAR2(61) DEFAULT NULL, + delivery_mode PLS_INTEGER NOT NULL DEFAULT PERSISTENT); +``` + +Currently, the only supported parameter values for `enqueue_options_t` are: + +| `visibility` | `ON_COMMIT`. | +| -------------------- | ------------ | +| `delivery_mode` | `PERSISTENT` | +| `sequence_deviation` | `NULL` | +| `transformation` | `NULL` | +| `relative_msgid` | `NULL` | + +`message_properties` + + `message_properties` is a value of the type, `message_properties_t`: + +```text +message_properties_t IS RECORD( + priority INTEGER, + delay INTEGER, + expiration INTEGER, + correlation CHARACTER VARYING(128) COLLATE pg_catalog.”C”, + attempts INTEGER, + recipient_list “AQ$_RECIPIENT_LIST_T”, + exception_queue CHARACTER VARYING(61) COLLATE pg_catalog.”C”, + enqueue_time TIMESTAMP WITHOUT TIME ZONE, + state INTEGER, + original_msgid BYTEA, + transaction_group CHARACTER VARYING(30) COLLATE pg_catalog.”C”, + delivery_mode INTEGER +DBMS_AQ.PERSISTENT); +``` + +The supported values for `message_properties_t` are: + +| | | +| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `priority` | If the queue table definition includes a `sort_list` that references `priority`, this parameter affects the order that messages are dequeued. A lower value indicates a higher dequeue priority. | +| `delay` | Specify the number of seconds that will pass before a message is available for dequeueing or `NO_DELAY`. | +| `expiration` | Use the expiration parameter to specify the number of seconds until a message expires. | +| `correlation` | Use correlation to specify a message that will be associated with the entry; the default is `NULL`. | +| `attempts` | This is a system-maintained value that specifies the number of attempts to dequeue the message. | +| `recipient_list` | This parameter is not supported. | +| `exception_queue` | Use the `exception_queue` parameter to specify the name of an exception queue to which a message will be moved if it expires or is dequeued by a transaction that rolls back too many times. 
| `enqueue_time` | `enqueue_time` is the time the record was added to the queue; this value is provided by the system. | +| `state` | This parameter is maintained by DBMS_AQ; state can be: <br/><br/>`DBMS_AQ.WAITING` – the delay has not been reached. <br/><br/>`DBMS_AQ.READY` – the queue entry is ready for processing. <br/><br/>`DBMS_AQ.PROCESSED` – the queue entry has been processed. <br/><br/>
`DBMS_AQ.EXPIRED` – the queue entry has been moved to the exception queue. | +| `original_msgid` | This parameter is accepted for compatibility and ignored. | +| `transaction_group` | This parameter is accepted for compatibility and ignored. | +| `delivery_mode` | This parameter is not supported; specify a value of `DBMS_AQ.PERSISTENT`. | + +`payload` + + Use the `payload` parameter to provide the data that will be associated with the queue entry. The payload type must match the type specified when creating the corresponding queue table (see `DBMS_AQADM.CREATE_QUEUE_TABLE`). + +`msgid` + + Use the `msgid` parameter to retrieve a unique (system-generated) message identifier. + +**Example** + +The following anonymous block calls `DBMS_AQ.ENQUEUE`, adding a message to a queue named `work_order`: + +```text +DECLARE + + enqueue_options DBMS_AQ.ENQUEUE_OPTIONS_T; + message_properties DBMS_AQ.MESSAGE_PROPERTIES_T; + message_handle raw(16); + payload work_order; + +BEGIN + + payload := work_order('Smith', 'system upgrade'); + +DBMS_AQ.ENQUEUE( + queue_name => 'work_order', + enqueue_options => enqueue_options, + message_properties => message_properties, + payload => payload, + msgid => message_handle + ); + END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/02_dequeue.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/02_dequeue.mdx new file mode 100644 index 00000000000..4a45c6c09e5 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/02_dequeue.mdx @@ -0,0 +1,132 @@ +--- +title: "DEQUEUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dequeue.html" +--- + +The `DEQUEUE` procedure dequeues a message. The signature is: + +```text +DEQUEUE( + IN VARCHAR2, + IN DBMS_AQ.DEQUEUE_OPTIONS_T, + OUT DBMS_AQ.MESSAGE_PROPERTIES_T, + OUT , + OUT RAW) +``` + +**Parameters** + +`queue_name` + + The name (optionally schema-qualified) of an existing queue. If you omit the schema name, the server will use the schema specified in the `SEARCH_PATH`. Please note that unlike Oracle, unquoted identifiers are converted to lower case before storing. To include special characters or use a case-sensitive name, enclose the name in double quotes. + + For detailed information about creating a queue, see `DBMS_AQADM.CREATE_QUEUE`. + +`dequeue_options` is a value of the type, `dequeue_options_t`: + +```text +DEQUEUE_OPTIONS_T IS RECORD( + consumer_name CHARACTER VARYING(30), + dequeue_mode INTEGER, + navigation INTEGER, + visibility INTEGER, + wait INTEGER, + msgid BYTEA, + correlation CHARACTER VARYING(128), + deq_condition CHARACTER VARYING(4000), + transformation CHARACTER VARYING(61), + delivery_mode INTEGER); +``` + +Currently, the supported parameter values for `dequeue_options_t` are: + +| | | +| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `consumer_name` | Must be `NULL`. | +| `dequeue_mode` | The locking behavior of the dequeue operation. 
Must be either: <br/><br/>`DBMS_AQ.BROWSE` – Read the message without obtaining a lock. <br/><br/>`DBMS_AQ.LOCKED` – Read the message after acquiring a lock. <br/><br/>`DBMS_AQ.REMOVE` – Read the message before deleting the message. <br/><br/>`DBMS_AQ.REMOVE_NODATA` – Read the message, but do not delete the message. | +| `navigation` | Identifies the message that will be retrieved. Must be either: <br/><br/>`FIRST_MESSAGE` – The first message within the queue that matches the search term. <br/><br/>`NEXT_MESSAGE` – The next available message that matches the search term. | +| `visibility` | Must be `ON_COMMIT` – if you roll back the current transaction, the dequeued item will remain in the queue. | +| `wait` | Must be a number larger than 0, or: <br/><br/>`DBMS_AQ.FOREVER` – Wait indefinitely. <br/><br/>
`DBMS_AQ.NO_WAIT` – Do not wait. | +| `msgid` | The message ID of the message that will be dequeued. | +| `correlation` | Accepted for compatibility, and ignored. | +| `deq_condition` | A `VARCHAR2` expression that evaluates to a `BOOLEAN` value indicating if the message should be dequeued. | +| `transformation` | Accepted for compatibility, and ignored. | +| `delivery_mode` | Must be `PERSISTENT`; buffered messages are not supported at this time. | + +`message_properties` is a value of the type, `message_properties_t`: + +```text +message_properties_t IS RECORD( + priority INTEGER, + delay INTEGER, + expiration INTEGER, + correlation CHARACTER VARYING(128) COLLATE pg_catalog.”C”, + attempts INTEGER, + recipient_list “AQ$_RECIPIENT_LIST_T”, + exception_queue CHARACTER VARYING(61) COLLATE pg_catalog.”C”, + enqueue_time TIMESTAMP WITHOUT TIME ZONE, + state INTEGER, + original_msgid BYTEA, + transaction_group CHARACTER VARYING(30) COLLATE pg_catalog.”C”, + delivery_mode INTEGER +DBMS_AQ.PERSISTENT); +``` + +The supported values for `message_properties_t` are: + +| | | +| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `priority` | If the queue table definition includes a `sort_list` that references `priority`, this parameter affects the order that messages are dequeued. A lower value indicates a higher dequeue priority. | +| `delay` | Specify the number of seconds that will pass before a message is available for dequeueing or `NO_DELAY`. | +| `expiration` | Use the expiration parameter to specify the number of seconds until a message expires. | +| `correlation` | Use correlation to specify a message that will be associated with the entry; the default is `NULL`. | +| `attempts` | This is a system-maintained value that specifies the number of attempts to dequeue the message. | +| `recipient_list` | This parameter is not supported. | +| `exception_queue` | Use the `exception_queue` parameter to specify the name of an exception queue to which a message will be moved if it expires or is dequeued by a transaction that rolls back too many times. | +| `enqueue_time` | `enqueue_time` is the time the record was added to the queue; this value is provided by the system. | +| `state` | This parameter is maintained by DBMS_AQ; state can be:
<br/><br/>`DBMS_AQ.WAITING` – the delay has not been reached. <br/><br/>`DBMS_AQ.READY` – the queue entry is ready for processing. <br/><br/>`DBMS_AQ.PROCESSED` – the queue entry has been processed. <br/><br/>
`DBMS_AQ.EXPIRED` – the queue entry has been moved to the exception queue. | +| `original_msgid` | This parameter is accepted for compatibility and ignored. | +| `transaction_group` | This parameter is accepted for compatibility and ignored. | +| `delivery_mode` | This parameter is not supported; specify a value of `DBMS_AQ.PERSISTENT`. | + +`payload` + + Use the `payload` parameter to retrieve the payload of a message with a dequeue operation. The payload type must match the type specified when creating the queue table. + +`msgid` + + Use the `msgid` parameter to retrieve a unique message identifier. + +**Example** + +The following anonymous block calls `DBMS_AQ.DEQUEUE`, retrieving a message from the queue and a payload: + +```text +DECLARE + + dequeue_options DBMS_AQ.DEQUEUE_OPTIONS_T; + message_properties DBMS_AQ.MESSAGE_PROPERTIES_T; + message_handle raw(16); + payload work_order; + +BEGIN + dequeue_options.dequeue_mode := DBMS_AQ.BROWSE; + + DBMS_AQ.DEQUEUE( + queue_name => 'work_queue', + dequeue_options => dequeue_options, + message_properties => message_properties, + payload => payload, + msgid => message_handle + ); + + DBMS_OUTPUT.PUT_LINE( + 'The next work order is [' || payload.subject || '].' + ); +END; +``` + +The payload is displayed by `DBMS_OUTPUT.PUT_LINE`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/03_register.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/03_register.mdx new file mode 100644 index 00000000000..ac2bcd2df1d --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/03_register.mdx @@ -0,0 +1,62 @@ +--- +title: "REGISTER" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/register.html" +--- + +Use the `REGISTER` procedure to register an email address, procedure or URL that will be notified when an item is enqueued or dequeued. The signature is: + +```text +REGISTER( + IN SYS.AQ$_REG_INFO_LIST, + IN NUMBER) +``` + +**Parameters** + +`reg_list` is a list of type `AQ$_REG_INFO_LIST`; that provides information about each subscription that you would like to register. Each entry within the list is of the type `AQ$_REG_INFO`, and may contain: + +
+ +
+ +| Attribute | Type | Description | +| ----------- | --------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | VARCHAR2 (128) | The (optionally schema-qualified) name of the subscription. | +| `namespace` | NUMERIC | The only supported value is `DBMS_AQ.NAMESPACE_AQ (0)` | +| `callback` | VARCHAR2 (4000) | Describes the action that will be performed upon notification. Currently, only calls to PL/SQL procedures are supported. The call should take the form:
<br/><br/>`plsql://schema.procedure`<br/><br/>Where: <br/><br/>schema specifies the schema in which the procedure resides. <br/><br/>
procedure specifies the name of the procedure that will be notified. | +| `context` | RAW (16) | Any user-defined value required by the procedure. | + +`count` + + `count` is the number of entries in `reg_list`. + +**Example** + +The following anonymous block calls `DBMS_AQ.REGISTER`, registering procedures that will be notified when an item is added to or removed from a queue. A set of attributes (of `sys.aq$_reg_info` type) is provided for each subscription identified in the `DECLARE` section: + +```text +DECLARE + subscription1 sys.aq$_reg_info; + subscription2 sys.aq$_reg_info; + subscription3 sys.aq$_reg_info; + subscriptionlist sys.aq$_reg_info_list; +BEGIN + subscription1 := sys.aq$_reg_info('q', DBMS_AQ.NAMESPACE_AQ, +'plsql://assign_worker?PR=0',HEXTORAW('FFFF')); + subscription2 := sys.aq$_reg_info('q', DBMS_AQ.NAMESPACE_AQ, +'plsql://add_to_history?PR=1',HEXTORAW('FFFF')); + subscription3 := sys.aq$_reg_info('q', DBMS_AQ.NAMESPACE_AQ, +'plsql://reserve_parts?PR=2',HEXTORAW('FFFF')); + + subscriptionlist := sys.aq$_reg_info_list(subscription1, +subscription2, subscription3); + dbms_aq.register(subscriptionlist, 3); + commit; + END; + / +``` + +The `subscriptionlist` is of type `sys.aq$_reg_info_list`, and contains the previously described `sys.aq$_reg_info` objects. The list name and an object count are passed to `dbms_aq.register`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/04_unregister.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/04_unregister.mdx new file mode 100644 index 00000000000..762b412c003 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/04_unregister.mdx @@ -0,0 +1,64 @@ +--- +title: "UNREGISTER" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/unregister.html" +--- + +Use the `UNREGISTER` procedure to turn off notifications related to enqueueing and dequeueing. The signature is: + +```text +UNREGISTER( + IN SYS.AQ$_REG_INFO_LIST, + IN NUMBER) +``` + +**Parameter** + +`reg_list` + +`reg_list` is a list of type `AQ$_REG_INFO_LIST`; that provides information about each subscription that you would like to register. Each entry within the list is of the type `AQ$_REG_INFO`, and may contain: + +
+ +
+ +| Attribute | Type | Description | +| ----------- | --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | VARCHAR2 (128) | The (optionally schema-qualified) name of the subscription. | +| `namespace` | NUMERIC | The only supported value is `DBMS_AQ.NAMESPACE_AQ (0)` | +| `callback` | VARCHAR2 (4000) | Describes the action that will be performed upon notification. Currently, only calls to PL/SQL procedures are supported. The call should take the form:

`plsql://schema.procedure`

Where:

schema specifies the schema in which the procedure resides.

procedure specifies the name of the procedure that will be notified. | +| `context` | RAW (16) | Any user-defined value required by the procedure. | + +`count` + + `count` is the number of entries in `reg_list`. + +**Example** + +The following anonymous block calls `DBMS_AQ.UNREGISTER`, disabling the notifications specified in the example for `DBMS_AQ.REGISTER`: + +```text +DECLARE + subscription1 sys.aq$_reg_info; + subscription2 sys.aq$_reg_info; + subscription3 sys.aq$_reg_info; + subscriptionlist sys.aq$_reg_info_list; +BEGIN + subscription1 := sys.aq$_reg_info('q', DBMS_AQ.NAMESPACE_AQ, +'plsql://assign_worker?PR=0',HEXTORAW('FFFF')); + subscription2 := sys.aq$_reg_info('q', DBMS_AQ.NAMESPACE_AQ, +'plsql://add_to_history?PR=1',HEXTORAW('FFFF')); + subscription3 := sys.aq$_reg_info('q', DBMS_AQ.NAMESPACE_AQ, +'plsql://reserve_parts?PR=2',HEXTORAW('FFFF')); + + subscriptionlist := sys.aq$_reg_info_list(subscription1, +subscription2, subscription3); + dbms_aq.unregister(subscriptionlist, 3); + commit; + END; + / +``` + +The `subscriptionlist` is of type `sys.aq$_reg_info_list`, and contains the previously described `sys.aq$_reg_info` objects. The list name and an object count are passed to `dbms_aq.unregister`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/index.mdx new file mode 100644 index 00000000000..e5ec1b8142b --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/02_dbms_aq/index.mdx @@ -0,0 +1,73 @@ +--- +title: "DBMS_AQ" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_aq.html" +--- + +EDB Postgres Advanced Server Advanced Queueing provides message queueing and message processing for the Advanced Server database. User-defined messages are stored in a queue; a collection of queues is stored in a queue table. Procedures in the `DBMS_AQADM` package create and manage message queues and queue tables. Use the `DBMS_AQ` package to add messages to a queue or remove messages from a queue, or register or unregister a PL/SQL callback procedure. + +Advanced Server also provides extended (non-compatible) functionality for the `DBMS_AQ` package with SQL commands, see the *Database Compatibility for Oracle Developers SQL Guide* for detailed information about the following SQL commands: + +- `ALTER QUEUE` +- `ALTER QUEUE TABLE` +- `CREATE QUEUE` +- `CREATE QUEUE TABLE` +- `DROP QUEUE` +- `DROP QUEUE TABLE` + +The `DBMS_AQ` package provides procedures that allow you to enqueue a message, dequeue a message, and manage callback procedures. The supported procedures are: + +| Function/Procedure | Return Type | Description | +| ------------------ | ----------- | ------------------------------------------------------------------ | +| `ENQUEUE` | n/a | Post a message to a queue. | +| `DEQUEUE` | n/a | Retrieve a message from a queue if or when a message is available. | +| `REGISTER` | n/a | Register a callback procedure. | +| `UNREGISTER` | n/a | Unregister a callback procedure. | + +Advanced Server's implementation of `DBMS_AQ` is a partial implementation when compared to Oracle's version. Only those procedures listed in the table above are supported. 
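
As a quick orientation, the following anonymous block sketches a minimal enqueue operation. It mirrors the `DEQUEUE` example elsewhere in this chapter and assumes that the `work_queue` queue and the `work_order` payload type already exist; the payload values are purely illustrative:

```text
DECLARE
    enqueue_options    DBMS_AQ.ENQUEUE_OPTIONS_T;
    message_properties DBMS_AQ.MESSAGE_PROPERTIES_T;
    message_handle     raw(16);
    payload            work_order;
BEGIN
    -- Illustrative payload; assumes the work_order type (name, project,
    -- completed) and the work_queue queue already exist.
    payload := work_order('repair lift', 'maintenance', FALSE);

    DBMS_AQ.ENQUEUE(
        queue_name         => 'work_queue',
        enqueue_options    => enqueue_options,
        message_properties => message_properties,
        payload            => payload,
        msgid              => message_handle
    );
    COMMIT;
END;
```

A corresponding dequeue operation appears in the `DEQUEUE` example earlier in this chapter.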

Advanced Server supports use of the constants listed below:

| Constant | Description | For Parameters |
| --------------------------------- | ----------------------------------------------------------------------------------- | ------------------------------------------------------------ |
| `DBMS_AQ.BROWSE (0)` | Read the message without locking. | `dequeue_options_t.dequeue_mode` |
| `DBMS_AQ.LOCKED (1)` | This constant is defined, but will return an error if used. | `dequeue_options_t.dequeue_mode` |
| `DBMS_AQ.REMOVE (2)` | Delete the message after reading; the default. | `dequeue_options_t.dequeue_mode` |
| `DBMS_AQ.REMOVE_NODATA (3)` | This constant is defined, but will return an error if used. | `dequeue_options_t.dequeue_mode` |
| `DBMS_AQ.FIRST_MESSAGE (0)` | Return the first available message that matches the search terms. | `dequeue_options_t.navigation` |
| `DBMS_AQ.NEXT_MESSAGE (1)` | Return the next available message that matches the search terms. | `dequeue_options_t.navigation` |
| `DBMS_AQ.NEXT_TRANSACTION (2)` | This constant is defined, but will return an error if used. | `dequeue_options_t.navigation` |
| `DBMS_AQ.FOREVER (0)` | Wait forever if a message that matches the search term is not found (the default). | `dequeue_options_t.wait` |
| `DBMS_AQ.NO_WAIT (1)` | Do not wait if a message that matches the search term is not found. | `dequeue_options_t.wait` |
| `DBMS_AQ.ON_COMMIT (0)` | The dequeue is part of the current transaction. | `enqueue_options_t.visibility, dequeue_options_t.visibility` |
| `DBMS_AQ.IMMEDIATE (1)` | This constant is defined, but will return an error if used. | `enqueue_options_t.visibility, dequeue_options_t.visibility` |
| `DBMS_AQ.PERSISTENT (0)` | The message should be stored in a table. | `enqueue_options_t.delivery_mode` |
| `DBMS_AQ.BUFFERED (1)` | This constant is defined, but will return an error if used. | `enqueue_options_t.delivery_mode` |
| `DBMS_AQ.READY (0)` | Specifies that the message is ready to process. | `message_properties_t.state` |
| `DBMS_AQ.WAITING (1)` | Specifies that the message is waiting to be processed. | `message_properties_t.state` |
| `DBMS_AQ.PROCESSED (2)` | Specifies that the message has been processed. | `message_properties_t.state` |
| `DBMS_AQ.EXPIRED (3)` | Specifies that the message is in the exception queue. | `message_properties_t.state` |
| `DBMS_AQ.NO_DELAY (0)` | This constant is defined, but will return an error if used. | `message_properties_t.delay` |
| `DBMS_AQ.NEVER (NULL)` | This constant is defined, but will return an error if used. | `message_properties_t.expiration` |
| `DBMS_AQ.NAMESPACE_AQ (0)` | Accept notifications from `DBMS_AQ` queues. | `sys.aq$_reg_info.namespace` |
| `DBMS_AQ.NAMESPACE_ANONYMOUS (1)` | This constant is defined, but will return an error if used. | `sys.aq$_reg_info.namespace` |

The `DBMS_AQ` configuration parameters listed in the following table can be defined in the `postgresql.conf` file. After the configuration parameters are defined, you can invoke the `DBMS_AQ` package to use and manage messages held in queues and queue tables.

| Parameter | Description |
| ----------------------------- | ------------------------------------------------------------------------------------------------ |
| `dbms_aq.max_workers` | The maximum number of workers to run. |
| `dbms_aq.max_idle_time` | The idle time a worker must wait before exiting. |
| `dbms_aq.min_work_time` | The minimum time a worker can run before exiting. |
| `dbms_aq.launch_delay` | The minimum time between creating workers. |
| `dbms_aq.batch_size` | The maximum number of messages to process in a single transaction. The default batch size is 10. |
| `dbms_aq.max_databases` | The size of `DBMS_AQ`’s hash table of databases. The default value is 1024. |
| `dbms_aq.max_pending_retries` | The size of `DBMS_AQ`’s hash table of pending retries. The default value is 1024. |
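
As an illustration only, the fragment below shows how a few of these parameters might be set in `postgresql.conf`; the values are assumptions chosen for a small test system, not recommendations:

```text
# Illustrative DBMS_AQ settings -- example values only
dbms_aq.max_workers = 4      # run at most four queue workers
dbms_aq.max_idle_time = 60   # idle time (assumed to be seconds) before a worker exits
dbms_aq.batch_size = 10      # up to ten messages per transaction (the default)
```

Reload or restart the server after editing `postgresql.conf` so that the new settings take effect.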
+ +enqueue dequeue register unregister + +
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/01_alter_queue.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/01_alter_queue.mdx new file mode 100644 index 00000000000..eaa16d06d9d --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/01_alter_queue.mdx @@ -0,0 +1,52 @@ +--- +title: "ALTER_QUEUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/alter_queue.html" +--- + +Use the `ALTER_QUEUE` procedure to modify an existing queue. The signature is: + +```text +ALTER_QUEUE( + IN NUMBER DEFAULT NULL, + IN NUMBER DEFAULT 0 + IN NUMBER DEFAULT 0, + IN BOOLEAN DEFAULT TRUE) + IN VARCHAR2 DEFAULT NULL, +``` + +**Parameters** + +`queue_name` + + The name of the new queue. + +`max_retries` + + `max_retries` specifies the maximum number of attempts to remove a message with a dequeue statement. The value of `max_retries` is incremented with each `ROLLBACK` statement. When the number of failed attempts reaches the value specified by `max_retries`, the message is moved to the exception queue. Specify `0` to indicate that no retries are allowed. + +`retry_delay` + + `retry_delay` specifies the number of seconds until a message is scheduled for re-processing after a `ROLLBACK`. Specify `0` to indicate that the message should be retried immediately (the default). + +`retention_time` + + `retention_time` specifies the length of time (in seconds) that a message will be stored after being dequeued. You can also specify `0` (the default) to indicate the message should not be retained after dequeueing, or `INFINITE` to retain the message forever. + +`auto_commit` + + This parameter is accepted for compatibility and ignored. + +`comment` + + `comment` specifies a comment associated with the queue. + +**Example** + +The following command alters a queue named `work_order`, setting the `retry_delay` parameter to 5 seconds: + +```text +EXEC DBMS_AQADM.ALTER_QUEUE(queue_name => 'work_order', retry_delay => 5); +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/02_alter_queue_table.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/02_alter_queue_table.mdx new file mode 100644 index 00000000000..2301819a81b --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/02_alter_queue_table.mdx @@ -0,0 +1,47 @@ +--- +title: "ALTER_QUEUE_TABLE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/alter_queue_table.html" +--- + +Use the `ALTER_QUEUE_TABLE` procedure to modify an existing queue table. The signature is: + +```text +ALTER_QUEUE_TABLE ( + IN VARCHAR2, + IN VARCHAR2 DEFAULT NULL, + IN BINARY_INTEGER DEFAULT 0, + IN BINARY_INTEGER DEFAULT 0, +``` + +**Parameters** + +`queue_table` + + The (optionally schema-qualified) name of the queue table. + +`comment` + + Use the `comment` parameter to provide a comment about the queue table. + +`primary_instance` + + `primary_instance` is accepted for compatibility and stored, but is ignored. 
+ +`secondary_instance` + + `secondary_instance` is accepted for compatibility, but is ignored. + +**Example** + +The following command modifies a queue table named `work_order_table`: + +```text +EXEC DBMS_AQADM.ALTER_QUEUE_TABLE + (queue_table => 'work_order_table', comment => 'This queue table +contains work orders for the shipping department.'); +``` + +The queue table is named `work_order_table`; the command adds a comment to the definition of the queue table. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/03_create_queue.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/03_create_queue.mdx new file mode 100644 index 00000000000..801c3e81d94 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/03_create_queue.mdx @@ -0,0 +1,75 @@ +--- +title: "CREATE_QUEUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/create_queue.html" +--- + +Use the `CREATE_QUEUE` procedure to create a queue in an existing queue table. The signature is: + +```text +CREATE_QUEUE( + IN VARCHAR2 + IN VARCHAR2, + IN BINARY_INTEGER DEFAULT NORMAL_QUEUE, + IN NUMBER DEFAULT 5, + IN NUMBER DEFAULT 0 + IN NUMBER DEFAULT 0, + IN BOOLEAN DEFAULT FALSE, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT TRUE) +``` + +**Parameters** + +`queue_name` + + The name of the new queue. + +`queue_table` + + The name of the table in which the new queue will reside. + +`queue_type` + + The type of the new queue. The valid values for `queue_type` are: + + `DBMS_AQADM.NORMAL_QUEUE` – This value specifies a normal queue (the default). + + `DBMS_AQADM.EXCEPTION_QUEUE` – This value specifies that the new queue is an exception queue. An exception queue will support only dequeue operations. + +`max_retries` + + `max_retries` specifies the maximum number of attempts to remove a message with a dequeue statement. The value of `max_retries` is incremented with each `ROLLBACK` statement. When the number of failed attempts reaches the value specified by `max_retries`, the message is moved to the exception queue. The default value for a system table is `0`; the default value for a user created table is `5`. + +`retry_delay` + + `retry_delay` specifies the number of seconds until a message is scheduled for re-processing after a `ROLLBACK`. Specify `0` to indicate that the message should be retried immediately (the default). + +`retention_time` + + `retention_time` specifies the length of time (in seconds) that a message will be stored after being dequeued. You can also specify `0` (the default) to indicate the message should not be retained after dequeueing, or `INFINITE` to retain the message forever. + +`dependency_tracking` + + This parameter is accepted for compatibility and ignored. + +`comment` + + `comment` specifies a comment associated with the queue. + +`auto_commit` + + This parameter is accepted for compatibility and ignored. 
+ +**Example** + +The following anonymous block creates a queue named `work_order` in the `work_order_table` table: + +```text +BEGIN +DBMS_AQADM.CREATE_QUEUE ( queue_name => 'work_order', queue_table => +'work_order_table', comment => 'This queue contains pending work orders.'); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/04_create_queue_table.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/04_create_queue_table.mdx new file mode 100644 index 00000000000..932a4a61e11 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/04_create_queue_table.mdx @@ -0,0 +1,117 @@ +--- +title: "CREATE_QUEUE_TABLE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/create_queue_table.html" +--- + +Use the `CREATE_QUEUE_TABLE` procedure to create a queue table. The signature is: + +```text +CREATE_QUEUE_TABLE ( + IN VARCHAR2, + IN VARCHAR2, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT FALSE, + IN BINARY_INTEGER DEFAULT NONE, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT TRUE, + IN BINARY_INTEGER DEFAULT 0, + IN BINARY_INTEGER DEFAULT 0, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT FALSE) +``` + +**Parameters** + +`queue_table` + + The (optionally schema-qualified) name of the queue table. + +`queue_payload_type` + + The user-defined type of the data that will be stored in the queue table. Please note that to specify a `RAW` data type, you must create a user-defined type that identifies a `RAW` type. + +`storage_clause` + +Use the `storage_clause` parameter to specify attributes for the queue table. Please note that only the `TABLESPACE` option is enforced; all others are accepted for compatibility and ignored. Use the `TABLESPACE` clause to specify the name of a tablespace in which the table will be created. + + `storage_clause` may be one or more of the following: + +```text +TABLESPACE tablespace_name, PCTFREE integer, PCTUSED integer, +INITRANS integer, MAXTRANS integer or STORAGE storage_option. +``` + +`storage_option` may be one or more of the following: + +```text +MINEXTENTS integer, MAXEXTENTS integer, PCTINCREASE integer, INITIAL +size_clause, NEXT, FREELISTS integer, OPTIMAL size_clause, BUFFER_ +POOL {KEEP|RECYCLE|DEFAULT}. +``` + +`sort_list` + + `sort_list` controls the dequeueing order of the queue; specify the names of the column(s) that will be used to sort the queue (in ascending order). The currently accepted values are the following combinations of `enq_time` and `priority`: + +- `enq_time, priority` + +- `priority, enq_time` + +- `priority` + +- `enq_time` + +`multiple_consumers` + + If specified, `message_consumers` must be `FALSE`. + +`message_grouping` + + If specified, `message_grouping` must be `NONE`. + +`comment` + + Use the `comment` parameter to provide a comment about the queue table. + +`auto_commit` + + `auto_commit` is accepted for compatibility, but is ignored. + +`primary_instance` + + `primary_instance` is accepted for compatibility and stored, but is ignored. + +`secondary_instance` + + `secondary_instance` is accepted for compatibility, but is ignored. + +`compatible` + + `compatible` is accepted for compatibility, but is ignored. 
+ +`secure` + + `secure` is accepted for compatibility, but is ignored. + +**Example** + +The following anonymous block first creates a type (`work_order`) with attributes that hold a name (a `VARCHAR2`), and a project description (a `TEXT`). The block then uses that type to create a queue table: + +```text +BEGIN + +CREATE TYPE work_order AS (name VARCHAR2, project TEXT, completed BOOLEAN); + +EXEC DBMS_AQADM.CREATE_QUEUE_TABLE + (queue_table => 'work_order_table', + queue_payload_type => 'work_order', + comment => 'Work order message queue table'); + +END; +``` + +The queue table is named `work_order_table`, and contains a payload of a type `work_order`. A comment notes that this is the `Work order message queue table`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/05_drop_queue.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/05_drop_queue.mdx new file mode 100644 index 00000000000..72b5c30d47c --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/05_drop_queue.mdx @@ -0,0 +1,35 @@ +--- +title: "DROP_QUEUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/drop_queue.html" +--- + +Use the `DROP_QUEUE` procedure to delete a queue. The signature is: + +```text +DROP_QUEUE( + IN VARCHAR2, + IN BOOLEAN DEFAULT TRUE) +``` + +**Parameters** + +`queue_name` + + The name of the queue that you wish to drop. + +`auto_commit` + + `auto_commit` is accepted for compatibility, but is ignored. + +**Example** + +The following anonymous block drops the queue named `work_order`: + +```text +BEGIN +DBMS_AQADM.DROP_QUEUE(queue_name => 'work_order'); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/06_drop_queue_table.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/06_drop_queue_table.mdx new file mode 100644 index 00000000000..5d3562e14b0 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/06_drop_queue_table.mdx @@ -0,0 +1,44 @@ +--- +title: "DROP_QUEUE_TABLE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/drop_queue_table.html" +--- + +Use the `DROP_QUEUE_TABLE` procedure to delete a queue table. The signature is: + +```text +DROP_QUEUE_TABLE( + IN VARCHAR2, + IN BOOLEAN default FALSE, + IN BOOLEAN default TRUE) +``` + +**Parameters** + +`queue_table` + + The (optionally schema-qualified) name of the queue table. + +`force` + + The `force` keyword determines the behavior of the `DROP_QUEUE_TABLE` command when dropping a table that contain entries: + +- If the target table contains entries and force is `FALSE`, the command will fail, and the server will issue an error. + +- If the target table contains entries and force is `TRUE`, the command will drop the table and any dependent objects. + +`auto_commit` + + `auto_commit` is accepted for compatibility, but is ignored. 
+ +**Example** + +The following anonymous block drops a table named `work_order_table`: + +```text +BEGIN + DBMS_AQADM.DROP_QUEUE_TABLE ('work_order_table', force => TRUE); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/07_purge_queue_table.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/07_purge_queue_table.mdx new file mode 100644 index 00000000000..a08cfd104f4 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/07_purge_queue_table.mdx @@ -0,0 +1,48 @@ +--- +title: "PURGE_QUEUE_TABLE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/purge_queue_table.html" +--- + +Use the `PURGE_QUEUE_TABLE` procedure to delete messages from a queue table. The signature is: + +```text +PURGE_QUEUE_TABLE( + IN VARCHAR2, + IN VARCHAR2, + IN aq$_purge_options_t) +``` + +**Parameters** + +`queue_table` + + `queue_table` specifies the name of the queue table from which you are deleting a message. + +`purge_condition` + + Use `purge_condition` to specify a condition (a SQL `WHERE` clause) that the server will evaluate when deciding which messages to purge. + +`purge_options` + + `purge_options` is an object of the type `aq$_purge_options_t`. An `aq$_purge_options_t` object contains: + +| Attribute | Type | Description | +| --------------- | ------- | ------------------------------------------------------------------------------------------------------------------- | +| `block` | Boolean | Specify `TRUE` if an exclusive lock should be held on all queues within the table; the default is `FALSE`. | +| `delivery_mode` | INTEGER | `delivery_mode` specifies the type of message that will be purged. The only accepted value is `DBMS_AQ.PERSISTENT`. | + +**Example** + +The following anonymous block removes any messages from the `work_order_table` with a value in the `completed` column of `YES`: + +```text +DECLARE + purge_options dbms_aqadm.aq$_purge_options_t; +BEGIN + dbms_aqadm.purge_queue_table('work_order_table', 'completed = YES', +purge_options); + END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/08_start_queue.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/08_start_queue.mdx new file mode 100644 index 00000000000..255d0eb9fda --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/08_start_queue.mdx @@ -0,0 +1,41 @@ +--- +title: "START_QUEUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/start_queue.html" +--- + +Use the `START_QUEUE` procedure to make a queue available for enqueuing and dequeueing. The signature is: + +```text +START_QUEUE( + IN VARCHAR2, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT TRUE) +``` + +**Parameters** + +`queue_name` + + `queue_name` specifies the name of the queue that you are starting. + +`enqueue` + + Specify `TRUE` to enable enqueueing (the default), or `FALSE` to leave the current setting unchanged. 
+ +`dequeue` + + Specify `TRUE` to enable dequeueing (the default), or `FALSE` to leave the current setting unchanged. + +**Example** + +The following anonymous block makes a queue named `work_order` available for enqueueing: + +```text +BEGIN +DBMS_AQADM.START_QUEUE +(queue_name => 'work_order); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/09_stop_queue.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/09_stop_queue.mdx new file mode 100644 index 00000000000..d7f7ad97e81 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/09_stop_queue.mdx @@ -0,0 +1,48 @@ +--- +title: "STOP_QUEUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/stop_queue.html" +--- + +Use the `STOP_QUEUE` procedure to disable enqueuing or dequeueing on a specified queue. The signature is: + +```text +STOP_QUEUE( + IN VARCHAR2, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT TRUE) +``` + +**Parameters** + +`queue_name` + + `queue_name` specifies the name of the queue that you are stopping. + +`enqueue` + + Specify `TRUE` to disable enqueueing (the default), or `FALSE` to leave the current setting unchanged. + +`dequeue` + + Specify `TRUE` to disable dequeueing (the default), or `FALSE` to leave the current setting unchanged. + +`wait` + + Specify `TRUE` to instruct the server to wait for any uncompleted transactions to complete before applying the specified changes; while waiting to stop the queue, no transactions are allowed to enqueue or dequeue from the specified queue. Specify `FALSE` to stop the queue immediately. + +**Example** + +The following anonymous block disables enqueueing and dequeueing from the queue named `work_order`: + +```text +BEGIN +DBMS_AQADM.STOP_QUEUE(queue_name =>'work_order', enqueue=>TRUE, +dequeue=>TRUE, wait=>TRUE); +END; +``` + +Enqueueing and dequeueing will stop after any outstanding transactions complete. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/index.mdx new file mode 100644 index 00000000000..eee3b80bf16 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/03_dbms_aqadm/index.mdx @@ -0,0 +1,53 @@ +--- +title: "DBMS_AQADM" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_aqadm.html" +--- + +EDB Postgres Advanced Server Advanced Queueing provides message queueing and message processing for the Advanced Server database. User-defined messages are stored in a queue; a collection of queues is stored in a queue table. Procedures in the `DBMS_AQADM` package create and manage message queues and queue tables. Use the `DBMS_AQ` package to add messages to a queue or remove messages from a queue, or register or unregister a PL/SQL callback procedure. 

Advanced Server also provides extended (non-compatible) functionality for the `DBMS_AQ` package with SQL commands; see the *Database Compatibility for Oracle Developers SQL Guide* for detailed information about the following SQL commands:

- `ALTER QUEUE`
- `ALTER QUEUE TABLE`
- `CREATE QUEUE`
- `CREATE QUEUE TABLE`
- `DROP QUEUE`
- `DROP QUEUE TABLE`

The `DBMS_AQADM` package provides procedures that allow you to create and manage queues and queue tables.

| Function/Procedure | Return Type | Description |
| -------------------- | ----------- | ------------------------------------------------------------------ |
| `ALTER_QUEUE` | n/a | Modify an existing queue. |
| `ALTER_QUEUE_TABLE` | n/a | Modify an existing queue table. |
| `CREATE_QUEUE` | n/a | Create a queue. |
| `CREATE_QUEUE_TABLE` | n/a | Create a queue table. |
| `DROP_QUEUE` | n/a | Drop an existing queue. |
| `DROP_QUEUE_TABLE` | n/a | Drop an existing queue table. |
| `PURGE_QUEUE_TABLE` | n/a | Remove one or more messages from a queue table. |
| `START_QUEUE` | n/a | Make a queue available for enqueueing and dequeueing procedures. |
| `STOP_QUEUE` | n/a | Make a queue unavailable for enqueueing and dequeueing procedures. |

Advanced Server's implementation of `DBMS_AQADM` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported.

Advanced Server supports use of the constants listed below:

| Constant | Description | For Parameters |
| --------------------------------------- | ------------------------------------------------------------ | --------------------------------- |
| `DBMS_AQADM.TRANSACTIONAL (1)` | This constant is defined, but will return an error if used. | `message_grouping` |
| `DBMS_AQADM.NONE (0)` | Use to specify message grouping for a queue table. | `message_grouping` |
| `DBMS_AQADM.NORMAL_QUEUE (0)` | Use with `create_queue` to specify `queue_type`. | `queue_type` |
| `DBMS_AQADM.EXCEPTION_QUEUE (1)` | Use with `create_queue` to specify `queue_type`. | `queue_type` |
| `DBMS_AQADM.INFINITE (-1)` | Use with `create_queue` to specify `retention_time`. | `retention_time` |
| `DBMS_AQADM.PERSISTENT (0)` | The message should be stored in a table. | `enqueue_options_t.delivery_mode` |
| `DBMS_AQADM.BUFFERED (1)` | This constant is defined, but will return an error if used. | `enqueue_options_t.delivery_mode` |
| `DBMS_AQADM.PERSISTENT_OR_BUFFERED (2)` | This constant is defined, but will return an error if used. | `enqueue_options_t.delivery_mode` |
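
For example, the following anonymous block uses the `DBMS_AQADM.EXCEPTION_QUEUE` constant as the `queue_type` when creating a queue. It is a minimal sketch that assumes the `work_order_table` queue table from the `CREATE_QUEUE_TABLE` example already exists:

```text
BEGIN
-- Minimal sketch; assumes work_order_table already exists.
DBMS_AQADM.CREATE_QUEUE(
    queue_name  => 'work_order_exceptions',
    queue_table => 'work_order_table',
    queue_type  => DBMS_AQADM.EXCEPTION_QUEUE,
    comment     => 'Exception queue for failed work orders.');
END;
```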
+ +alter_queue alter_queue_table create_queue create_queue_table drop_queue drop_queue_table purge_queue_table start_queue stop_queue + +
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/01_decrypt.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/01_decrypt.mdx new file mode 100644 index 00000000000..2a822fc109b --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/01_decrypt.mdx @@ -0,0 +1,98 @@ +--- +title: "DECRYPT" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/decrypt.html" +--- + +The `DECRYPT` function or procedure decrypts data using a user-specified cipher algorithm, key and optional initialization vector. The signature of the `DECRYPT` function is: + +```text +DECRYPT + ( IN RAW, IN INTEGER, IN RAW, IN RAW + DEFAULT NULL) RETURN RAW +``` + +The signature of the `DECRYPT` procedure is: + +```text +DECRYPT + ( INOUT BLOB, IN BLOB, IN INTEGER, IN RAW, + IN RAW DEFAULT NULL) +``` + +or + +```text +DECRYPT + ( INOUT CLOB, IN CLOB, IN INTEGER, IN RAW, + IN RAW DEFAULT NULL) +``` + +When invoked as a procedure, `DECRYPT` returns `BLOB` or `CLOB` data to a user-specified `BLOB`. + +**Parameters** + +`dst` + + `dst` specifies the name of a `BLOB` to which the output of the `DECRYPT` procedure will be written. The `DECRYPT` procedure will overwrite any existing data currently in `dst`. + +`src` + + `src` specifies the source data that will be decrypted. If you are invoking `DECRYPT` as a function, specify `RAW` data; if invoking `DECRYPT` as a procedure, specify `BLOB` or `CLOB` data. + +`typ` + + `typ` specifies the block cipher type and any modifiers. This should match the type specified when the `src` was encrypted. Advanced Server supports the following block cipher algorithms, modifiers and cipher suites: + +| **Block Cipher Algorithms** | | +| ---------------------------------- | ----------------------------------------------------------- | +| `ENCRYPT_DES` | `CONSTANT INTEGER := 1;` | +| `ENCRYPT_3DES` | `CONSTANT INTEGER := 3;` | +| `ENCRYPT_AES` | `CONSTANT INTEGER := 4;` | +| `ENCRYPT_AES128` | `CONSTANT INTEGER := 6;` | +| **Block Cipher Modifiers** | | +| `CHAIN_CBC` | `CONSTANT INTEGER := 256;` | +| `CHAIN_ECB` | `CONSTANT INTEGER := 768;` | +| **Block Cipher Padding Modifiers** | | +| `PAD_PKCS5` | `CONSTANT INTEGER := 4096;` | +| `PAD_NONE` | `CONSTANT INTEGER := 8192;` | +| **Block Cipher Suites** | | +| `DES_CBC_PKCS5` | `CONSTANT INTEGER := ENCRYPT_DES + CHAIN_CBC + PAD_PKCS5;` | +| `DES3_CBC_PKCS5` | `CONSTANT INTEGER := ENCRYPT_3DES + CHAIN_CBC + PAD_PKCS5;` | +| `AES_CBC_PKCS5` | `CONSTANT INTEGER := ENCRYPT_AES + CHAIN_CBC + PAD_PKCS5;` | + +`key` + + `key` specifies the user-defined decryption key. This should match the key specified when the `src` was encrypted. + +`iv` + + `iv` (optional) specifies an initialization vector. If an initialization vector was specified when the `src` was encrypted, you must specify an initialization vector when decrypting the `src`. The default is `NULL`. 
+ +**Examples** + +The following example uses the `DBMS_CRYPTO.DECRYPT` function to decrypt an encrypted password retrieved from the `passwords` table: + +```text +CREATE TABLE passwords +( + principal VARCHAR2(90) PRIMARY KEY, -- username + ciphertext RAW(9) -- encrypted password +); + +CREATE FUNCTION get_password(username VARCHAR2) RETURN RAW AS + typ INTEGER := DBMS_CRYPTO.DES_CBC_PKCS5; + key RAW(128) := 'my secret key'; + iv RAW(100) := 'my initialization vector'; + password RAW(2048); +BEGIN + + SELECT ciphertext INTO password FROM passwords WHERE principal = username; + + RETURN dbms_crypto.decrypt(password, typ, key, iv); +END; +``` + +Note that when calling `DECRYPT`, you must pass the same cipher type, key value and initialization vector that was used when `ENCRYPTING` the target. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/02_encrypt.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/02_encrypt.mdx new file mode 100644 index 00000000000..6152bf1f3f3 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/02_encrypt.mdx @@ -0,0 +1,95 @@ +--- +title: "ENCRYPT" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/encrypt.html" +--- + +The `ENCRYPT` function or procedure uses a user-specified algorithm, key, and optional initialization vector to encrypt `RAW`, `BLOB` or `CLOB` data. The signature of the `ENCRYPT` function is: + +```text +ENCRYPT + ( IN RAW, IN INTEGER, IN RAW, + IN RAW DEFAULT NULL) RETURN RAW +``` + +The signature of the `ENCRYPT` procedure is: + +```text +ENCRYPT + ( INOUT BLOB, IN BLOB, IN INTEGER, IN RAW, + IN RAW DEFAULT NULL) +``` + +or + +```text +ENCRYPT + ( INOUT BLOB, IN CLOB, IN INTEGER, IN RAW, + IN RAW DEFAULT NULL) +``` + +When invoked as a procedure, `ENCRYPT` returns `BLOB` or `CLOB` data to a user-specified `BLOB`. + +**Parameters** + +`dst` + + `dst` specifies the name of a `BLOB` to which the output of the `ENCRYPT` procedure will be written. The `ENCRYPT` procedure will overwrite any existing data currently in `dst`. + +`src` + + `src` specifies the source data that will be encrypted. If you are invoking `ENCRYPT` as a function, specify `RAW` data; if invoking `ENCRYPT` as a procedure, specify `BLOB` or `CLOB` data. + +`typ` + + `typ` specifies the block cipher type that will be used by `ENCRYPT`, and any modifiers. 
Advanced Server supports the block cipher algorithms, modifiers and cipher suites listed below: + +| **Block Cipher Algorithms** | | +| ---------------------------------- | ----------------------------------------------------------- | +| `ENCRYPT_DES` | `CONSTANT INTEGER := 1;` | +| `ENCRYPT_3DES` | `CONSTANT INTEGER := 3;` | +| `ENCRYPT_AES` | `CONSTANT INTEGER := 4;` | +| `ENCRYPT_AES128` | `CONSTANT INTEGER := 6;` | +| **Block Cipher Modifiers** | | +| `CHAIN_CBC` | `CONSTANT INTEGER := 256;` | +| `CHAIN_ECB` | `CONSTANT INTEGER := 768;` | +| **Block Cipher Padding Modifiers** | | +| `PAD_PKCS5` | `CONSTANT INTEGER := 4096;` | +| `PAD_NONE` | `CONSTANT INTEGER := 8192;` | +| **Block Cipher Suites** | | +| `DES_CBC_PKCS5` | `CONSTANT INTEGER := ENCRYPT_DES + CHAIN_CBC + PAD_PKCS5;` | +| `DES3_CBC_PKCS5` | `CONSTANT INTEGER := ENCRYPT_3DES + CHAIN_CBC + PAD_PKCS5;` | +| `AES_CBC_PKCS5` | `CONSTANT INTEGER := ENCRYPT_AES + CHAIN_CBC + PAD_PKCS5;` | + +`key` + + `key` specifies the encryption key. + +`iv` + + `iv` (optional) specifies an initialization vector. By default, `iv` is `NULL`. + +**Examples** + +The following example uses the `DBMS_CRYPTO.DES_CBC_PKCS5` Block Cipher Suite (a pre-defined set of algorithms and modifiers) to encrypt a value retrieved from the `passwords` table: + +```text +CREATE TABLE passwords +( + principal VARCHAR2(90) PRIMARY KEY, -- username + ciphertext RAW(9) -- encrypted password +); +CREATE PROCEDURE set_password(username VARCHAR2, cleartext RAW) AS + typ INTEGER := DBMS_CRYPTO.DES_CBC_PKCS5; + key RAW(128) := 'my secret key'; + iv RAW(100) := 'my initialization vector'; + encrypted RAW(2048); +BEGIN + encrypted := dbms_crypto.encrypt(cleartext, typ, key, iv); + UPDATE passwords SET ciphertext = encrypted WHERE principal = username; +END; +``` + +`ENCRYPT` uses a key value of `my secret key` and an initialization vector of `my initialization vector` when encrypting the `password`; specify the same key and initialization vector when decrypting the `password`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/03_hash.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/03_hash.mdx new file mode 100644 index 00000000000..32b5fdc2a02 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/03_hash.mdx @@ -0,0 +1,48 @@ +--- +title: "HASH" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/hash.html" +--- + +The `HASH` function uses a user-specified algorithm to return the hash value of a `RAW` or `CLOB` value. The `HASH` function is available in three forms: + +```text +HASH + ( IN RAW, IN INTEGER) RETURN RAW + +HASH + ( IN CLOB, IN INTEGER) RETURN RAW +``` + +**Parameters** + +`src` + + `src` specifies the value for which the hash value will be generated. You can specify a `RAW`, a `BLOB`, or a `CLOB` value. + +`typ` + + `typ` specifies the `HASH` function type. 
Advanced Server supports the `HASH` function types listed below: + +| **HASH Functions** | | +| ------------------ | ------------------------ | +| `HASH_MD4` | `CONSTANT INTEGER := 1;` | +| `HASH_MD5` | `CONSTANT INTEGER := 2;` | +| `HASH_SH1` | `CONSTANT INTEGER := 3;` | + +**Examples** + +The following example uses `DBMS_CRYPTO.HASH` to find the `md5` hash value of the string, `cleartext source`: + +```text +DECLARE + typ INTEGER := DBMS_CRYPTO.HASH_MD5; + hash_value RAW(100); +BEGIN + + hash_value := DBMS_CRYPTO.HASH('cleartext source', typ); + +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/04_mac.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/04_mac.mdx new file mode 100644 index 00000000000..a726cb786a0 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/04_mac.mdx @@ -0,0 +1,54 @@ +--- +title: "MAC" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/mac.html" +--- + +The `MAC` function uses a user-specified `MAC` function to return the hashed `MAC` value of a `RAW` or `CLOB` value. The `MAC` function is available in three forms: + +```text +MAC + ( IN RAW, IN INTEGER, IN RAW) RETURN RAW + +MAC + ( IN CLOB, IN INTEGER, IN RAW) RETURN RAW +``` + +**Parameters** + +`src` + + `src` specifies the value for which the `MAC` value will be generated. Specify a `RAW`, `BLOB`, or `CLOB` value. + +`typ` + + `typ` specifies the `MAC` function used. Advanced Server supports the `MAC` functions listed below. + +| **MAC Functions** | | +| ----------------- | ------------------------ | +| `HMAC_MD5` | `CONSTANT INTEGER := 1;` | +| `HMAC_SH1` | `CONSTANT INTEGER := 2;` | + +`key` + + `key` specifies the key that will be used to calculate the hashed `MAC` value. + +**Examples** + +The following example finds the hashed `MAC` value of the string `cleartext source`: + +```text +DECLARE + typ INTEGER := DBMS_CRYPTO.HMAC_MD5; + key RAW(100) := 'my secret key'; + mac_value RAW(100); +BEGIN + + mac_value := DBMS_CRYPTO.MAC('cleartext source', typ, key); + +END; +``` + +`DBMS_CRYPTO.MAC` uses a key value of `my secret` key when calculating the `MAC` value of `cleartext source`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/05_randombytes.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/05_randombytes.mdx new file mode 100644 index 00000000000..980798e1957 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/05_randombytes.mdx @@ -0,0 +1,32 @@ +--- +title: "RANDOMBYTES" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/randombytes.html" +--- + +The `RANDOMBYTES` function returns a `RAW` value of the specified length, containing cryptographically random bytes. 
The signature is: + +```text +RANDOMBYTES + ( IN INTEGER) RETURNS RAW +``` + +**Parameter** + +`number_bytes` + + `number_bytes` specifies the number of random bytes to be returned + +**Examples** + +The following example uses `RANDOMBYTES` to return a value that is `1024` bytes long: + +```text +DECLARE + result RAW(1024); +BEGIN + result := DBMS_CRYPTO.RANDOMBYTES(1024); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/06_randominteger.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/06_randominteger.mdx new file mode 100644 index 00000000000..953cca23d6e --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/06_randominteger.mdx @@ -0,0 +1,26 @@ +--- +title: "RANDOMINTEGER" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/randominteger.html" +--- + +The `RANDOMINTEGER()` function returns a random `INTEGER` between `0` and `268,435,455`. The signature is: + +```text +RANDOMINTEGER() RETURNS INTEGER +``` + +**Examples** + +The following example uses the `RANDOMINTEGER` function to return a cryptographically strong random `INTEGER` value: + +```text +DECLARE + result INTEGER; +BEGIN + result := DBMS_CRYPTO.RANDOMINTEGER(); + DBMS_OUTPUT.PUT_LINE(result); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/07_randomnumber.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/07_randomnumber.mdx new file mode 100644 index 00000000000..a8653005477 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/07_randomnumber.mdx @@ -0,0 +1,26 @@ +--- +title: "RANDOMNUMBER" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/randomnumber.html" +--- + +The `RANDOMNUMBER()` function returns a random `NUMBER` between `0` and `268,435,455`. The signature is: + +```text +RANDOMNUMBER() RETURNS NUMBER +``` + +**Examples** + +The following example uses the `RANDOMNUMBER` function to return a cryptographically strong random number: + +```text +DECLARE + result NUMBER; +BEGIN + result := DBMS_CRYPTO.RANDOMNUMBER(); + DBMS_OUTPUT.PUT_LINE(result); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/index.mdx new file mode 100644 index 00000000000..a8ca3540d13 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/04_dbms_crypto/index.mdx @@ -0,0 +1,45 @@ +--- +title: "DBMS_CRYPTO" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_crypto.html" +--- + +The `DBMS_CRYPTO` package provides functions and procedures that allow you to encrypt or decrypt `RAW, BLOB` or `CLOB` data. 
You can also use `DBMS_CRYPTO` functions to generate cryptographically strong random values.

The following table lists the `DBMS_CRYPTO` functions and procedures.

| Function/Procedure | Return Type | Description |
| --------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------- |
| `DECRYPT(src, typ, key, iv)` | `RAW` | Decrypts `RAW` data. |
| `DECRYPT(dst INOUT, src, typ, key, iv)` | N/A | Decrypts `BLOB` data. |
| `DECRYPT(dst INOUT, src, typ, key, iv)` | N/A | Decrypts `CLOB` data. |
| `ENCRYPT(src, typ, key, iv)` | `RAW` | Encrypts `RAW` data. |
| `ENCRYPT(dst INOUT, src, typ, key, iv)` | N/A | Encrypts `BLOB` data. |
| `ENCRYPT(dst INOUT, src, typ, key, iv)` | N/A | Encrypts `CLOB` data. |
| `HASH(src, typ)` | `RAW` | Applies a hash algorithm to `RAW` data. |
| `HASH(src, typ)` | `RAW` | Applies a hash algorithm to `CLOB` data. |
| `MAC(src, typ, key)` | `RAW` | Returns the hashed `MAC` value of the given `RAW` data using the specified hash algorithm and key. |
| `MAC(src, typ, key)` | `RAW` | Returns the hashed `MAC` value of the given `CLOB` data using the specified hash algorithm and key. |
| `RANDOMBYTES(number_bytes)` | `RAW` | Returns a specified number of cryptographically strong random bytes. |
| `RANDOMINTEGER()` | `INTEGER` | Returns a random `INTEGER`. |
| `RANDOMNUMBER()` | `NUMBER` | Returns a random `NUMBER`. |

`DBMS_CRYPTO` functions and procedures support the following error messages:

`ORA-28239 - DBMS_CRYPTO.KeyNull`

`ORA-28829 - DBMS_CRYPTO.CipherSuiteNull`

`ORA-28827 - DBMS_CRYPTO.CipherSuiteInvalid`

Unlike Oracle, Advanced Server will not return error `ORA-28233` if you re-encrypt previously encrypted information.

Please note that `RAW` and `BLOB` are synonyms for the PostgreSQL `BYTEA` data type, and `CLOB` is a synonym for `TEXT`.
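
Before turning to the individual reference pages, the following anonymous block sketches an `ENCRYPT`/`DECRYPT` round trip using the predefined `DES_CBC_PKCS5` cipher suite. The key value is an illustrative assumption; the same cipher type and key (and initialization vector, if one is used) must be passed to both calls:

```text
DECLARE
    typ        INTEGER  := DBMS_CRYPTO.DES_CBC_PKCS5; -- predefined cipher suite
    key        RAW(128) := 'my secret key';           -- illustrative key only
    ciphertext RAW(2048);
    cleartext  RAW(2048);
BEGIN
    ciphertext := DBMS_CRYPTO.ENCRYPT('cleartext source', typ, key);
    cleartext  := DBMS_CRYPTO.DECRYPT(ciphertext, typ, key);
END;
```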
+ +decrypt encrypt hash mac randombytes randominteger randomnumber + +
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/01_broken.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/01_broken.mdx new file mode 100644 index 00000000000..ea07992b686 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/01_broken.mdx @@ -0,0 +1,45 @@ +--- +title: "BROKEN" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/broken.html" +--- + +The `BROKEN` procedure sets the state of a job to either broken or not broken. A broken job cannot be executed except by using the `RUN` procedure. + +```text +BROKEN( BINARY_INTEGER, BOOLEAN [, DATE ]) +``` + +**Parameters** + +`job` + + Identifier of the job to be set as broken or not broken. + +`broken` + + If set to `TRUE` the job’s state is set to broken. If set to `FALSE` the job’s state is set to not broken. Broken jobs cannot be run except by using the `RUN` procedure. + +`next_date` + + Date/time when the job is to be run. The default is `SYSDATE`. + +**Examples** + +Set the state of a job with job identifier 104 to broken: + +```text +BEGIN + DBMS_JOB.BROKEN(104,true); +END; +``` + +Change the state back to not broken: + +```text +BEGIN + DBMS_JOB.BROKEN(104,false); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/02_change.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/02_change.mdx new file mode 100644 index 00000000000..7718400e8f7 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/02_change.mdx @@ -0,0 +1,51 @@ +--- +title: "CHANGE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/change.html" +--- + +The `CHANGE` procedure modifies certain job attributes including the stored procedure to be run, the next date/time the job is to be run, and how often it is to be run. + +```text +CHANGE( BINARY_INTEGER VARCHAR2, DATE, + VARCHAR2, BINARY_INTEGER, BOOLEAN) +``` + +**Parameters** + +`job` + + Identifier of the job to modify. + +`what` + + Stored procedure name. Set this parameter to null if the existing value is to remain unchanged. + +`next_date` + + Date/time when the job is to be run next. Set this parameter to null if the existing value is to remain unchanged. + +`interval` + + Date function that when evaluated, provides the next date/time the job is to run. Set this parameter to null if the existing value is to remain unchanged. + +`instance` + + This argument is ignored, but is included for compatibility. + +`force` + + This argument is ignored, but is included for compatibility. + +**Examples** + +Change the job to run next on December 13, 2007. Leave other parameters unchanged. 
+
+```text
+BEGIN
+    DBMS_JOB.CHANGE(104,NULL,TO_DATE('13-DEC-07','DD-MON-YY'),NULL, NULL,
+    NULL);
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/03_interval.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/03_interval.mdx
new file mode 100644
index 00000000000..5240748dea4
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/03_interval.mdx
@@ -0,0 +1,33 @@
+---
+title: "INTERVAL"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/interval.html"
+---
+
+The `INTERVAL` procedure sets how often a job is to be run.
+
+```text
+INTERVAL(<job> BINARY_INTEGER, <interval> VARCHAR2)
+```
+
+**Parameters**
+
+`job`
+
+ Identifier of the job to modify.
+
+`interval`
+
+ Date function that, when evaluated, provides the next date/time the job is to be run. If `interval` is `NULL` and the job is complete, the job is removed from the queue.
+
+**Examples**
+
+Change the job to run once a week:
+
+```text
+BEGIN
+    DBMS_JOB.INTERVAL(104,'SYSDATE + 7');
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/04_next_date.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/04_next_date.mdx
new file mode 100644
index 00000000000..a7243eceb40
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/04_next_date.mdx
@@ -0,0 +1,33 @@
+---
+title: "NEXT_DATE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/next_date.html"
+---
+
+The `NEXT_DATE` procedure sets the date/time when the job is to be run next.
+
+```text
+NEXT_DATE(<job> BINARY_INTEGER, <next_date> DATE)
+```
+
+**Parameters**
+
+`job`
+
+ Identifier of the job whose next run date is to be set.
+
+`next_date`
+
+ Date/time when the job is to be run next.
+
+**Examples**
+
+Change the job to run next on December 14, 2007:
+
+```text
+BEGIN
+    DBMS_JOB.NEXT_DATE(104, TO_DATE('14-DEC-07','DD-MON-YY'));
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/05_remove.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/05_remove.mdx
new file mode 100644
index 00000000000..de817c8a22a
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/05_remove.mdx
@@ -0,0 +1,29 @@
+---
+title: "REMOVE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/remove.html"
+---
+
+The `REMOVE` procedure deletes the specified job from the database. The job must be resubmitted using the `SUBMIT` procedure to have it executed again. Note that the stored procedure associated with the job is not deleted.
+
+```text
+REMOVE(<job> BINARY_INTEGER)
+```
+
+**Parameter**
+
+`job`
+
+ Identifier of the job that is to be removed from the database.
+
+**Examples**
+
+Remove a job from the database:
+
+```text
+BEGIN
+    DBMS_JOB.REMOVE(104);
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/06_run.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/06_run.mdx
new file mode 100644
index 00000000000..465309e4c23
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/06_run.mdx
@@ -0,0 +1,29 @@
+---
+title: "RUN"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/run.html"
+---
+
+The `RUN` procedure forces the job to be run, even if it is marked as broken.
+
+```text
+RUN(<job> BINARY_INTEGER)
+```
+
+**Parameter**
+
+`job`
+
+ Identifier of the job to be run.
+
+**Examples**
+
+Force a job to be run:
+
+```text
+BEGIN
+    DBMS_JOB.RUN(104);
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/07_submit.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/07_submit.mdx
new file mode 100644
index 00000000000..95a1b199c10
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/07_submit.mdx
@@ -0,0 +1,65 @@
+---
+title: "SUBMIT"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/submit.html"
+---
+
+The `SUBMIT` procedure creates a job definition and stores it in the database. A job consists of a job identifier, the stored procedure to be executed, when the job is to be first run, and a date function that calculates the next date/time the job is to be run.
+
+```text
+SUBMIT(<job> OUT BINARY_INTEGER, <what> VARCHAR2
+  [, <next_date> DATE [, <interval> VARCHAR2 [, <no_parse> BOOLEAN ]]])
+```
+
+**Parameters**
+
+`job`
+
+ Identifier assigned to the job.
+
+`what`
+
+ Name of the stored procedure to be executed by the job.
+
+`next_date`
+
+ Date/time when the job is to be run next. The default is `SYSDATE`.
+
+`interval`
+
+ Date function that, when evaluated, provides the next date/time the job is to run. If `interval` is set to `NULL`, then the job is run only once. `NULL` is the default.
+
+`no_parse`
+
+ If set to `TRUE`, do not syntax-check the stored procedure upon job creation – check only when the job first executes. If set to `FALSE`, check the procedure upon job creation. The default is `FALSE`.
+
+ **Note**: The `no_parse` option is not supported in this implementation of `SUBMIT()`. It is included for compatibility only.
+
+**Examples**
+
+The following example creates a job using the stored procedure, `job_proc`. The job executes immediately and runs once a day thereafter, as set by the `interval` parameter, `SYSDATE + 1`.
+
+```text
+DECLARE
+    jobid           INTEGER;
+BEGIN
+    DBMS_JOB.SUBMIT(jobid,'job_proc;',SYSDATE,
+        'SYSDATE + 1');
+    DBMS_OUTPUT.PUT_LINE('jobid: ' || jobid);
+END;
+
+jobid: 104
+```
+
+The job immediately executes procedure, `job_proc`, populating table, `jobrun`, with a row:
+
+```text
+SELECT * FROM jobrun;
+
+                runtime
+-------------------------------------
+ job_proc run at 2007-12-11 11:43:25
+(1 row)
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/08_what.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/08_what.mdx
new file mode 100644
index 00000000000..9e324f51f85
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/08_what.mdx
@@ -0,0 +1,33 @@
+---
+title: "WHAT"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/what.html"
+---
+
+The `WHAT` procedure changes the stored procedure that the job will execute.
+
+```text
+WHAT(<job> BINARY_INTEGER, <what> VARCHAR2)
+```
+
+**Parameters**
+
+`job`
+
+ Identifier of the job for which the stored procedure is to be changed.
+
+`what`
+
+ Name of the stored procedure to be executed.
+
+**Examples**
+
+Change the job to run the `list_emp` procedure:
+
+```text
+BEGIN
+    DBMS_JOB.WHAT(104,'list_emp;');
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/index.mdx
new file mode 100644
index 00000000000..7924b5bc0b4
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/05_dbms_job/index.mdx
@@ -0,0 +1,60 @@
+---
+title: "DBMS_JOB"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_job.html"
+---
+
+The `DBMS_JOB` package provides for the creation, scheduling, and management of jobs. A job runs a stored procedure that has been previously stored in the database. The `SUBMIT` procedure is used to create and store a job definition. A job identifier is assigned to a job along with its associated stored procedure and the attributes describing when and how often the job is to be run.
+
+This package relies on the `pgAgent` scheduler. By default, the Advanced Server installer installs `pgAgent`, but you must start the `pgAgent` service manually prior to using `DBMS_JOB`. If you attempt to use this package to schedule a job after uninstalling `pgAgent`, `DBMS_JOB` will throw an error. `DBMS_JOB` verifies that `pgAgent` is installed, but does not verify that the service is running.
+
+The following table lists the supported `DBMS_JOB` procedures:
+
+| Function/Procedure                                                | Return Type | Description                                                                                                                                                  |
+| ----------------------------------------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `BROKEN(job, broken [, next_date ])`                              | n/a         | Specify that a given job is either broken or not broken.                                                                                                     |
+| `CHANGE(job, what, next_date, interval, instance, force)`         | n/a         | Change the job’s parameters.                                                                                                                                 |
+| `INTERVAL(job, interval)`                                         | n/a         | Set the execution frequency by means of a date function that is recalculated each time the job is run. This value becomes the next date/time for execution.  |
+| `NEXT_DATE(job, next_date)`                                       | n/a         | Set the next date/time the job is to be run.                                                                                                                 |
+| `REMOVE(job)`                                                     | n/a         | Delete the job definition from the database.                                                                                                                 |
+| `RUN(job)`                                                        | n/a         | Force execution of a job, even if it is marked broken.                                                                                                       |
+| `SUBMIT(job OUT, what [, next_date [, interval [, no_parse ]]])`  | n/a         | Create a job and store its definition in the database.                                                                                                      |
+| `WHAT(job, what)`                                                 | n/a         | Change the stored procedure run by a job.                                                                                                                   |
+
+Advanced Server's implementation of `DBMS_JOB` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported.
+
+Before using `DBMS_JOB`, a database superuser must create the `pgAgent` extension. Use the `psql` client to connect to a database and invoke the command:
+
+```text
+CREATE EXTENSION pgagent;
+```
+
+When and how often a job is run is dependent upon two interacting parameters – `next_date` and `interval`. The `next_date` parameter is a date/time value that specifies the next date/time when the job is to be executed. The `interval` parameter is a string that contains a date function that evaluates to a date/time value.
+
+Just prior to any execution of the job, the expression in the `interval` parameter is evaluated. The resulting value replaces the `next_date` value stored with the job. The job is then executed. In this manner, the expression in `interval` is repeatedly re-evaluated prior to each job execution, supplying the `next_date` date/time for the next execution.
+
+!!! Note
+    The database user that starts the `pgAgent` server and executes the job must be the same user that created the job and its schedule.
+
+The following examples use the stored procedure, `job_proc`, which simply inserts a timestamp into the table, `jobrun`, containing a single `VARCHAR2` column.
+
+```text
+CREATE TABLE jobrun (
+    runtime         VARCHAR2(40)
+);
+
+CREATE OR REPLACE PROCEDURE job_proc
+IS
+BEGIN
+    INSERT INTO jobrun VALUES ('job_proc run at ' || TO_CHAR(SYSDATE,
+        'yyyy-mm-dd hh24:mi:ss'));
+END;
+```
+
+
+
+ +broken change interval next_date remove run submit what + +
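+
+The following is a brief, illustrative lifecycle sketch (not part of the original documentation) that ties these procedures together; it assumes the `pgAgent` service is running and reuses the `job_proc` procedure defined above:
+
+```text
+DECLARE
+    jobid           INTEGER;
+BEGIN
+    -- Create the job: run job_proc immediately, then once a day
+    DBMS_JOB.SUBMIT(jobid, 'job_proc;', SYSDATE, 'SYSDATE + 1');
+    -- Reschedule the job to run hourly instead
+    DBMS_JOB.INTERVAL(jobid, 'SYSDATE + 1/24');
+    -- Force one immediate run, then delete the job definition
+    DBMS_JOB.RUN(jobid);
+    DBMS_JOB.REMOVE(jobid);
+END;
+```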
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/01_append.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/01_append.mdx
new file mode 100644
index 00000000000..17591dfe6ca
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/01_append.mdx
@@ -0,0 +1,23 @@
+---
+title: "APPEND"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/append.html"
+---
+
+The `APPEND` procedure provides the capability to append one large object to another. Both large objects must be of the same type.
+
+```text
+APPEND(<dest_lob> IN OUT { BLOB | CLOB }, <src_lob> { BLOB | CLOB })
+```
+
+**Parameters**
+
+`dest_lob`
+
+ Large object locator for the destination object. Must be the same data type as `src_lob`.
+
+`src_lob`
+
+ Large object locator for the source object. Must be the same data type as `dest_lob`.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/02_compare.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/02_compare.mdx
new file mode 100644
index 00000000000..fe149fb3ee8
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/02_compare.mdx
@@ -0,0 +1,41 @@
+---
+title: "COMPARE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/compare.html"
+---
+
+The `COMPARE` function performs an exact byte-by-byte comparison of two large objects for a given length at given offsets. The large objects being compared must be the same data type.
+
+```text
+<status> INTEGER COMPARE(<lob_1> { BLOB | CLOB },
+    <lob_2> { BLOB | CLOB }
+    [, <amount> INTEGER [, <offset_1> INTEGER [, <offset_2> INTEGER ]]])
+```
+
+**Parameters**
+
+`lob_1`
+
+ Large object locator of the first large object to be compared. Must be the same data type as `lob_2`.
+
+`lob_2`
+
+ Large object locator of the second large object to be compared. Must be the same data type as `lob_1`.
+
+`amount`
+
+ If the data type of the large objects is `BLOB`, then the comparison is made for `amount` bytes. If the data type of the large objects is `CLOB`, then the comparison is made for `amount` characters. The default is the maximum size of a large object.
+
+`offset_1`
+
+ Position within the first large object to begin the comparison. The first byte/character is offset 1. The default is 1.
+
+`offset_2`
+
+ Position within the second large object to begin the comparison. The first byte/character is offset 1. The default is 1.
+
+`status`
+
+ Zero if both large objects are exactly the same for the specified length at the specified offsets. Non-zero if the objects are not the same. `NULL` if `amount`, `offset_1`, or `offset_2` is less than zero.
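+
+**Examples**
+
+The original page gives no example, so the following anonymous block is an illustrative sketch only; the two `CLOB` values and the lengths used are hypothetical:
+
+```text
+DECLARE
+    lob_1           CLOB := 'A comparison test';
+    lob_2           CLOB := 'A comparison TEST';
+    v_status        INTEGER;
+BEGIN
+    -- The first 12 characters ('A comparison') are identical, so
+    -- COMPARE returns 0 (both offsets default to 1)
+    v_status := DBMS_LOB.COMPARE(lob_1, lob_2, 12);
+    DBMS_OUTPUT.PUT_LINE('First 12 characters: ' || v_status);
+
+    -- Compared over all 17 characters, the objects differ ('test'
+    -- versus 'TEST'), so COMPARE returns a non-zero value
+    v_status := DBMS_LOB.COMPARE(lob_1, lob_2, 17);
+    DBMS_OUTPUT.PUT_LINE('All 17 characters: ' || v_status);
+END;
+```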
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/03_converttoblob.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/03_converttoblob.mdx new file mode 100644 index 00000000000..9079205ea70 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/03_converttoblob.mdx @@ -0,0 +1,62 @@ +--- +title: "CONVERTTOBLOB" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/converttoblob.html" +--- + +The `CONVERTTOBLOB` procedure provides the capability to convert character data to binary. + +```text +CONVERTTOBLOB( IN OUT BLOB, CLOB, + INTEGER, IN OUT INTEGER, + IN OUT INTEGER, NUMBER, + IN OUT INTEGER, OUT INTEGER) +``` + +**Parameters** + +`dest_lob` + + `BLOB` large object locator to which the character data is to be converted. + +`src_clob` + + `CLOB` large object locator of the character data to be converted. + +`amount` + + Number of characters of `src_clob` to be converted. + +`dest_offset IN` + + Position in bytes in the destination `BLOB` where writing of the source `CLOB` should begin. The first byte is offset 1. + +`dest_offset OUT` + + Position in bytes in the destination `BLOB` after the write operation completes. The first byte is offset 1. + +`src_offset IN` + + Position in characters in the source `CLOB` where conversion to the destination `BLOB` should begin. The first character is offset 1. + +`src_offset OUT` + + Position in characters in the source `CLOB` after the conversion operation completes. The first character is offset 1. + +`blob_csid` + + Character set ID of the converted, destination `BLOB`. + +`lang_context IN` + + Language context for the conversion. The default value of 0 is typically used for this setting. + +`lang_context OUT` + + Language context after the conversion completes. + +`warning` + + 0 if the conversion was successful, 1 if an inconvertible character was encountered. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/04_converttoclob.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/04_converttoclob.mdx new file mode 100644 index 00000000000..acf1b735881 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/04_converttoclob.mdx @@ -0,0 +1,62 @@ +--- +title: "CONVERTTOCLOB" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/converttoclob.html" +--- + +The `CONVERTTOCLOB` procedure provides the capability to convert binary data to character. + +```text +CONVERTTOCLOB( IN OUT CLOB, BLOB, + INTEGER, IN OUT INTEGER, + IN OUT INTEGER, NUMBER, + IN OUT INTEGER, OUT INTEGER) +``` + +**Parameters** + +`dest_lob` + + `CLOB` large object locator to which the binary data is to be converted. + +`src_blob` + + `BLOB` large object locator of the binary data to be converted. + +`amount` + + Number of bytes of `src_blob` to be converted. + +`dest_offset IN` + + Position in characters in the destination `CLOB` where writing of the source `BLOB` should begin. The first character is offset 1. 
+ +`dest_offset OUT` + + Position in characters in the destination `CLOB` after the write operation completes. The first character is offset 1. + +`src_offset IN` + + Position in bytes in the source `BLOB` where conversion to the destination `CLOB` should begin. The first byte is offset 1. + +`src_offset OUT` + + Position in bytes in the source `BLOB` after the conversion operation completes. The first byte is offset 1. + +`blob_csid` + + Character set ID of the converted, destination `CLOB`. + +`lang_context IN` + + Language context for the conversion. The default value of 0 is typically used for this setting. + +`lang_context OUT` + + Language context after the conversion completes. + +`warning` + + 0 if the conversion was successful, 1 if an inconvertible character was encountered. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/05_copy.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/05_copy.mdx new file mode 100644 index 00000000000..5368bf6eaec --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/05_copy.mdx @@ -0,0 +1,38 @@ +--- +title: "COPY" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/copy.html" +--- + +The `COPY` procedure provides the capability to copy one large object to another. The source and destination large objects must be the same data type. + +```text +COPY( IN OUT { BLOB | CLOB }, +{ BLOB | CLOB }, + INTEGER + [, INTEGER [, INTEGER ]]) +``` + +**Parameters** + +`dest_lob` + + Large object locator of the large object to which `src_lob` is to be copied. Must be the same data type as `src_lob`. + +`src_lob` + + Large object locator of the large object to be copied to `dest_lob`. Must be the same data type as `dest_lob`. + +`amount` + + Number of bytes/characters of `src_lob` to be copied. + +`dest_offset` + + Position in the destination large object where writing of the source large object should begin. The first position is offset 1. The default is 1. + +`src_offset` + + Position in the source large object where copying to the destination large object should begin. The first position is offset 1. The default is 1. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/06_erase.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/06_erase.mdx new file mode 100644 index 00000000000..e9f196b273c --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/06_erase.mdx @@ -0,0 +1,32 @@ +--- +title: "ERASE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/erase.html" +--- + +The `ERASE` procedure provides the capability to erase a portion of a large object. To erase a large object means to replace the specified portion with zero-byte fillers for `BLOBs` or with spaces for `CLOBs`. The actual size of the large object is not altered. + +```text +ERASE( IN OUT { BLOB | CLOB }, IN OUT INTEGER + [, INTEGER ]) +``` + +**Parameters** + +`lob_loc` + + Large object locator of the large object to be erased. 
+ +`amount IN` + + Number of bytes/characters to be erased. + +`amount OUT` + + Number of bytes/characters actually erased. This value can be smaller than the input value if the end of the large object is reached before `amount` bytes/characters have been erased. + +`offset` + + Position in the large object where erasing is to begin. The first byte/character is position 1. The default is 1. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/07_get_storage_limit.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/07_get_storage_limit.mdx new file mode 100644 index 00000000000..a6223b79e70 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/07_get_storage_limit.mdx @@ -0,0 +1,25 @@ +--- +title: "GET_STORAGE_LIMIT" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/get_storage_limit.html" +--- + +The `GET_STORAGE_LIMIT` function returns the limit on the largest allowable large object. + +```text + INTEGER GET_STORAGE_LIMIT( BLOB) + + INTEGER GET_STORAGE_LIMIT( CLOB) +``` + +**Parameters** + +`size` + + Maximum allowable size of a large object in this database. + +`lob_loc` + + This parameter is ignored, but is included for compatibility. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/08_getlength.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/08_getlength.mdx new file mode 100644 index 00000000000..06cf2c697a8 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/08_getlength.mdx @@ -0,0 +1,25 @@ +--- +title: "GETLENGTH" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/getlength.html" +--- + +The `GETLENGTH` function returns the length of a large object. + +```text + INTEGER GETLENGTH( BLOB) + + INTEGER GETLENGTH( CLOB) +``` + +**Parameters** + +`lob_loc` + + Large object locator of the large object whose length is to be obtained. + +`amount` + + Length of the large object in bytes for `BLOBs` or characters for `CLOBs`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/09_instr.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/09_instr.mdx new file mode 100644 index 00000000000..dee4d8c1bda --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/09_instr.mdx @@ -0,0 +1,36 @@ +--- +title: "INSTR" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/instr.html" +--- + +The `INSTR` function returns the location of the nth occurrence of a given pattern within a large object. + +```text + INTEGER INSTR( { BLOB | CLOB }, + { RAW | VARCHAR2 } [, INTEGER [, INTEGER ]]) +``` + +**Parameters** + +`lob_loc` + + Large object locator of the large object in which to search for pattern. 
+
+`pattern`
+
+ Pattern of bytes or characters to match against the large object, `lob_loc`. `pattern` must be `RAW` if `lob_loc` is a `BLOB`; `pattern` must be `VARCHAR2` if `lob_loc` is a `CLOB`.
+
+`offset`
+
+ Position within `lob_loc` to start the search for `pattern`. The first byte/character is position 1. The default is 1.
+
+`nth`
+
+ Search for the `nth` occurrence of `pattern`, starting at the position given by `offset`. The default is 1.
+
+`position`
+
+ Position within the large object where `pattern` appears for the `nth` time, starting from the position given by `offset`.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/10_read.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/10_read.mdx
new file mode 100644
index 00000000000..603699500bd
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/10_read.mdx
@@ -0,0 +1,36 @@
+---
+title: "READ"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/read.html"
+---
+
+The `READ` procedure provides the capability to read a portion of a large object into a buffer.
+
+```text
+READ(<lob_loc> { BLOB | CLOB }, <amount> IN OUT BINARY_INTEGER,
+    <offset> INTEGER, <buffer> OUT { RAW | VARCHAR2 })
+```
+
+**Parameters**
+
+`lob_loc`
+
+ Large object locator of the large object to be read.
+
+`amount IN`
+
+ Number of bytes/characters to read.
+
+`amount OUT`
+
+ Number of bytes/characters actually read. If there is no more data to be read, then `amount` returns 0 and a `DATA_NOT_FOUND` exception is thrown.
+
+`offset`
+
+ Position to begin reading. The first byte/character is position 1.
+
+`buffer`
+
+ Variable to receive the large object. If `lob_loc` is a `BLOB`, then `buffer` must be `RAW`. If `lob_loc` is a `CLOB`, then `buffer` must be `VARCHAR2`.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/11_substr.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/11_substr.mdx
new file mode 100644
index 00000000000..a8b713a6983
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/11_substr.mdx
@@ -0,0 +1,32 @@
+---
+title: "SUBSTR"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/substr.html"
+---
+
+The `SUBSTR` function provides the capability to return a portion of a large object.
+
+```text
+<data> { RAW | VARCHAR2 } SUBSTR(<lob_loc> { BLOB | CLOB }
+    [, <amount> INTEGER [, <offset> INTEGER ]])
+```
+
+**Parameters**
+
+`lob_loc`
+
+ Large object locator of the large object to be read.
+
+`amount`
+
+ Number of bytes/characters to be returned. The default is 32,767.
+
+`offset`
+
+ Position within the large object to begin returning data. The first byte/character is position 1. The default is 1.
+
+`data`
+
+ Returned portion of the large object to be read. If `lob_loc` is a `BLOB`, the return data type is `RAW`. If `lob_loc` is a `CLOB`, the return data type is `VARCHAR2`.
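+
+**Examples**
+
+The original page gives no example; the following anonymous block is an illustrative sketch only, using a hypothetical `CLOB` value:
+
+```text
+DECLARE
+    v_clob          CLOB := 'ABCDEFGHIJ';
+    v_data          VARCHAR2(10);
+BEGIN
+    -- Return 3 characters starting at position 4: 'DEF'
+    v_data := DBMS_LOB.SUBSTR(v_clob, 3, 4);
+    DBMS_OUTPUT.PUT_LINE('SUBSTR returns: ' || v_data);
+END;
+```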
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/12_trim.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/12_trim.mdx new file mode 100644 index 00000000000..a7510fe814c --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/12_trim.mdx @@ -0,0 +1,23 @@ +--- +title: "TRIM" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/trim.html" +--- + +The `TRIM` procedure provides the capability to truncate a large object to the specified length. + +```text +TRIM( IN OUT { BLOB | CLOB }, INTEGER) +``` + +**Parameters** + +`lob_loc` + + Large object locator of the large object to be trimmed. + +`newlen` + + Number of bytes/characters to which the large object is to be trimmed. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/13_write.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/13_write.mdx new file mode 100644 index 00000000000..a989d3ea709 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/13_write.mdx @@ -0,0 +1,33 @@ +--- +title: "WRITE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/write.html" +--- + +The `WRITE` procedure provides the capability to write data into a large object. Any existing data in the large object at the specified offset for the given length is overwritten by data given in the buffer. + +```text +WRITE( IN OUT { BLOB | CLOB }, + BINARY_INTEGER, + INTEGER, { RAW | VARCHAR2 }) +``` + +**Parameters** + +`lob_loc` + + Large object locator of the large object to be written. + +`amount` + + The number of bytes/characters in `buffer` to be written to the large object. + +`offset` + + The offset in bytes/characters from the beginning of the large object (origin is 1) for the write operation to begin. + +`buffer` + + Contains data to be written to the large object. If `lob_loc` is a `BLOB`, then `buffer` must be `RAW`. If `lob_loc` is a `CLOB`, then `buffer` must be `VARCHAR2`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/14_writeappend.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/14_writeappend.mdx new file mode 100644 index 00000000000..d05524a6238 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/14_writeappend.mdx @@ -0,0 +1,28 @@ +--- +title: "WRITEAPPEND" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/writeappend.html" +--- + +The `WRITEAPPEND` procedure provides the capability to add data to the end of a large object. + +```text +WRITEAPPEND( IN OUT { BLOB | CLOB }, + BINARY_INTEGER, { RAW | VARCHAR2 }) +``` + +**Parameters** + +`lob_loc` + + Large object locator of the large object to which data is to be appended. 
+
+`amount`
+
+ Number of bytes/characters from `buffer` to be appended to the large object.
+
+`buffer`
+
+ Data to be appended to the large object. If `lob_loc` is a `BLOB`, then `buffer` must be `RAW`. If `lob_loc` is a `CLOB`, then `buffer` must be `VARCHAR2`.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/index.mdx
new file mode 100644
index 00000000000..85765456be4
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/06_dbms_lob/index.mdx
@@ -0,0 +1,58 @@
+---
+title: "DBMS_LOB"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_lob.html"
+---
+
+The `DBMS_LOB` package provides the capability to operate on large objects. The following table lists the supported functions and procedures:
+
+| Function/Procedure                                                                                                                       | Return Type     | Description                                                                                      |
+| ---------------------------------------------------------------------------------------------------------------------------------------- | --------------- | -------------------------------------------------------------------------------------------------- |
+| `APPEND(dest_lob IN OUT, src_lob)`                                                                                                       | n/a             | Appends one large object to another.                                                             |
+| `COMPARE(lob_1, lob_2 [, amount [, offset_1 [, offset_2 ]]])`                                                                            | `INTEGER`       | Compares two large objects.                                                                      |
+| `CONVERTTOBLOB(dest_lob IN OUT, src_clob, amount, dest_offset IN OUT, src_offset IN OUT, blob_csid, lang_context IN OUT, warning OUT)`   | n/a             | Converts character data to binary.                                                               |
+| `CONVERTTOCLOB(dest_lob IN OUT, src_blob, amount, dest_offset IN OUT, src_offset IN OUT, blob_csid, lang_context IN OUT, warning OUT)`   | n/a             | Converts binary data to character.                                                               |
+| `COPY(dest_lob IN OUT, src_lob, amount [, dest_offset [, src_offset ]])`                                                                 | n/a             | Copies one large object to another.                                                              |
+| `ERASE(lob_loc IN OUT, amount IN OUT [, offset ])`                                                                                       | n/a             | Erases a large object.                                                                           |
+| `GET_STORAGE_LIMIT(lob_loc)`                                                                                                             | `INTEGER`       | Gets the storage limit for large objects.                                                        |
+| `GETLENGTH(lob_loc)`                                                                                                                     | `INTEGER`       | Gets the length of the large object.                                                             |
+| `INSTR(lob_loc, pattern [, offset [, nth ]])`                                                                                            | `INTEGER`       | Gets the position of the nth occurrence of a pattern in the large object, starting at `offset`.  |
+| `READ(lob_loc, amount IN OUT, offset, buffer OUT)`                                                                                       | n/a             | Reads a large object.                                                                            |
+| `SUBSTR(lob_loc [, amount [, offset ]])`                                                                                                 | `RAW, VARCHAR2` | Gets part of a large object.                                                                     |
+| `TRIM(lob_loc IN OUT, newlen)`                                                                                                           | n/a             | Trims a large object to the specified length.                                                    |
+| `WRITE(lob_loc IN OUT, amount, offset, buffer)`                                                                                          | n/a             | Writes data to a large object.                                                                   |
+| `WRITEAPPEND(lob_loc IN OUT, amount, buffer)`                                                                                            | n/a             | Writes data from the buffer to the end of a large object.                                        |
+
+Advanced Server's implementation of `DBMS_LOB` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported.
+
+The following table lists the public variables available in the package.
+
+| **Public Variables**      | **Data Type** | **Value**    |
+| ------------------------- | ------------- | ------------ |
+| `compress_off`            | `INTEGER`     | `0`          |
+| `compress_on`             | `INTEGER`     | `1`          |
+| `deduplicate_off`         | `INTEGER`     | `0`          |
+| `deduplicate_on`          | `INTEGER`     | `4`          |
+| `default_csid`            | `INTEGER`     | `0`          |
+| `default_lang_ctx`        | `INTEGER`     | `0`          |
+| `encrypt_off`             | `INTEGER`     | `0`          |
+| `encrypt_on`              | `INTEGER`     | `1`          |
+| `file_readonly`           | `INTEGER`     | `0`          |
+| `lobmaxsize`              | `INTEGER`     | `1073741823` |
+| `lob_readonly`            | `INTEGER`     | `0`          |
+| `lob_readwrite`           | `INTEGER`     | `1`          |
+| `no_warning`              | `INTEGER`     | `0`          |
+| `opt_compress`            | `INTEGER`     | `1`          |
+| `opt_deduplicate`         | `INTEGER`     | `4`          |
+| `opt_encrypt`             | `INTEGER`     | `2`          |
+| `warn_inconvertible_char` | `INTEGER`     | `1`          |
+
+In the following sections, lengths and offsets are measured in bytes if the large objects are `BLOBs`. Lengths and offsets are measured in characters if the large objects are `CLOBs`.
+
+
+
+ +append compare converttoblob converttoclob copy erase get_storage_limit getlength instr read substr trim write writeappend + +
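+
+As a quick orientation, the following anonymous block is an illustrative sketch only (not part of the original documentation); it assumes a `CLOB` variable can be initialized from a string literal:
+
+```text
+DECLARE
+    v_clob          CLOB := 'Hello';
+BEGIN
+    -- Append 7 characters (', world') to the CLOB, then report
+    -- its new length in characters: 12
+    DBMS_LOB.WRITEAPPEND(v_clob, 7, ', world');
+    DBMS_OUTPUT.PUT_LINE('Length: ' || DBMS_LOB.GETLENGTH(v_clob));
+END;
+```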
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/07_dbms_lock.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/07_dbms_lock.mdx new file mode 100644 index 00000000000..7fd7ea735d7 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/07_dbms_lock.mdx @@ -0,0 +1,29 @@ +--- +title: "DBMS_LOCK" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_lock.html" +--- + +Advanced Server provides support for the `DBMS_LOCK.SLEEP` procedure. + +| Function/Procedure | Return Type | Description | +| ------------------ | ----------- | --------------------------------------------------------- | +| `SLEEP(seconds)` | n/a | Suspends a session for the specified number of `seconds`. | + +Advanced Server's implementation of `DBMS_LOCK` is a partial implementation when compared to Oracle's version. Only `DBMS_LOCK.SLEEP` is supported. + +## SLEEP + +The `SLEEP` procedure suspends the current session for the specified number of seconds. + +```text +SLEEP( NUMBER) +``` + +**Parameters** + +`seconds` + + `seconds` specifies the number of seconds for which you wish to suspend the session. `seconds` can be a fractional value; for example, enter `1.75` to specify one and three-fourths of a second. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/01_get_mv_dependencies.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/01_get_mv_dependencies.mdx new file mode 100644 index 00000000000..5dd34b6c7c4 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/01_get_mv_dependencies.mdx @@ -0,0 +1,40 @@ +--- +title: "GET_MV_DEPENDENCIES" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/get_mv_dependencies.html" +--- + +When given the name of a materialized view, `GET_MV_DEPENDENCIES` returns a list of items that depend on the specified view. The signature is: + +```text +GET_MV_DEPENDENCIES( + IN VARCHAR2, + OUT VARCHAR2); +``` + +**Parameters** + +`list` + + `list` specifies the name of a materialized view, or a comma-separated list of materialized view names. + +`deplist` + + `deplist` is a comma-separated list of schema-qualified dependencies. `deplist` is a `VARCHAR2` value. + +**Examples** + +The following example: + +```text +DECLARE + deplist VARCHAR2(1000); +BEGIN + DBMS_MVIEW.GET_MV_DEPENDENCIES('public.emp_view', deplist); + DBMS_OUTPUT.PUT_LINE('deplist: ' || deplist); +END; +``` + +Displays a list of the dependencies on a materialized view named `public.emp_view`. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/02_refresh.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/02_refresh.mdx new file mode 100644 index 00000000000..53059a30189 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/02_refresh.mdx @@ -0,0 +1,93 @@ +--- +title: "REFRESH" + +legacyRedirectsGenerated: + # This list is generated by a script. 
If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/refresh.html" +--- + +Use the `REFRESH` procedure to refresh all views specified in either a comma-separated list of view names, or a table of `DBMS_UTILITY.UNCL_ARRAY` values. The procedure has two signatures; use the first form when specifying a comma-separated list of view names: + +```text +REFRESH( + IN VARCHAR2, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT FALSE, + IN NUMBER DEFAULT 1, + IN NUMBER DEFAULT 0, + IN NUMBER DEFAULT 0, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT FALSE); +``` + +Use the second form to specify view names in a table of `DBMS_UTILITY.UNCL_ARRAY` values: + +```text +REFRESH( + IN OUT DBMS_UTILITY.UNCL_ARRAY, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT FALSE, + IN NUMBER DEFAULT 1, + IN NUMBER DEFAULT 0, + IN NUMBER DEFAULT 0, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT FALSE); +``` + +**Parameters** + +`list` + + `list` is a `VARCHAR2` value that specifies the name of a materialized view, or a comma-separated list of materialized view names. The names may be schema-qualified. + +`tab` + + `tab` is a table of `DBMS_UTILITY.UNCL_ARRAY` values that specify the name (or names) of a materialized view. + +`method` + + `method` is a `VARCHAR2` value that specifies the refresh method that will be applied to the specified view (or views). The only supported method is `C`; this performs a complete refresh of the view. + +`rollback_seg` + + `rollback_seg` is accepted for compatibility and ignored. The default is `NULL`. + +`push_deferred_rpc` + + `push_deferred_rpc` is accepted for compatibility and ignored. The default is `TRUE`. + +`refresh_after_errors` + + `refresh_after_errors` is accepted for compatibility and ignored. The default is `FALSE`. + +`purge_option` + + `purge_option` is accepted for compatibility and ignored. The default is `1`. + +`parallelism` + + `parallelism` is accepted for compatibility and ignored. The default is `0`. + +`heap_size IN NUMBER DEFAULT 0`, + + `heap_size` is accepted for compatibility and ignored. The default is `0`. + +`atomic_refresh` + + `atomic_refresh` is accepted for compatibility and ignored. The default is `TRUE`. + +`nested` + + `nested` is accepted for compatibility and ignored. The default is `FALSE`. + +**Examples** + +The following example uses `DBMS_MVIEW.REFRESH` to perform a `COMPLETE` refresh on the `public.emp_view` materialized view: + +```text +EXEC DBMS_MVIEW.REFRESH(list => 'public.emp_view', method => 'C'); +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/03_refresh_all_mviews.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/03_refresh_all_mviews.mdx new file mode 100644 index 00000000000..c080ac9f30c --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/03_refresh_all_mviews.mdx @@ -0,0 +1,54 @@ +--- +title: "REFRESH_ALL_MVIEWS" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/refresh_all_mviews.html" +--- + +Use the `REFRESH_ALL_MVIEWS` procedure to refresh any materialized views that have not been refreshed since the table or view on which the view depends has been modified. The signature is: + +```text +REFRESH_ALL_MVIEWS( + OUT BINARY_INTEGER, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT FALSE, + IN BOOLEAN DEFAULT TRUE); +``` + +**Parameters** + +`number_of_failures` + + `number_of_failures` is a `BINARY_INTEGER` that specifies the number of failures that occurred during the refresh operation. + +`method` + + `method` is a `VARCHAR2` value that specifies the refresh method that will be applied to the specified view (or views). The only supported method is `C;` this performs a complete refresh of the view. + +`rollback_seg` + + `rollback_seg` is accepted for compatibility and ignored. The default is `NULL`. + +`refresh_after_errors` + + `refresh_after_errors` is accepted for compatibility and ignored. The default is `FALSE`. + +`atomic_refresh` + + `atomic_refresh` is accepted for compatibility and ignored. The default is `TRUE`. + +**Examples** + +The following example performs a `COMPLETE` refresh on all materialized views: + +```text +DECLARE + errors INTEGER; +BEGIN + DBMS_MVIEW.REFRESH_ALL_MVIEWS(errors, method => 'C'); +END; +``` + +Upon completion, `errors` contains the number of failures. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/04_refresh_dependent.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/04_refresh_dependent.mdx new file mode 100644 index 00000000000..ba3cacd3e11 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/04_refresh_dependent.mdx @@ -0,0 +1,84 @@ +--- +title: "REFRESH_DEPENDENT" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/refresh_dependent.html" +--- + +Use the `REFRESH_DEPENDENT` procedure to refresh all material views that are dependent on the views specified in the call to the procedure. You can specify a comma-separated list or provide the view names in a table of `DBMS_UTILITY.UNCL_ARRAY` values. + +Use the first form of the procedure to refresh all material views that are dependent on the views specified in a comma-separated list: + +```text +REFRESH_DEPENDENT( + OUT BINARY_INTEGER, + IN VARCHAR2, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL + IN BOOLEAN DEFAULT FALSE, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT FALSE); +``` + +Use the second form of the procedure to refresh all material views that are dependent on the views specified in a table of `DBMS_UTILITY.UNCL_ARRAY` values: + +```text +REFRESH_DEPENDENT( + OUT BINARY_INTEGER, + IN DBMS_UTILITY.UNCL_ARRAY, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN BOOLEAN DEFAULT FALSE, + IN BOOLEAN DEFAULT TRUE, + IN BOOLEAN DEFAULT FALSE); +``` + +**Parameters** + +`number_of_failures` + + `number_of_failures` is a `BINARY_INTEGER` that contains the number of failures that occurred during the refresh operation. 
+ +`list` + + `list` is a `VARCHAR2` value that specifies the name of a materialized view, or a comma-separated list of materialized view names. The names may be schema-qualified. + +`tab` + + `tab` is a table of `DBMS_UTILITY.UNCL_ARRAY` values that specify the name (or names) of a materialized view. + +`method` + + `method` is a `VARCHAR2` value that specifies the refresh method that will be applied to the specified view (or views). The only supported method is `C`; this performs a complete refresh of the view. + +`rollback_seg` + + `rollback_seg` is accepted for compatibility and ignored. The default is `NULL`. + +`refresh_after_errors` + + `refresh_after_errors` is accepted for compatibility and ignored. The default is `FALSE`. + +`atomic_refresh` + + `atomic_refresh` is accepted for compatibility and ignored. The default is `TRUE`. + +`nested` + + `nested` is accepted for compatibility and ignored. The default is `FALSE`. + +**Examples** + +The following example performs a `COMPLETE` refresh on all materialized views dependent on a materialized view named `emp_view` that resides in the `public` schema: + +```text +DECLARE + errors INTEGER; +BEGIN + DBMS_MVIEW.REFRESH_DEPENDENT(errors, list => 'public.emp_view', method => +'C'); +END; +``` + +Upon completion, `errors` contains the number of failures. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/index.mdx new file mode 100644 index 00000000000..f32afd6f152 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/08_dbms_mview/index.mdx @@ -0,0 +1,26 @@ +--- +title: "DBMS_MVIEW" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_mview.html" +--- + +Use procedures in the `DBMS_MVIEW` package to manage and refresh materialized views and their dependencies. Advanced Server provides support for the following `DBMS_MVIEW` procedures: + +| Procedure | Return Type | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `GET_MV_DEPENDENCIES(list VARCHAR2, deplist VARCHAR2);` | n/a | The `GET_MV_DEPENDENCIES` procedure returns a list of dependencies for a specified view. | +| `REFRESH(list VARCHAR2, method VARCHAR2, rollback_seg VARCHAR2 , push_deferred_rpc BOOLEAN, refresh_after_errors BOOLEAN , purge_option NUMBER, parallelism NUMBER, heap_size NUMBER , atomic_refresh BOOLEAN , nested BOOLEAN);` | n/a | This variation of the `REFRESH` procedure refreshes all views named in a comma-separated list of view names. | +| `REFRESH(tab dbms_utility.uncl_array, method VARCHAR2, rollback_seg VARCHAR2, push_deferred_rpc BOOLEAN, refresh_after_errors BOOLEAN, purge_option NUMBER, parallelism NUMBER, heap_size NUMBER, atomic_refresh BOOLEAN, nested BOOLEAN);` | n/a | This variation of the `REFRESH` procedure refreshes all views named in a table of `dbms_utility.uncl_array` values. 
| +| `REFRESH_ALL_MVIEWS(number_of_failures BINARY_INTEGER, method VARCHAR2, rollback_seg VARCHAR2, refresh_after_errors BOOLEAN, atomic_refresh BOOLEAN);` | n/a | The `REFRESH_ALL_MVIEWS` procedure refreshes all materialized views. | +| `REFRESH_DEPENDENT(number_of_failures BINARY_INTEGER, list VARCHAR2, method VARCHAR2, rollback_seg VARCHAR2, refresh_after_errors BOOLEAN, atomic_refresh BOOLEAN, nested BOOLEAN);` | n/a | This variation of the `REFRESH_DEPENDENT` procedure refreshes all views that are dependent on the views listed in a comma-separated list. | +| `REFRESH_DEPENDENT(number_of_failures BINARY_INTEGER, tab dbms_utility.uncl_array, method VARCHAR2, rollback_seg VARCHAR2, refresh_after_errors BOOLEAN, atomic_refresh BOOLEAN, nested BOOLEAN);` | n/a | This variation of the `REFRESH_DEPENDENT` procedure refreshes all views that are dependent on the views listed in a table of `dbms_utility.uncl_array` values. | + +Advanced Server's implementation of `DBMS_MVIEW` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported. + +
+ +get_mv_dependencies refresh refresh_all_mviews refresh_dependent + +
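+
+As a quick orientation, the following anonymous block is an illustrative sketch only (not part of the original documentation); it combines the procedures above and assumes a materialized view named `public.emp_view` exists:
+
+```text
+DECLARE
+    deplist         VARCHAR2(1000);
+    errors          INTEGER;
+BEGIN
+    -- List the items that depend on emp_view
+    DBMS_MVIEW.GET_MV_DEPENDENCIES('public.emp_view', deplist);
+    DBMS_OUTPUT.PUT_LINE('deplist: ' || deplist);
+    -- Perform a complete refresh of the dependent views
+    DBMS_MVIEW.REFRESH_DEPENDENT(errors, list => 'public.emp_view',
+        method => 'C');
+    DBMS_OUTPUT.PUT_LINE('failures: ' || errors);
+END;
+```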
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/09_dbms_output.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/09_dbms_output.mdx new file mode 100644 index 00000000000..6aae5df5c95 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/09_dbms_output.mdx @@ -0,0 +1,426 @@ +--- +title: "DBMS_OUTPUT" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_output.html" +--- + +The `DBMS_OUTPUT` package provides the capability to send messages (lines of text) to a message buffer, or get messages from the message buffer. A message buffer is local to a single session. Use the `DBMS_PIPE` package to send messages between sessions. + +The procedures and functions available in the `DBMS_OUTPUT` package are listed in the following table. + +| Function/Procedure | Return Type | Description | +| --------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------- | +| `DISABLE` | n/a | Disable the capability to send and receive messages. | +| `ENABLE(buffer_size)` | n/a | Enable the capability to send and receive messages. | +| `GET_LINE(line OUT, status OUT)` | n/a | Get a line from the message buffer. | +| `GET_LINES(lines OUT, numlines IN OUT)` | n/a | Get multiple lines from the message buffer. | +| `NEW_LINE` | n/a | Puts an end-of-line character sequence. | +| `PUT(item)` | n/a | Puts a partial line without an end-of-line character sequence. | +| `PUT_LINE(item)` | n/a | Puts a complete line with an end-of-line character sequence. | +| `SERVEROUTPUT(stdout)` | n/a | Direct messages from `PUT, PUT_LINE,` or `NEW_LINE` to either standard output or the message buffer. | + +The following table lists the public variables available in the `DBMS_OUTPUT` package. + +| **Public Variables** | **Data Type** | **Value** | **Description** | +| -------------------- | ------------- | --------- | ------------------ | +| `chararr` | `TABLE` | | For message lines. | + +## CHARARR + +The `CHARARR` is for storing multiple message lines. + +```text +TYPE chararr IS TABLE OF VARCHAR2(32767) INDEX BY BINARY_INTEGER; +``` + +## DISABLE + +The `DISABLE` procedure clears out the message buffer. Any messages in the buffer at the time the `DISABLE` procedure is executed will no longer be accessible. Any messages subsequently sent with the `PUT, PUT_LINE,` or `NEW_LINE` procedures are discarded. No error is returned to the sender when the `PUT, PUT_LINE,` or `NEW_LINE` procedures are executed and messages have been disabled. + +Use the `ENABLE` procedure or `SERVEROUTPUT(TRUE)` procedure to re-enable the sending and receiving of messages. + +```text +DISABLE +``` + +**Examples** + +This anonymous block disables the sending and receiving messages in the current session. + +```text +BEGIN + DBMS_OUTPUT.DISABLE; +END; +``` + +## ENABLE + +The `ENABLE` procedure enables the capability to send messages to the message buffer or retrieve messages from the message buffer. Running `SERVEROUTPUT(TRUE)` also implicitly performs the `ENABLE` procedure. + +The destination of a message sent with `PUT, PUT_LINE,` or `NEW_LINE` depends upon the state of `SERVEROUTPUT`. 
+ +- If the last state of `SERVEROUTPUT` is `TRUE`, the message goes to standard output of the command line. +- If the last state of `SERVEROUTPUT` is `FALSE`, the message goes to the message buffer. + +```text +ENABLE [ ( INTEGER) ] +``` + +**Parameter** + +`buffer_size` + + Maximum length of the message buffer in bytes. If a `buffer_size` of less than 2000 is specified, the buffer size is set to 2000. + +**Examples** + +The following anonymous block enables messages. Setting `SERVEROUTPUT(TRUE)` forces them to standard output. + +```text +BEGIN + DBMS_OUTPUT.ENABLE; + DBMS_OUTPUT.SERVEROUTPUT(TRUE); + DBMS_OUTPUT.PUT_LINE('Messages enabled'); +END; + +Messages enabled +``` + +The same effect could have been achieved by simply using `SERVEROUTPUT(TRUE)`. + +```text +BEGIN + DBMS_OUTPUT.SERVEROUTPUT(TRUE); + DBMS_OUTPUT.PUT_LINE('Messages enabled'); +END; + +Messages enabled +``` + +The following anonymous block enables messages, but setting `SERVEROUTPUT(FALSE)` directs messages to the message buffer. + +```text +BEGIN + DBMS_OUTPUT.ENABLE; + DBMS_OUTPUT.SERVEROUTPUT(FALSE); + DBMS_OUTPUT.PUT_LINE('Message sent to buffer'); +END; +``` + +## GET_LINE + +The `GET_LINE` procedure provides the capability to retrieve a line of text from the message buffer. Only text that has been terminated by an end-of-line character sequence is retrieved – that is complete lines generated using `PUT_LINE`, or by a series of `PUT` calls followed by a `NEW_LINE` call. + +```text +GET_LINE( OUT VARCHAR2, OUT INTEGER) +``` + +**Parameters** + +`line` + + Variable receiving the line of text from the message buffer. + +`status` + + 0 if a line was returned from the message buffer, 1 if there was no line to return. + +**Examples** + +The following anonymous block writes the `emp` table out to the message buffer as a comma-delimited string for each row. + +```text +EXEC DBMS_OUTPUT.SERVEROUTPUT(FALSE); + +DECLARE + v_emprec VARCHAR2(120); + CURSOR emp_cur IS SELECT * FROM emp ORDER BY empno; +BEGIN + DBMS_OUTPUT.ENABLE; + FOR i IN emp_cur LOOP + v_emprec := i.empno || ',' || i.ename || ',' || i.job || ',' || + NVL(LTRIM(TO_CHAR(i.mgr,'9999')),'') || ',' || i.hiredate || + ',' || i.sal || ',' || + NVL(LTRIM(TO_CHAR(i.comm,'9990.99')),'') || ',' || i.deptno; + DBMS_OUTPUT.PUT_LINE(v_emprec); + END LOOP; +END; +``` + +The following anonymous block reads the message buffer and inserts the messages written by the prior example into a table named `messages`. The rows in `messages` are then displayed. 
+ +```text +CREATE TABLE messages ( + status INTEGER, + msg VARCHAR2(100) +); + +DECLARE + v_line VARCHAR2(100); + v_status INTEGER := 0; +BEGIN + DBMS_OUTPUT.GET_LINE(v_line,v_status); + WHILE v_status = 0 LOOP + INSERT INTO messages VALUES(v_status, v_line); + DBMS_OUTPUT.GET_LINE(v_line,v_status); + END LOOP; +END; + +SELECT msg FROM messages; + + msg +----------------------------------------------------------------- + 7369,SMITH,CLERK,7902,17-DEC-80 00:00:00,800.00,,20 + 7499,ALLEN,SALESMAN,7698,20-FEB-81 00:00:00,1600.00,300.00,30 + 7521,WARD,SALESMAN,7698,22-FEB-81 00:00:00,1250.00,500.00,30 + 7566,JONES,MANAGER,7839,02-APR-81 00:00:00,2975.00,,20 + 7654,MARTIN,SALESMAN,7698,28-SEP-81 00:00:00,1250.00,1400.00,30 + 7698,BLAKE,MANAGER,7839,01-MAY-81 00:00:00,2850.00,,30 + 7782,CLARK,MANAGER,7839,09-JUN-81 00:00:00,2450.00,,10 + 7788,SCOTT,ANALYST,7566,19-APR-87 00:00:00,3000.00,,20 + 7839,KING,PRESIDENT,,17-NOV-81 00:00:00,5000.00,,10 + 7844,TURNER,SALESMAN,7698,08-SEP-81 00:00:00,1500.00,0.00,30 + 7876,ADAMS,CLERK,7788,23-MAY-87 00:00:00,1100.00,,20 + 7900,JAMES,CLERK,7698,03-DEC-81 00:00:00,950.00,,30 + 7902,FORD,ANALYST,7566,03-DEC-81 00:00:00,3000.00,,20 + 7934,MILLER,CLERK,7782,23-JAN-82 00:00:00,1300.00,,10 +(14 rows) +``` + +## GET_LINES + +The `GET_LINES` procedure provides the capability to retrieve one or more lines of text from the message buffer into a collection. Only text that has been terminated by an end-of-line character sequence is retrieved – that is complete lines generated using `PUT_LINE`, or by a series of `PUT` calls followed by a `NEW_LINE` call. + +```text +GET_LINES( OUT CHARARR, IN OUT INTEGER) +``` + +**Parameters** + +`lines` + + Table receiving the lines of text from the message buffer. See `CHARARR` for a description of `lines.` + +`numlines IN` + + Number of lines to be retrieved from the message buffer. + +`numlines OUT` + + Actual number of lines retrieved from the message buffer. If the output value of `numlines` is less than the input value, then there are no more lines left in the message buffer. + +**Examples** + +The following example uses the `GET_LINES` procedure to store all rows from the `emp` table that were placed on the message buffer, into an array. 
+ +```text +EXEC DBMS_OUTPUT.SERVEROUTPUT(FALSE); + +DECLARE + v_emprec VARCHAR2(120); + CURSOR emp_cur IS SELECT * FROM emp ORDER BY empno; +BEGIN + DBMS_OUTPUT.ENABLE; + FOR i IN emp_cur LOOP + v_emprec := i.empno || ',' || i.ename || ',' || i.job || ',' || + NVL(LTRIM(TO_CHAR(i.mgr,'9999')),'') || ',' || i.hiredate || + ',' || i.sal || ',' || + NVL(LTRIM(TO_CHAR(i.comm,'9990.99')),'') || ',' || i.deptno; + DBMS_OUTPUT.PUT_LINE(v_emprec); + END LOOP; +END; + +DECLARE + v_lines DBMS_OUTPUT.CHARARR; + v_numlines INTEGER := 14; + v_status INTEGER := 0; +BEGIN + DBMS_OUTPUT.GET_LINES(v_lines,v_numlines); + FOR i IN 1..v_numlines LOOP + INSERT INTO messages VALUES(v_numlines, v_lines(i)); + END LOOP; +END; + +SELECT msg FROM messages; + + msg +----------------------------------------------------------------- + 7369,SMITH,CLERK,7902,17-DEC-80 00:00:00,800.00,,20 + 7499,ALLEN,SALESMAN,7698,20-FEB-81 00:00:00,1600.00,300.00,30 + 7521,WARD,SALESMAN,7698,22-FEB-81 00:00:00,1250.00,500.00,30 + 7566,JONES,MANAGER,7839,02-APR-81 00:00:00,2975.00,,20 + 7654,MARTIN,SALESMAN,7698,28-SEP-81 00:00:00,1250.00,1400.00,30 + 7698,BLAKE,MANAGER,7839,01-MAY-81 00:00:00,2850.00,,30 + 7782,CLARK,MANAGER,7839,09-JUN-81 00:00:00,2450.00,,10 + 7788,SCOTT,ANALYST,7566,19-APR-87 00:00:00,3000.00,,20 + 7839,KING,PRESIDENT,,17-NOV-81 00:00:00,5000.00,,10 + 7844,TURNER,SALESMAN,7698,08-SEP-81 00:00:00,1500.00,0.00,30 + 7876,ADAMS,CLERK,7788,23-MAY-87 00:00:00,1100.00,,20 + 7900,JAMES,CLERK,7698,03-DEC-81 00:00:00,950.00,,30 + 7902,FORD,ANALYST,7566,03-DEC-81 00:00:00,3000.00,,20 + 7934,MILLER,CLERK,7782,23-JAN-82 00:00:00,1300.00,,10 +(14 rows) +``` + +## NEW_LINE + +The `NEW_LINE` procedure writes an end-of-line character sequence in the message buffer. + +```text +NEW_LINE +``` + +**Parameter** + +The `NEW_LINE` procedure expects no parameters. + +## PUT + +The `PUT` procedure writes a string to the message buffer. No end-of-line character sequence is written at the end of the string. Use the `NEW_LINE` procedure to add an end-of-line character sequence. + +```text +PUT( VARCHAR2) +``` + +**Parameter** + +`item` + + Text written to the message buffer. + +**Examples** + +The following example uses the `PUT` procedure to display a comma-delimited list of employees from the `emp` table. 
+ +```text +DECLARE + CURSOR emp_cur IS SELECT * FROM emp ORDER BY empno; +BEGIN + FOR i IN emp_cur LOOP + DBMS_OUTPUT.PUT(i.empno); + DBMS_OUTPUT.PUT(','); + DBMS_OUTPUT.PUT(i.ename); + DBMS_OUTPUT.PUT(','); + DBMS_OUTPUT.PUT(i.job); + DBMS_OUTPUT.PUT(','); + DBMS_OUTPUT.PUT(i.mgr); + DBMS_OUTPUT.PUT(','); + DBMS_OUTPUT.PUT(i.hiredate); + DBMS_OUTPUT.PUT(','); + DBMS_OUTPUT.PUT(i.sal); + DBMS_OUTPUT.PUT(','); + DBMS_OUTPUT.PUT(i.comm); + DBMS_OUTPUT.PUT(','); + DBMS_OUTPUT.PUT(i.deptno); + DBMS_OUTPUT.NEW_LINE; + END LOOP; +END; + +7369,SMITH,CLERK,7902,17-DEC-80 00:00:00,800.00,,20 +7499,ALLEN,SALESMAN,7698,20-FEB-81 00:00:00,1600.00,300.00,30 +7521,WARD,SALESMAN,7698,22-FEB-81 00:00:00,1250.00,500.00,30 +7566,JONES,MANAGER,7839,02-APR-81 00:00:00,2975.00,,20 +7654,MARTIN,SALESMAN,7698,28-SEP-81 00:00:00,1250.00,1400.00,30 +7698,BLAKE,MANAGER,7839,01-MAY-81 00:00:00,2850.00,,30 +7782,CLARK,MANAGER,7839,09-JUN-81 00:00:00,2450.00,,10 +7788,SCOTT,ANALYST,7566,19-APR-87 00:00:00,3000.00,,20 +7839,KING,PRESIDENT,,17-NOV-81 00:00:00,5000.00,,10 +7844,TURNER,SALESMAN,7698,08-SEP-81 00:00:00,1500.00,0.00,30 +7876,ADAMS,CLERK,7788,23-MAY-87 00:00:00,1100.00,,20 +7900,JAMES,CLERK,7698,03-DEC-81 00:00:00,950.00,,30 +7902,FORD,ANALYST,7566,03-DEC-81 00:00:00,3000.00,,20 +7934,MILLER,CLERK,7782,23-JAN-82 00:00:00,1300.00,,10 +``` + +## PUT_LINE + +The `PUT_LINE` procedure writes a single line to the message buffer including an end-of-line character sequence. + +```text +PUT_LINE( VARCHAR2) +``` + +**Parameter** + +`item` + + Text to be written to the message buffer. + +**Examples** + +The following example uses the `PUT_LINE` procedure to display a comma-delimited list of employees from the `emp` table. + +```text +DECLARE + v_emprec VARCHAR2(120); + CURSOR emp_cur IS SELECT * FROM emp ORDER BY empno; +BEGIN + FOR i IN emp_cur LOOP + v_emprec := i.empno || ',' || i.ename || ',' || i.job || ',' || + NVL(LTRIM(TO_CHAR(i.mgr,'9999')),'') || ',' || i.hiredate || + ',' || i.sal || ',' || + NVL(LTRIM(TO_CHAR(i.comm,'9990.99')),'') || ',' || i.deptno; + DBMS_OUTPUT.PUT_LINE(v_emprec); + END LOOP; +END; + +7369,SMITH,CLERK,7902,17-DEC-80 00:00:00,800.00,,20 +7499,ALLEN,SALESMAN,7698,20-FEB-81 00:00:00,1600.00,300.00,30 +7521,WARD,SALESMAN,7698,22-FEB-81 00:00:00,1250.00,500.00,30 +7566,JONES,MANAGER,7839,02-APR-81 00:00:00,2975.00,,20 +7654,MARTIN,SALESMAN,7698,28-SEP-81 00:00:00,1250.00,1400.00,30 +7698,BLAKE,MANAGER,7839,01-MAY-81 00:00:00,2850.00,,30 +7782,CLARK,MANAGER,7839,09-JUN-81 00:00:00,2450.00,,10 +7788,SCOTT,ANALYST,7566,19-APR-87 00:00:00,3000.00,,20 +7839,KING,PRESIDENT,,17-NOV-81 00:00:00,5000.00,,10 +7844,TURNER,SALESMAN,7698,08-SEP-81 00:00:00,1500.00,0.00,30 +7876,ADAMS,CLERK,7788,23-MAY-87 00:00:00,1100.00,,20 +7900,JAMES,CLERK,7698,03-DEC-81 00:00:00,950.00,,30 +7902,FORD,ANALYST,7566,03-DEC-81 00:00:00,3000.00,,20 +7934,MILLER,CLERK,7782,23-JAN-82 00:00:00,1300.00,,10 +``` + +## SERVEROUTPUT + +The `SERVEROUTPUT` procedure provides the capability to direct messages to standard output of the command line or to the message buffer. Setting `SERVEROUTPUT(TRUE)` also performs an implicit execution of `ENABLE`. + +The default setting of `SERVEROUTPUT` is implementation dependent. For example, in Oracle SQL\*Plus, `SERVEROUTPUT(FALSE)` is the default. In PSQL, `SERVEROUTPUT(TRUE)` is the default. Also note that in Oracle SQL\*Plus, this setting is controlled using the SQL\*Plus `SET` command, not by a stored procedure as implemented in Advanced Server. 
+ +```text +SERVEROUTPUT( BOOLEAN) +``` + +**Parameter** + +`stdout` + + Set to `TRUE` if subsequent `PUT, PUT_LINE`, or `NEW_LINE` commands are to send text directly to standard output of the command line. Set to `FALSE` if text is to be sent to the message buffer. + +**Examples** + +The following anonymous block sends the first message to the command line and the second message to the message buffer. + +```text +BEGIN + DBMS_OUTPUT.SERVEROUTPUT(TRUE); + DBMS_OUTPUT.PUT_LINE('This message goes to the command line'); + DBMS_OUTPUT.SERVEROUTPUT(FALSE); + DBMS_OUTPUT.PUT_LINE('This message goes to the message buffer'); +END; + +This message goes to the command line +``` + +If within the same session, the following anonymous block is executed, the message stored in the message buffer from the prior example is flushed and displayed on the command line as well as the new message. + +```text +BEGIN + DBMS_OUTPUT.SERVEROUTPUT(TRUE); + DBMS_OUTPUT.PUT_LINE('Flush messages from the buffer'); +END; + +This message goes to the message buffer +Flush messages from the buffer +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/01_create_pipe.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/01_create_pipe.mdx new file mode 100644 index 00000000000..2cd5b3db2f8 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/01_create_pipe.mdx @@ -0,0 +1,58 @@ +--- +title: "CREATE_PIPE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/create_pipe.html" +--- + +The `CREATE_PIPE` function creates an explicit public pipe or an explicit private pipe with a specified name. + +```text + INTEGER CREATE_PIPE( VARCHAR2 + [, INTEGER ] [, BOOLEAN ]) +``` + +**Parameters** + +`pipename` + + Name of the pipe. + +`maxpipesize` + + Maximum capacity of the pipe in bytes. Default is 8192 bytes. + +`private` + + Create a public pipe if set to `FALSE`. Create a private pipe if set to `TRUE.` This is the default. + +`status` + + Status code returned by the operation. 0 indicates successful creation. + +**Examples** + +The following example creates a private pipe named `messages:` + +```text +DECLARE + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.CREATE_PIPE('messages'); + DBMS_OUTPUT.PUT_LINE('CREATE_PIPE status: ' || v_status); +END; +CREATE_PIPE status: 0 +``` + +The following example creates a public pipe named `mailbox:` + +```text +DECLARE + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.CREATE_PIPE('mailbox',8192,FALSE); + DBMS_OUTPUT.PUT_LINE('CREATE_PIPE status: ' || v_status); +END; +CREATE_PIPE status: 0 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/02_next_item_pipe.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/02_next_item_pipe.mdx new file mode 100644 index 00000000000..1b27aad104d --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/02_next_item_pipe.mdx @@ -0,0 +1,117 @@ +--- +title: "NEXT_ITEM_TYPE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
+ - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/next_item_pipe.html" +--- + +The `NEXT_ITEM_TYPE` function returns an integer code identifying the data type of the next data item in a message that has been retrieved into the session’s local message buffer. As each item is moved off of the local message buffer with the `UNPACK_MESSAGE` procedure, the `NEXT_ITEM_TYPE` function will return the data type code for the next available item. A code of 0 is returned when there are no more items left in the message. + +```text + INTEGER NEXT_ITEM_TYPE +``` + +**Parameters** + +`typecode` + + Code identifying the data type of the next data item as shown in the following table. + +| Type Code | Data Type | +| --------- | ------------------ | +| `0` | No more data items | +| `9` | `NUMBER` | +| `11` | `VARCHAR2` | +| `13` | `DATE` | +| `23` | `RAW` | + + **Note**: The type codes list in the table are not compatible with Oracle databases. Oracle assigns a different numbering sequence to the data types. + +**Examples** + +The following example shows a pipe packed with a `NUMBER` item, a `VARCHAR2` item, a `DATE` item, and a `RAW` item. A second anonymous block then uses the `NEXT_ITEM_TYPE` function to display the type code of each item. + +```text +DECLARE + v_number NUMBER := 123; + v_varchar VARCHAR2(20) := 'Character data'; + v_date DATE := SYSDATE; + v_raw RAW(4) := '21222324'; + v_status INTEGER; +BEGIN + DBMS_PIPE.PACK_MESSAGE(v_number); + DBMS_PIPE.PACK_MESSAGE(v_varchar); + DBMS_PIPE.PACK_MESSAGE(v_date); + DBMS_PIPE.PACK_MESSAGE(v_raw); + v_status := DBMS_PIPE.SEND_MESSAGE('datatypes'); + DBMS_OUTPUT.PUT_LINE('SEND_MESSAGE status: ' || v_status); +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('SQLERRM: ' || SQLERRM); + DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE); +END; + +SEND_MESSAGE status: 0 + +DECLARE + v_number NUMBER; + v_varchar VARCHAR2(20); + v_date DATE; + v_timestamp TIMESTAMP; + v_raw RAW(4); + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.RECEIVE_MESSAGE('datatypes'); + DBMS_OUTPUT.PUT_LINE('RECEIVE_MESSAGE status: ' || v_status); + DBMS_OUTPUT.PUT_LINE('----------------------------------'); + + v_status := DBMS_PIPE.NEXT_ITEM_TYPE; + DBMS_OUTPUT.PUT_LINE('NEXT_ITEM_TYPE: ' || v_status); + DBMS_PIPE.UNPACK_MESSAGE(v_number); + DBMS_OUTPUT.PUT_LINE('NUMBER Item : ' || v_number); + DBMS_OUTPUT.PUT_LINE('----------------------------------'); + + v_status := DBMS_PIPE.NEXT_ITEM_TYPE; + DBMS_OUTPUT.PUT_LINE('NEXT_ITEM_TYPE: ' || v_status); + DBMS_PIPE.UNPACK_MESSAGE(v_varchar); + DBMS_OUTPUT.PUT_LINE('VARCHAR2 Item : ' || v_varchar); + DBMS_OUTPUT.PUT_LINE('----------------------------------'); + + v_status := DBMS_PIPE.NEXT_ITEM_TYPE; + DBMS_OUTPUT.PUT_LINE('NEXT_ITEM_TYPE: ' || v_status); + DBMS_PIPE.UNPACK_MESSAGE(v_date); + DBMS_OUTPUT.PUT_LINE('DATE Item : ' || v_date); + DBMS_OUTPUT.PUT_LINE('----------------------------------'); + + v_status := DBMS_PIPE.NEXT_ITEM_TYPE; + DBMS_OUTPUT.PUT_LINE('NEXT_ITEM_TYPE: ' || v_status); + DBMS_PIPE.UNPACK_MESSAGE(v_raw); + DBMS_OUTPUT.PUT_LINE('RAW Item : ' || v_raw); + DBMS_OUTPUT.PUT_LINE('----------------------------------'); + + v_status := DBMS_PIPE.NEXT_ITEM_TYPE; + DBMS_OUTPUT.PUT_LINE('NEXT_ITEM_TYPE: ' || v_status); + DBMS_OUTPUT.PUT_LINE('---------------------------------'); +EXCEPTION + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('SQLERRM: ' || SQLERRM); + DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE); +END; + +RECEIVE_MESSAGE status: 0 
+---------------------------------- +NEXT_ITEM_TYPE: 9 +NUMBER Item : 123 +---------------------------------- +NEXT_ITEM_TYPE: 11 +VARCHAR2 Item : Character data +---------------------------------- +NEXT_ITEM_TYPE: 13 +DATE Item : 02-OCT-07 11:11:43 +---------------------------------- +NEXT_ITEM_TYPE: 23 +RAW Item : 21222324 +---------------------------------- +NEXT_ITEM_TYPE: 0 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/03_pack_message.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/03_pack_message.mdx new file mode 100644 index 00000000000..88836cf3aad --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/03_pack_message.mdx @@ -0,0 +1,21 @@ +--- +title: "PACK_MESSAGE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/pack_message.html" +--- + +The `PACK_MESSAGE` procedure places an item of data in the session’s local message buffer. `PACK_MESSAGE` must be executed at least once before issuing a `SEND_MESSAGE` call. + +```text +PACK_MESSAGE( { DATE | NUMBER | VARCHAR2 | RAW }) +``` + +Use the `UNPACK_MESSAGE` procedure to obtain data items once the message is retrieved using a `RECEIVE_MESSAGE` call. + +**Parameters** + +`item` + + An expression evaluating to any of the acceptable parameter data types. The value is added to the session’s local message buffer. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/04_purge.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/04_purge.mdx new file mode 100644 index 00000000000..ffc5d6f7806 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/04_purge.mdx @@ -0,0 +1,79 @@ +--- +title: "PURGE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/purge.html" +--- + +The `PURGE` procedure removes the unreceived messages from a specified implicit pipe. + +```text +PURGE( VARCHAR2) +``` + +Use the `REMOVE_PIPE` function to delete an explicit pipe. + +**Parameters** + +`pipename` + + Name of the pipe. + +**Examples** + +Two messages are sent on a pipe: + +```text +DECLARE + v_status INTEGER; +BEGIN + DBMS_PIPE.PACK_MESSAGE('Message #1'); + v_status := DBMS_PIPE.SEND_MESSAGE('pipe'); + DBMS_OUTPUT.PUT_LINE('SEND_MESSAGE status: ' || v_status); + + DBMS_PIPE.PACK_MESSAGE('Message #2'); + v_status := DBMS_PIPE.SEND_MESSAGE('pipe'); + DBMS_OUTPUT.PUT_LINE('SEND_MESSAGE status: ' || v_status); +END; + +SEND_MESSAGE status: 0 +SEND_MESSAGE status: 0 +``` + +Receive the first message and unpack it: + +```text +DECLARE + v_item VARCHAR2(80); + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.RECEIVE_MESSAGE('pipe',1); + DBMS_OUTPUT.PUT_LINE('RECEIVE_MESSAGE status: ' || v_status); + DBMS_PIPE.UNPACK_MESSAGE(v_item); + DBMS_OUTPUT.PUT_LINE('Item: ' || v_item); +END; + +RECEIVE_MESSAGE status: 0 +Item: Message #1 +``` + +Purge the pipe: + +```text +EXEC DBMS_PIPE.PURGE('pipe'); +``` + +Try to retrieve the next message. 
The `RECEIVE_MESSAGE` call returns status code 1 indicating it timed out because no message was available. + +```text +DECLARE + v_item VARCHAR2(80); + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.RECEIVE_MESSAGE('pipe',1); + DBMS_OUTPUT.PUT_LINE('RECEIVE_MESSAGE status: ' || v_status); +END; + +RECEIVE_MESSAGE status: 1 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/05_receive_message.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/05_receive_message.mdx new file mode 100644 index 00000000000..8e191e84743 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/05_receive_message.mdx @@ -0,0 +1,36 @@ +--- +title: "RECEIVE_MESSAGE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/receive_message.html" +--- + +The `RECEIVE_MESSAGE` function obtains a message from a specified pipe. + +```text + INTEGER RECEIVE_MESSAGE( VARCHAR2 + [, INTEGER ]) +``` + +**Parameters** + +`pipename` + + Name of the pipe. + +`timeout` + + Wait time (seconds). Default is 86400000 (1000 days). + +`status` + + Status code returned by the operation. + + The possible status codes are: + +| Status Code | Description | +| ----------- | -------------------------------- | +| `0` | Success | +| `1` | Time out | +| `2` | Message too large for the buffer | diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/06_remove_pipe.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/06_remove_pipe.mdx new file mode 100644 index 00000000000..4da0593ff95 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/06_remove_pipe.mdx @@ -0,0 +1,92 @@ +--- +title: "REMOVE_PIPE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/remove_pipe.html" +--- + +The `REMOVE_PIPE` function deletes an explicit private or explicit public pipe. + +```text + INTEGER REMOVE_PIPE( VARCHAR2) +``` + +Use the `REMOVE_PIPE` function to delete explicitly created pipes – i.e., pipes created with the `CREATE_PIPE` function. + +**Parameters** + +`pipename` + + Name of the pipe. + +`status` + + Status code returned by the operation. A status code of 0 is returned even if the named pipe is non-existent. 
+ +**Examples** + +Two messages are sent on a pipe: + +```text +DECLARE + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.CREATE_PIPE('pipe'); + DBMS_OUTPUT.PUT_LINE('CREATE_PIPE status : ' || v_status); + + DBMS_PIPE.PACK_MESSAGE('Message #1'); + v_status := DBMS_PIPE.SEND_MESSAGE('pipe'); + DBMS_OUTPUT.PUT_LINE('SEND_MESSAGE status: ' || v_status); + + DBMS_PIPE.PACK_MESSAGE('Message #2'); + v_status := DBMS_PIPE.SEND_MESSAGE('pipe'); + DBMS_OUTPUT.PUT_LINE('SEND_MESSAGE status: ' || v_status); +END; + +CREATE_PIPE status : 0 +SEND_MESSAGE status: 0 +SEND_MESSAGE status: 0 +``` + +Receive the first message and unpack it: + +```text +DECLARE + v_item VARCHAR2(80); + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.RECEIVE_MESSAGE('pipe',1); + DBMS_OUTPUT.PUT_LINE('RECEIVE_MESSAGE status: ' || v_status); + DBMS_PIPE.UNPACK_MESSAGE(v_item); + DBMS_OUTPUT.PUT_LINE('Item: ' || v_item); +END; + +RECEIVE_MESSAGE status: 0 +Item: Message #1 +``` + +Remove the pipe: + +```text +SELECT DBMS_PIPE.REMOVE_PIPE('pipe') FROM DUAL; + +remove_pipe +------------- + 0 +(1 row) +``` + +Try to retrieve the next message. The `RECEIVE_MESSAGE` call returns status code 1 indicating it timed out because the pipe had been deleted. + +```text +DECLARE + v_item VARCHAR2(80); + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.RECEIVE_MESSAGE('pipe',1); + DBMS_OUTPUT.PUT_LINE('RECEIVE_MESSAGE status: ' || v_status); +END; + +RECEIVE_MESSAGE status: 1 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/07_reset_buffer.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/07_reset_buffer.mdx new file mode 100644 index 00000000000..bc115423fea --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/07_reset_buffer.mdx @@ -0,0 +1,54 @@ +--- +title: "RESET_BUFFER" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/reset_buffer.html" +--- + +The `RESET_BUFFER` procedure resets a “pointer” to the session’s local message buffer back to the beginning of the buffer. This has the effect of causing subsequent `PACK_MESSAGE` calls to overwrite any data items that existed in the message buffer prior to the `RESET_BUFFER` call. + +```text +RESET_BUFFER +``` + +**Examples** + +A message to John is written to the local message buffer. It is replaced by a message to Bob by calling `RESET_BUFFER`. The message is sent on the pipe. + +```text +DECLARE + v_status INTEGER; +BEGIN + DBMS_PIPE.PACK_MESSAGE('Hi, John'); + DBMS_PIPE.PACK_MESSAGE('Can you attend a meeting at 3:00, today?'); + DBMS_PIPE.PACK_MESSAGE('If not, is tomorrow at 8:30 ok with you?'); + DBMS_PIPE.RESET_BUFFER; + DBMS_PIPE.PACK_MESSAGE('Hi, Bob'); + DBMS_PIPE.PACK_MESSAGE('Can you attend a meeting at 9:30, tomorrow?'); + v_status := DBMS_PIPE.SEND_MESSAGE('pipe'); + DBMS_OUTPUT.PUT_LINE('SEND_MESSAGE status: ' || v_status); +END; + +SEND_MESSAGE status: 0 +``` + +The message to Bob is in the received message. 
+ +```text +DECLARE + v_item VARCHAR2(80); + v_status INTEGER; +BEGIN + v_status := DBMS_PIPE.RECEIVE_MESSAGE('pipe',1); + DBMS_OUTPUT.PUT_LINE('RECEIVE_MESSAGE status: ' || v_status); + DBMS_PIPE.UNPACK_MESSAGE(v_item); + DBMS_OUTPUT.PUT_LINE('Item: ' || v_item); + DBMS_PIPE.UNPACK_MESSAGE(v_item); + DBMS_OUTPUT.PUT_LINE('Item: ' || v_item); +END; + +RECEIVE_MESSAGE status: 0 +Item: Hi, Bob +Item: Can you attend a meeting at 9:30, tomorrow? +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/08_send_message.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/08_send_message.mdx new file mode 100644 index 00000000000..72a215e9d5e --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/08_send_message.mdx @@ -0,0 +1,40 @@ +--- +title: "SEND_MESSAGE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/send_message.html" +--- + +The `SEND_MESSAGE` function sends a message from the session’s local message buffer to the specified pipe. + +```text + SEND_MESSAGE( VARCHAR2 [, INTEGER ] + [, INTEGER ]) +``` + +**Parameters** + +`pipename` + + Name of the pipe. + +`timeout` + + Wait time (seconds). Default is 86400000 (1000 days). + +`maxpipesize` + + Maximum capacity of the pipe in bytes. Default is 8192 bytes. + +`status` + + Status code returned by the operation. + + The possible status codes are: + +| Status Code | Description | +| ----------- | -------------------- | +| `0` | Success | +| `1` | Time out | +| `3` | Function interrupted | diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/09_unique_session_name.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/09_unique_session_name.mdx new file mode 100644 index 00000000000..2bd1aab8753 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/09_unique_session_name.mdx @@ -0,0 +1,34 @@ +--- +title: "UNIQUE_SESSION_NAME" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/unique_session_name.html" +--- + +The `UNIQUE_SESSION_NAME` function returns a name, unique to the current session. + +```text + VARCHAR2 UNIQUE_SESSION_NAME +``` + +**Parameters** + +`name` + + Unique session name. + +**Examples** + +The following anonymous block retrieves and displays a unique session name. + +```text +DECLARE + v_session VARCHAR2(30); +BEGIN + v_session := DBMS_PIPE.UNIQUE_SESSION_NAME; + DBMS_OUTPUT.PUT_LINE('Session Name: ' || v_session); +END; + +Session Name: PG$PIPE$5$2752 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/10_unpack_message.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/10_unpack_message.mdx new file mode 100644 index 00000000000..8029ada211f --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/10_unpack_message.mdx @@ -0,0 +1,19 @@ +--- +title: "UNPACK_MESSAGE" + +legacyRedirectsGenerated: + # This list is generated by a script. 
If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/unpack_message.html" +--- + +The `UNPACK_MESSAGE` procedure copies the data items of a message from the local message buffer to a specified program variable. The message must be placed in the local message buffer with the `RECEIVE_MESSAGE` function before using `UNPACK_MESSAGE`. + +```text +UNPACK_MESSAGE( OUT { DATE | NUMBER | VARCHAR2 | RAW }) +``` + +**Parameters** + +`item` + + Type-compatible variable that receives a data item from the local message buffer. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/11_comprehensive_example.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/11_comprehensive_example.mdx new file mode 100644 index 00000000000..be3b33e6939 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/11_comprehensive_example.mdx @@ -0,0 +1,158 @@ +--- +title: "Comprehensive Example" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/comprehensive_example.html" +--- + +The following example uses a pipe as a “mailbox”. The procedures to create the mailbox, add a multi-item message to the mailbox (up to three items), and display the full contents of the mailbox are enclosed in a package named, `mailbox`. + +```text +CREATE OR REPLACE PACKAGE mailbox +IS + PROCEDURE create_mailbox; + PROCEDURE add_message ( + p_mailbox VARCHAR2, + p_item_1 VARCHAR2, + p_item_2 VARCHAR2 DEFAULT 'END', + p_item_3 VARCHAR2 DEFAULT 'END' + ); + PROCEDURE empty_mailbox ( + p_mailbox VARCHAR2, + p_waittime INTEGER DEFAULT 10 + ); +END mailbox; + +CREATE OR REPLACE PACKAGE BODY mailbox +IS + PROCEDURE create_mailbox + IS + v_mailbox VARCHAR2(30); + v_status INTEGER; + BEGIN + v_mailbox := DBMS_PIPE.UNIQUE_SESSION_NAME; + v_status := DBMS_PIPE.CREATE_PIPE(v_mailbox,1000,FALSE); + IF v_status = 0 THEN + DBMS_OUTPUT.PUT_LINE('Created mailbox: ' || v_mailbox); + ELSE + DBMS_OUTPUT.PUT_LINE('CREATE_PIPE failed - status: ' || + v_status); + END IF; + END create_mailbox; + + PROCEDURE add_message ( + p_mailbox VARCHAR2, + p_item_1 VARCHAR2, + p_item_2 VARCHAR2 DEFAULT 'END', + p_item_3 VARCHAR2 DEFAULT 'END' + ) + IS + v_item_cnt INTEGER := 0; + v_status INTEGER; + BEGIN + DBMS_PIPE.PACK_MESSAGE(p_item_1); + v_item_cnt := 1; + IF p_item_2 != 'END' THEN + DBMS_PIPE.PACK_MESSAGE(p_item_2); + v_item_cnt := v_item_cnt + 1; + END IF; + IF p_item_3 != 'END' THEN + DBMS_PIPE.PACK_MESSAGE(p_item_3); + v_item_cnt := v_item_cnt + 1; + END IF; + v_status := DBMS_PIPE.SEND_MESSAGE(p_mailbox); + IF v_status = 0 THEN + DBMS_OUTPUT.PUT_LINE('Added message with ' || v_item_cnt || + ' item(s) to mailbox ' || p_mailbox); + ELSE + DBMS_OUTPUT.PUT_LINE('SEND_MESSAGE in add_message failed - ' || + 'status: ' || v_status); + END IF; + END add_message; + + PROCEDURE empty_mailbox ( + p_mailbox VARCHAR2, + p_waittime INTEGER DEFAULT 10 + ) + IS + v_msgno INTEGER DEFAULT 0; + v_itemno INTEGER DEFAULT 0; + v_item VARCHAR2(100); + v_status INTEGER; + BEGIN + v_status := DBMS_PIPE.RECEIVE_MESSAGE(p_mailbox,p_waittime); + WHILE v_status = 0 LOOP + v_msgno := v_msgno + 1; + DBMS_OUTPUT.PUT_LINE('****** Start message #' || v_msgno 
|| + ' ******'); + BEGIN + LOOP + v_status := DBMS_PIPE.NEXT_ITEM_TYPE; + EXIT WHEN v_status = 0; + DBMS_PIPE.UNPACK_MESSAGE(v_item); + v_itemno := v_itemno + 1; + DBMS_OUTPUT.PUT_LINE('Item #' || v_itemno || ': ' || + v_item); + END LOOP; + DBMS_OUTPUT.PUT_LINE('******* End message #' || v_msgno || + ' *******'); + DBMS_OUTPUT.PUT_LINE('*'); + v_itemno := 0; + v_status := DBMS_PIPE.RECEIVE_MESSAGE(p_mailbox,1); + END; + END LOOP; + DBMS_OUTPUT.PUT_LINE('Number of messages received: ' || v_msgno); + v_status := DBMS_PIPE.REMOVE_PIPE(p_mailbox); + IF v_status = 0 THEN + DBMS_OUTPUT.PUT_LINE('Deleted mailbox ' || p_mailbox); + ELSE + DBMS_OUTPUT.PUT_LINE('Could not delete mailbox - status: ' + || v_status); + END IF; + END empty_mailbox; +END mailbox; +``` + +The following demonstrates the execution of the procedures in `mailbox`. The first procedure creates a public pipe using a name generated by the `UNIQUE_SESSION_NAME` function. + +```text +EXEC mailbox.create_mailbox; + +Created mailbox: PG$PIPE$13$3940 +``` + +Using the mailbox name, any user in the same database with access to the `mailbox` package and `DBMS_PIPE` package can add messages: + +```text +EXEC mailbox.add_message('PG$PIPE$13$3940','Hi, John','Can you attend a +meeting at 3:00, today?','-- Mary'); + +Added message with 3 item(s) to mailbox PG$PIPE$13$3940 + +EXEC mailbox.add_message('PG$PIPE$13$3940','Don''t forget to submit your +report','Thanks,','-- Joe'); + +Added message with 3 item(s) to mailbox PG$PIPE$13$3940 +``` + +Finally, the contents of the mailbox can be emptied: + +```text +EXEC mailbox.empty_mailbox('PG$PIPE$13$3940'); + +****** Start message #1 ****** +Item #1: Hi, John +Item #2: Can you attend a meeting at 3:00, today? +Item #3: -- Mary +******* End message #1 ******* +* +****** Start message #2 ****** +Item #1: Don't forget to submit your report +Item #2: Thanks, +Item #3: Joe +******* End message #2 ******* +* +Number of messages received: 2 +Deleted mailbox PG$PIPE$13$3940 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/index.mdx new file mode 100644 index 00000000000..042a576782c --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/10_dbms_pipe/index.mdx @@ -0,0 +1,42 @@ +--- +title: "DBMS_PIPE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_pipe.html" +--- + +The `DBMS_PIPE` package provides the capability to send messages through a pipe within or between sessions connected to the same database cluster. + +The procedures and functions available in the `DBMS_PIPE` package are listed in the following table: + +| Function/Procedure | Return Type | Description | +| ------------------------------------------------------ | ----------- | --------------------------------------------------------------------------------------------------------------- | +| `CREATE_PIPE(pipename [, maxpipesize ] [, private ])` | `INTEGER` | Explicitly create a private pipe if `private` is “true” (the default) or a public pipe if `private` is “false”. | +| `NEXT_ITEM_TYPE` | `INTEGER` | Determine the data type of the next item in a received message. | +| `PACK_MESSAGE(item)` | n/a | Place `item` in the session’s local message buffer. 
|
+| `PURGE(pipename)` | n/a | Remove unreceived messages from the specified pipe. |
+| `RECEIVE_MESSAGE(pipename [, timeout ])` | `INTEGER` | Get a message from a specified pipe. |
+| `REMOVE_PIPE(pipename)` | `INTEGER` | Delete an explicitly created pipe. |
+| `RESET_BUFFER` | n/a | Reset the local message buffer. |
+| `SEND_MESSAGE(pipename [, timeout ] [, maxpipesize ])` | `INTEGER` | Send a message on a pipe. |
+| `UNIQUE_SESSION_NAME` | `VARCHAR2` | Obtain a unique session name. |
+| `UNPACK_MESSAGE(item OUT)` | n/a | Retrieve the next data item from a message into a type-compatible variable, `item`. |
+
+Pipes are categorized as implicit or explicit. An *implicit pipe* is created if a reference is made to a pipe name that was not previously created by the `CREATE_PIPE` function. For example, if the `SEND_MESSAGE` function is executed using a non-existent pipe name, a new implicit pipe is created with that name. An *explicit pipe* is created using the `CREATE_PIPE` function, whereby the first parameter specifies the pipe name for the new pipe.
+
+Pipes are also categorized as private or public. A *private pipe* can only be accessed by the user who created the pipe. Even a superuser cannot access a private pipe that was created by another user. A *public pipe* can be accessed by any user who has access to the `DBMS_PIPE` package.
+
+A public pipe can only be created by using the `CREATE_PIPE` function with the third parameter set to `FALSE`. The `CREATE_PIPE` function can be used to create a private pipe by setting the third parameter to `TRUE` or by omitting the third parameter. All implicit pipes are private.
+
+The individual data items or “lines” of a message are first built in a *local message buffer*, unique to the current session. The `PACK_MESSAGE` procedure builds the message in the session’s local message buffer. The `SEND_MESSAGE` function is then used to send the message through the pipe.
+
+Receipt of a message involves the reverse operation. The `RECEIVE_MESSAGE` function is used to get a message from the specified pipe. The message is written to the session’s local message buffer. The `UNPACK_MESSAGE` procedure is then used to transfer the message data items from the message buffer to program variables. If a pipe contains multiple messages, `RECEIVE_MESSAGE` gets the messages in *FIFO* (first-in-first-out) order.
+
+Each session maintains separate message buffers for messages created with the `PACK_MESSAGE` procedure and messages retrieved by the `RECEIVE_MESSAGE` function. Thus, messages can be both built and received in the same session. However, if consecutive `RECEIVE_MESSAGE` calls are made, only the message from the last `RECEIVE_MESSAGE` call is preserved in the local message buffer.
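+
+As a minimal sketch of this flow (the pipe name `demo_pipe` is illustrative, and in practice the sender and receiver are typically different sessions):
+
+```text
+DECLARE
+    v_status INTEGER;
+    v_item   VARCHAR2(80);
+BEGIN
+    -- Sender: build a message in the local message buffer, then send it.
+    -- SEND_MESSAGE creates the implicit pipe 'demo_pipe' if it does not exist.
+    DBMS_PIPE.PACK_MESSAGE('Hello');
+    v_status := DBMS_PIPE.SEND_MESSAGE('demo_pipe');
+
+    -- Receiver: wait up to 10 seconds for a message, then unpack its item.
+    v_status := DBMS_PIPE.RECEIVE_MESSAGE('demo_pipe', 10);
+    DBMS_PIPE.UNPACK_MESSAGE(v_item);
+    DBMS_OUTPUT.PUT_LINE('Received: ' || v_item);
+END;
+```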
+
+<div class="toctree" maxdepth="3">
+
+create_pipe next_item_pipe pack_message purge receive_message remove_pipe reset_buffer send_message unique_session_name unpack_message comprehensive_example
+
+</div>
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/11_dbms_profiler.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/11_dbms_profiler.mdx new file mode 100644 index 00000000000..aa11b637744 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/11_dbms_profiler.mdx @@ -0,0 +1,838 @@ +--- +title: "DBMS_PROFILER" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_profiler.html" +--- + +The `DBMS_PROFILER` package collects and stores performance information about the PL/pgSQL and SPL statements that are executed during a performance profiling session; use the functions and procedures listed below to control the profiling tool. + +| Function/Procedure | Return Type | Description | +| --------------------------------------------------------------- | ------------------------ | ---------------------------------------------------------------------------------------------------------------- | +| `FLUSH_DATA` | Status Code or Exception | Flushes performance data collected in the current session without terminating the session (profiling continues). | +| `GET_VERSION(major OUT, minor OUT)` | n/a | Returns the version number of this package. | +| `INTERNAL_VERSION_CHECK` | Status Code | Confirms that the current version of the profiler will work with the current database. | +| `PAUSE_PROFILER` | Status Code or Exception | Pause data collection. | +| `RESUME_PROFILER` | Status Code or Exception | Resume data collection. | +| `START_PROFILER(run_comment, run_comment1 [, run_number OUT ])` | Status Code or Exception | Start data collection. | +| `STOP_PROFILER` | Status Code or Exception | Stop data collection and flush performance data to the `PLSQL_PROFILER_RAWDATA` table. | + +The functions within the `DBMS_PROFILER` package return a status code to indicate success or failure; the `DBMS_PROFILER` procedures raise an exception only if they encounter a failure. The status codes and messages returned by the functions, and the exceptions raised by the procedures are listed in the table below. + +| Status Code | Message | Exception | Description | +| ----------- | --------------- | ------------------ | ------------------------------------------------------- | +| `-1` | `error version` | `version_mismatch` | The profiler version and the database are incompatible. | +| `0` | `success` | n/a | The operation completed successfully. | +| `1` | `error_param` | `profiler_error` | The operation received an incorrect parameter. | +| `2` | `error_io` | `profiler_error` | The data flush operation has failed. | + +## FLUSH_DATA + +The `FLUSH_DATA` function/procedure flushes the data collected in the current session without terminating the profiler session. The data is flushed to the tables described in the Advanced Server Performance Features Guide. The function and procedure signatures are: + +```text + INTEGER FLUSH_DATA + +FLUSH_DATA +``` + +**Parameters** + +`status` + + Status code returned by the operation. + +## GET_VERSION + +The `GET_VERSION` procedure returns the version of `DBMS_PROFILER`. The procedure signature is: + +```text +GET_VERSION( OUT INTEGER, OUT INTEGER) +``` + +**Parameters** + +`major` + + The major version number of `DBMS_PROFILER`. + +`minor` + + The minor version number of `DBMS_PROFILER`. 
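+
+**Examples**
+
+The following anonymous block is a minimal sketch (the variable names are illustrative) that retrieves and displays the profiler version:
+
+```text
+DECLARE
+    v_major INTEGER;
+    v_minor INTEGER;
+BEGIN
+    DBMS_PROFILER.GET_VERSION(v_major, v_minor);
+    DBMS_OUTPUT.PUT_LINE('DBMS_PROFILER version: ' || v_major || '.' || v_minor);
+END;
+```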
+
+## INTERNAL_VERSION_CHECK
+
+The `INTERNAL_VERSION_CHECK` function confirms that the current version of `DBMS_PROFILER` will work with the current database. The function signature is:
+
+```text
+<status> INTEGER INTERNAL_VERSION_CHECK
+```
+
+**Parameters**
+
+`status`
+
+ Status code returned by the operation.
+
+## PAUSE_PROFILER
+
+The `PAUSE_PROFILER` function/procedure pauses a profiling session. The function and procedure signatures are:
+
+```text
+<status> INTEGER PAUSE_PROFILER
+
+PAUSE_PROFILER
+```
+
+**Parameters**
+
+`status`
+
+ Status code returned by the operation.
+
+## RESUME_PROFILER
+
+The `RESUME_PROFILER` function/procedure resumes a paused profiling session. The function and procedure signatures are:
+
+```text
+<status> INTEGER RESUME_PROFILER
+
+RESUME_PROFILER
+```
+
+**Parameters**
+
+`status`
+
+ Status code returned by the operation.
+
+## START_PROFILER
+
+The `START_PROFILER` function/procedure starts a data collection session. The function and procedure signatures are:
+
+```text
+<status> INTEGER START_PROFILER(<run_comment> TEXT := SYSDATE,
+  <run_comment1> TEXT := '' [, <run_number> OUT INTEGER ])
+
+START_PROFILER(<run_comment> TEXT := SYSDATE,
+  <run_comment1> TEXT := '' [, <run_number> OUT INTEGER ])
+```
+
+**Parameters**
+
+`run_comment`
+
+ A user-defined comment for the profiler session. The default value is `SYSDATE`.
+
+`run_comment1`
+
+ An additional user-defined comment for the profiler session. The default value is ''.
+
+`run_number`
+
+ The session number of the profiler session.
+
+`status`
+
+ Status code returned by the operation.
+
+## STOP_PROFILER
+
+The `STOP_PROFILER` function/procedure stops a profiling session and flushes the performance information to the `DBMS_PROFILER` tables and view. The function and procedure signatures are:
+
+```text
+<status> INTEGER STOP_PROFILER
+
+STOP_PROFILER
+```
+
+**Parameters**
+
+`status`
+
+ Status code returned by the operation.
+
+## Using DBMS_PROFILER
+
+The `DBMS_PROFILER` package collects and stores performance information about the PL/pgSQL and SPL statements that are executed during a profiling session; you can review the performance information in the tables and views provided by the profiler.
+
+`DBMS_PROFILER` works by recording a set of performance-related counters and timers for each line of PL/pgSQL or SPL code that executes within a profiling session. The counters and timers are stored in a table named `SYS.PLSQL_PROFILER_DATA`. When you complete a profiling session, `DBMS_PROFILER` writes a row to the performance statistics table for each line of PL/pgSQL or SPL code that executed within the session. For example, if you execute the following function:
+
+```text
+1 - CREATE OR REPLACE FUNCTION getBalance(acctNumber INTEGER)
+2 - RETURNS NUMERIC AS $$
+3 - DECLARE
+4 -     result NUMERIC;
+5 - BEGIN
+6 -     SELECT INTO result balance FROM acct WHERE id = acctNumber;
+7 -
+8 -     IF (result IS NULL) THEN
+9 -         RAISE INFO 'Balance is null';
+10-     END IF;
+11-
+12-     RETURN result;
+13- END;
+14- $$ LANGUAGE 'plpgsql';
+```
+
+`DBMS_PROFILER` adds one `PLSQL_PROFILER_DATA` entry for each line of code within the `getBalance()` function (including blank lines and comments). The entry corresponding to the `SELECT` statement shows that it executed exactly one time and required a very small amount of time to execute. On the other hand, the entry corresponding to the `RAISE INFO` statement shows that it executed either once or not at all (depending on the value of the `balance` column).
+
+Some of the lines in this function contain no executable code, so the performance statistics for those lines will always contain zero values.
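+
+For example, a session could be profiled and its per-line statistics reviewed with a sequence such as the following sketch (it assumes the `getBalance()` function above exists, that the `acct` table holds an account with `id = 1`, and that this is the first recorded run, so its `runid` is 1):
+
+```text
+EXEC dbms_profiler.start_profiler('profile getBalance');
+
+SELECT getBalance(1);
+
+EXEC dbms_profiler.stop_profiler;
+
+SELECT * FROM plsql_profiler_data WHERE runid = 1;
+```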
+ +To start a profiling session, invoke the `DBMS_PROFILER.START_PROFILER` function (or procedure). Once you've invoked `START_PROFILER`, Advanced Server will profile every PL/pgSQL or SPL function, procedure, trigger, or anonymous block that your session executes until you either stop or pause the profiler (by calling `STOP_PROFILER` or `PAUSE_PROFILER`). + +It is important to note that when you start (or resume) the profiler, the profiler will only gather performance statistics for functions/procedures/triggers that start after the call to `START_PROFILER` (or `RESUME_PROFILER`). + +While the profiler is active, Advanced Server records a large set of timers and counters in memory; when you invoke the `STOP_PROFILER` (or `FLUSH_DATA`) function/procedure, `DBMS_PROFILER` writes those timers and counters to a set of three tables: + +- `SYS.PLSQL_PROFILER_RAWDATA` + + Contains the performance counters and timers for each statement executed within the session. + +- `SYS.PLSQL_PROFILER_RUNS` + + Contains a summary of each run (aggregating the information found in `PLSQL_PROFILER_RAWDATA`). + +- `SYS.PLSQL_PROFILER_UNITS` + + Contains a summary of each code unit (function, procedure, trigger, or anonymous block) executed within a session. + +In addition, `DBMS_PROFILER` defines a view, `SYS.PLSQL_PROFILER_DATA`, which contains a subset of the `PLSQL_PROFILER_RAWDATA` table. + +Please note that a non-superuser may gather profiling information, but may not view that profiling information unless a superuser grants specific privileges on the profiling tables (stored in the `SYS` schema). This permits a non-privileged user to gather performance statistics without exposing information that the administrator may want to keep secret. + +### Querying the DBMS_PROFILER Tables and View + +The following step-by-step example uses `DBMS_PROFILER` to retrieve performance information for procedures, functions, and triggers included in the sample data distributed with Advanced Server. + +1. Open the EDB-PSQL command line, and establish a connection to the Advanced Server database. Use an `EXEC` statement to start the profiling session: + +```text +acctg=# EXEC dbms_profiler.start_profiler('profile list_emp'); + +EDB-SPL Procedure successfully completed +``` + +!!! Note + (The call to `start_profiler()` includes a comment that `DBMS_PROFILER` associates with the profiler session). + +2. Then call the `list_emp` function: + +```text +acctg=# SELECT list_emp(); +INFO: EMPNO ENAME +INFO: ----- ------- +INFO: 7369 SMITH +INFO: 7499 ALLEN +INFO: 7521 WARD +INFO: 7566 JONES +INFO: 7654 MARTIN +INFO: 7698 BLAKE +INFO: 7782 CLARK +INFO: 7788 SCOTT +INFO: 7839 KING +INFO: 7844 TURNER +INFO: 7876 ADAMS +INFO: 7900 JAMES +INFO: 7902 FORD +INFO: 7934 MILLER + list_emp +---------- + +(1 row) +``` + +3. Stop the profiling session with a call to `dbms_profiler.stop_profiler:` + +```text +acctg=# EXEC dbms_profiler.stop_profiler; + +EDB-SPL Procedure successfully completed +``` + +4. Start a new session with the `dbms_profiler.start_profiler` function (followed by a new comment): + +```text +acctg=# EXEC dbms_profiler.start_profiler('profile get_dept_name and +emp_sal_trig'); + +EDB-SPL Procedure successfully completed +``` + +5. Invoke the `get_dept_name` function: + +```text +acctg=# SELECT get_dept_name(10); + get_dept_name +--------------- + ACCOUNTING +(1 row) +``` + +6. 
Execute an `UPDATE` statement that causes a trigger to execute: + +```text +acctg=# UPDATE memp SET sal = 500 WHERE empno = 7902; +INFO: Updating employee 7902 +INFO: ..Old salary: 3000.00 +INFO: ..New salary: 500.00 +INFO: ..Raise: -2500.00 +INFO: User enterprisedb updated employee(s) on 04-FEB-14 +UPDATE 1 +``` + +7. Terminate the profiling session and flush the performance information to the profiling tables: + +```text +acctg=# EXEC dbms_profiler.stop_profiler; + +EDB-SPL Procedure successfully completed +``` + +8. Now, query the `plsql_profiler_runs` table to view a list of the profiling sessions, arranged by `runid:` + +```text +acctg=# SELECT * FROM plsql_profiler_runs; + runid | related_run | run_owner | run_date + | run_comment | run_total_time | run_system_info + | run_comment1 | spare1 +-------+-------------+--------------+---------------------------+------------- +---------------------------+----------------+-----------------+-------------- ++-------- + 1 | | enterprisedb | 04-FEB-14 09:32:48.874315 | profile + list_emp | 4154 | + | | + 2 | | enterprisedb | 04-FEB-14 09:41:30.546503 | profile + get_dept_name and emp_sal_trig | 2088 | + | | +(2 rows) +``` + +9. Query the `plsql_profiler_units` table to view the amount of time consumed by each unit (each function, procedure, or trigger): + +```text +acctg=# SELECT * FROM plsql_profiler_units; + runid | unit_number | unit_type | unit_owner | + unit_name | unit_timestamp | total_time | spare1 | spare2 +-------+-------------+-----------+--------------+---------------------------- +-----+----------------+------------+--------+-------- + 1 | 16999 | FUNCTION | enterprisedb | + list_emp() | | 4 | | + 2 | 17002 | FUNCTION | enterprisedb | + user_audit_trig() | | 1 | | + 2 | 17000 | FUNCTION | enterprisedb | get_dept_name(p_deptno + numeric) | | 1 | | + 2 | 17004 | FUNCTION | enterprisedb | + emp_sal_trig() | | 1 | | +(4 rows) +``` + +10. 
Query the `plsql_profiler_rawdata` table to view a list of the wait event counters and wait event times: + +```text +acctg=# SELECT runid, sourcecode, func_oid, line_number, exec_count, +tuples_returned, time_total FROM plsql_profiler_rawdata; + + runid | sourcecode | + func_oid | line_number | exec_count | tuples_returned | time_total +-------+-----------------------------------------------------------------+--- +-------+-------------+------------+-----------------+------------ + 1 | DECLARE + | 16999 | 1 | 0 | 0 | 0 + 1 | v_empno NUMERIC(4); + | 16999 | 2 | 0 | 0 | 0 + 1 | v_ename VARCHAR(10); + | 16999 | 3 | 0 | 0 | 0 + 1 | emp_cur CURSOR FOR + | 16999 | 4 | 0 | 0 | 0 + 1 | SELECT empno, ename FROM memp ORDER BY empno; + | 16999 | 5 | 0 | 0 | 0 + 1 | BEGIN + | 16999 | 6 | 0 | 0 | 0 + 1 | OPEN emp_cur; + | 16999 | 7 | 0 | 0 | 0 + 1 | RAISE INFO 'EMPNO ENAME'; + | 16999 | 8 | 1 | 0 | 0.001621 + 1 | RAISE INFO '----- -------'; + | 16999 | 9 | 1 | 0 | 0.000301 + 1 | LOOP + | 16999 | 10 | 1 | 0 | 4.6e-05 + 1 | FETCH emp_cur INTO v_empno, v_ename; + | 16999 | 11 | 1 | 0 | 0.001114 + 1 | EXIT WHEN NOT FOUND; + | 16999 | 12 | 15 | 0 | 0.000206 + 1 | RAISE INFO '% %', v_empno, v_ename; + | 16999 | 13 | 15 | 0 | 8.3e-05 + 1 | END LOOP; + | 16999 | 14 | 14 | 0 | 0.000773 + 1 | CLOSE emp_cur; + | 16999 | 15 | 0 | 0 | 0 + 1 | RETURN; + | 16999 | 16 | 1 | 0 | 1e-05 + 1 | END; + | 16999 | 17 | 1 | 0 | 0 + 1 | + | 16999 | 18 | 0 | 0 | 0 + 2 | DECLARE + | 17002 | 1 | 0 | 0 | 0 + 2 | v_action VARCHAR(24); + | 17002 | 2 | 0 | 0 | 0 + 2 | v_text TEXT; + | 17002 | 3 | 0 | 0 | 0 + 2 | BEGIN + | 17002 | 4 | 0 | 0 | 0 + 2 | IF TG_OP = 'INSERT' THEN + | 17002 | 5 | 0 | 0 | 0 + 2 | v_action := ' added employee(s) on '; + | 17002 | 6 | 1 | 0 | 0.000143 + 2 | ELSIF TG_OP = 'UPDATE' THEN + | 17002 | 7 | 0 | 0 | 0 + 2 | v_action := ' updated employee(s) on '; + | 17002 | 8 | 0 | 0 | 0 + 2 | ELSIF TG_OP = 'DELETE' THEN + | 17002 | 9 | 1 | 0 | 3.2e-05 + 2 | v_action := ' deleted employee(s) on '; + | 17002 | 10 | 0 | 0 | 0 + 2 | END IF; + | 17002 | 11 | 0 | 0 | 0 + 2 | v_text := 'User ' || USER || v_action || CURRENT_DATE; + | 17002 | 12 | 0 | 0 | 0 + 2 | RAISE INFO ' %', v_text; + | 17002 | 13 | 1 | 0 | 0.000383 + 2 | RETURN NULL; + | 17002 | 14 | 1 | 0 | 6.3e-05 + 2 | END; + | 17002 | 15 | 1 | 0 | 3.6e-05 + 2 | + | 17002 | 16 | 0 | 0 | 0 + 2 | DECLARE + | 17000 | 1 | 0 | 0 | 0 + 2 | v_dname VARCHAR(14); + | 17000 | 2 | 0 | 0 | 0 + 2 | BEGIN + | 17000 | 3 | 0 | 0 | 0 + 2 | SELECT INTO v_dname dname FROM dept WHERE deptno = p_deptno; + | 17000 | 4 | 0 | 0 | 0 + 2 | RETURN v_dname; + | 17000 | 5 | 1 | 0 | 0.000647 + 2 | IF NOT FOUND THEN + | 17000 | 6 | 1 | 0 | 2.6e-05 + 2 | RAISE INFO 'Invalid department number %', p_deptno; + | 17000 | 7 | 0 | 0 | 0 + 2 | RETURN ''; + | 17000 | 8 | 0 | 0 | 0 + 2 | END IF; + | 17000 | 9 | 0 | 0 | 0 + 2 | END; + | 17000 | 10 | 0 | 0 | 0 + 2 | + | 17000 | 11 | 0 | 0 | 0 + 2 | DECLARE + | 17004 | 1 | 0 | 0 | 0 + 2 | sal_diff NUMERIC(7,2); + | 17004 | 2 | 0 | 0 | 0 + 2 | BEGIN + | 17004 | 3 | 0 | 0 | 0 + 2 | IF TG_OP = 'INSERT' THEN + | 17004 | 4 | 0 | 0 | 0 + 2 | RAISE INFO 'Inserting employee %', NEW.empno; + | 17004 | 5 | 1 | 0 | 8.4e-05 + 2 | RAISE INFO '..New salary: %', NEW.sal; + | 17004 | 6 | 0 | 0 | 0 + 2 | RETURN NEW; + | 17004 | 7 | 0 | 0 | 0 + 2 | END IF; + | 17004 | 8 | 0 | 0 | 0 + 2 | IF TG_OP = 'UPDATE' THEN + | 17004 | 9 | 0 | 0 | 0 + 2 | sal_diff := NEW.sal - OLD.sal; + | 17004 | 10 | 1 | 0 | 0.000355 + 2 | RAISE INFO 'Updating employee %', OLD.empno; + | 17004 | 11 | 1 | 
0 | 0.000177 + 2 | RAISE INFO '..Old salary: %', OLD.sal; + | 17004 | 12 | 1 | 0 | 5.5e-05 + 2 | RAISE INFO '..New salary: %', NEW.sal; + | 17004 | 13 | 1 | 0 | 3.1e-05 + 2 | RAISE INFO '..Raise : %', sal_diff; + | 17004 | 14 | 1 | 0 | 2.8e-05 + 2 | RETURN NEW; + | 17004 | 15 | 1 | 0 | 2.7e-05 + 2 | END IF; + | 17004 | 16 | 1 | 0 | 1e-06 + 2 | IF TG_OP = 'DELETE' THEN + | 17004 | 17 | 0 | 0 | 0 + 2 | RAISE INFO 'Deleting employee %', OLD.empno; + | 17004 | 18 | 0 | 0 | 0 + 2 | RAISE INFO '..Old salary: %', OLD.sal; + | 17004 | 19 | 0 | 0 | 0 + 2 | RETURN OLD; + | 17004 | 20 | 0 | 0 | 0 + 2 | END IF; + | 17004 | 21 | 0 | 0 | 0 + 2 | END; + | 17004 | 22 | 0 | 0 | 0 + 2 | + | 17004 | 23 | 0 | 0 | 0 + (68 rows) +                                                                                                                    +``` + +11. Query the `plsql_profiler_data` view to review a subset of the information found in `plsql_profiler_rawdata` table: + +```text +acctg=# SELECT * FROM plsql_profiler_data; +runid | unit_number | line# | total_occur | total_time | min_time | max_time +| spare1 | spare2 | spare3 | spare4 +-------+-------------+-------+-------------+------------+----------+--------- +-+--------+--------+--------+-------- + 1 | 16999 | 1 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 2 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 3 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 4 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 5 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 6 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 7 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 8 | 1 | 0.001621 | 0.001621 | + 0.001621 | | | | + 1 | 16999 | 9 | 1 | 0.000301 | 0.000301 | + 0.000301 | | | | + 1 | 16999 | 10 | 1 | 4.6e-05 | 4.6e-05 | 4.6e- + 05 | | | | + 1 | 16999 | 11 | 1 | 0.001114 | 0.001114 | + 0.001114 | | | | + 1 | 16999 | 12 | 15 | 0.000206 | 5e-06 | 7.8e- + 05 | | | | + 1 | 16999 | 13 | 15 | 8.3e-05 | 2e-06 | 4.7e- + 05 | | | | + 1 | 16999 | 14 | 14 | 0.000773 | 4.7e-05 | + 0.000116 | | | | + 1 | 16999 | 15 | 0 | 0 | 0 | + 0 | | | | + 1 | 16999 | 16 | 1 | 1e-05 | 1e-05 | 1e- + 05 | | | | + 1 | 16999 | 17 | 1 | 0 | 0 | + 0 | | | | + 1 | 16999 | 18 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 1 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 2 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 3 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 4 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 5 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 6 | 1 | 0.000143 | 0.000143 | + 0.000143 | | | | + 2 | 17002 | 7 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 8 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 9 | 1 | 3.2e-05 | 3.2e-05 | 3.2e- + 05 | | | | + 2 | 17002 | 10 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 11 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 12 | 0 | 0 | 0 | + 0 | | | | + 2 | 17002 | 13 | 1 | 0.000383 | 0.000383 | + 0.000383 | | | | + 2 | 17002 | 14 | 1 | 6.3e-05 | 6.3e-05 | 6.3e- + 05 | | | | + 2 | 17002 | 15 | 1 | 3.6e-05 | 3.6e-05 | 3.6e- + 05 | | | | + 2 | 17002 | 16 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 1 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 2 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 3 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 4 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 5 | 1 | 0.000647 | 0.000647 | + 0.000647 | | | | + 2 | 17000 | 6 | 1 | 2.6e-05 | 2.6e-05 | 2.6e- + 05 | | | | + 2 | 17000 | 7 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 8 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 9 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 10 | 0 | 0 | 0 | + 0 | | | | + 2 | 17000 | 11 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 1 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 2 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 3 | 0 
| 0 | 0 | + 0 | | | | + 2 | 17004 | 4 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 5 | 1 | 8.4e-05 | 8.4e-05 | 8.4e- + 05 | | | | + 2 | 17004 | 6 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 7 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 8 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 9 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 10 | 1 | 0.000355 | 0.000355 | + 0.000355 | | | | + 2 | 17004 | 11 | 1 | 0.000177 | 0.000177 | + 0.000177 | | | | + 2 | 17004 | 12 | 1 | 5.5e-05 | 5.5e-05 | 5.5e- + 05 | | | | + 2 | 17004 | 13 | 1 | 3.1e-05 | 3.1e-05 | 3.1e- + 05 | | | | + 2 | 17004 | 14 | 1 | 2.8e-05 | 2.8e-05 | 2.8e- + 05 | | | | + 2 | 17004 | 15 | 1 | 2.7e-05 | 2.7e-05 | 2.7e- + 05 | | | | + 2 | 17004 | 16 | 1 | 1e-06 | 1e-06 | 1e- + 06 | | | | + 2 | 17004 | 17 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 18 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 19 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 20 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 21 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 22 | 0 | 0 | 0 | + 0 | | | | + 2 | 17004 | 23 | 0 | 0 | 0 | + 0 | | | | +(68 rows) +``` + +### DBMS_PROFILER - Reference + +The Advanced Server installer creates the following tables and views that you can query to review PL/SQL performance profile information: + +| Table Name | Description | +| ------------------------ | -------------------------------------------------------------------------------------------------------------------- | +| `PLSQL_PROFILER_RUNS` | Table containing information about all profiler runs, organized by `runid`. | +| `PLSQL_PROFILER_UNITS` | Table containing information about all profiler runs, organized by unit. | +| `PLSQL_PROFILER_DATA` | View containing performance statistics. | +| `PLSQL_PROFILER_RAWDATA` | Table containing the performance statistics `and` the extended performance statistics for DRITA counters and timers. | + +#### PLSQL_PROFILER_RUNS + +The `PLSQL_PROFILER_RUNS` table contains the following columns: + +| Column | Data Type | Description | +| ----------------- | ----------------------------- | ---------------------------------------------- | +| `runid` | `INTEGER (NOT NULL)` | Unique identifier (`plsql_profiler_runnumber`) | +| `related_run` | `INTEGER` | The `runid` of a related run. | +| `run_owner` | `TEXT` | The role that recorded the profiling session. | +| `run_date` | `TIMESTAMP WITHOUT TIME ZONE` | The profiling session start time. | +| `run_comment` | `TEXT` | User comments relevant to this run | +| `run_total_time` | `BIGINT` | Run time (in microseconds) | +| `run_system_info` | `TEXT` | Currently Unused | +| `run_comment1` | `TEXT` | Additional user comments | +| `spare1` | `TEXT` | Currently Unused | + +#### PLSQL_PROFILER_UNITS + +The `PLSQL_PROFILER_UNITS` table contains the following columns: + +| Column | Data Type | Description | +| ---------------- | ----------------------------- | -------------------------------------------------------------------------------- | +| `runid` | `INTEGER` | Unique identifier (`plsql_profiler_runnumber`) | +| `unit_number` | `OID` | Corresponds to the OID of the row in the pg_proc table that identifies the unit. | +| `unit_type` | `TEXT` | PL/SQL function, procedure, trigger or anonymous block | +| `unit_owner` | `TEXT` | The identity of the role that owns the unit. | +| `unit_name` | `TEXT` | The complete signature of the unit. | +| `unit_timestamp` | `TIMESTAMP WITHOUT TIME ZONE` | Creation date of the unit (currently NULL). 
| +| `total_time` | `BIGINT` | Time spent within the unit (in milliseconds) | +| `spare1` | `BIGINT` | Currently Unused | +| `spare2` | `BIGINT` | Currently Unused | + +#### PLSQL_PROFILER_DATA + +The `PLSQL_PROFILER_DATA` view contains the following columns: + +| Column | Data Type | Description | +| ------------- | ------------------ | -------------------------------------------------------- | +| `runid` | `INTEGER` | Unique identifier (`plsql_profiler_runnumber`) | +| `unit_number` | `OID` | Object ID of the unit that contains the current line. | +| `line#` | `INTEGER` | Current line number of the profiled workload. | +| `total_occur` | `BIGINT` | The number of times that the line was executed. | +| `total_time` | `DOUBLE PRECISION` | The amount of time spent executing the line (in seconds) | +| `min_time` | `DOUBLE PRECISION` | The minimum execution time for the line. | +| `max_time` | `DOUBLE PRECISION` | The maximum execution time for the line. | +| `spare1` | `NUMBER` | Currently Unused | +| `spare2` | `NUMBER` | Currently Unused | +| `spare3` | `NUMBER` | Currently Unused | +| `spare4` | `NUMBER` | Currently Unused | + +#### PLSQL_PROFILER_RAWDATA + +The `PLSQL_PROFILER_RAWDATA` table contains the statistical and wait events information that is found in the `PLSQL_PROFILER_DATA` view, as well as the performance statistics returned by the DRITA counters and timers. + +| Column | Data Type | Description | +| ---------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `runid` | `INTEGER` | The run identifier `(plsql_profiler_runnumber)`. | +| `sourcecode` | `TEXT` | The individual line of profiled code. | +| `func_oid` | `OID` | Object ID of the unit that contains the current line. | +| `line_number` | `INTEGER` | Current line number of the profiled workload. | +| `exec_count` | `BIGINT` | The number of times that the line was executed. | +| `tuples_returned` | `BIGINT` | Currently Unused | +| `time_total` | `DOUBLE PRECISION` | The amount of time spent executing the line (in seconds) | +| `time_shortest` | `DOUBLE PRECISION` | The minimum execution time for the line. | +| `time_longest` | `DOUBLE PRECISION` | The maximum execution time for the line. | +| `num_scans` | `BIGINT` | Currently Unused | +| `tuples_fetched` | `BIGINT` | Currently Unused | +| `tuples_inserted` | `BIGINT` | Currently Unused | +| `tuples_updated` | `BIGINT` | Currently Unused | +| `tuples_deleted` | `BIGINT` | Currently Unused | +| `blocks_fetched` | `BIGINT` | Currently Unused | +| `blocks_hit` | `BIGINT` | Currently Unused | +| `wal_write` | `BIGINT` | A server has waited for a write to the write-ahead log buffer (expect this value to be high). | +| `wal_flush` | `BIGINT` | A server has waited for the write-ahead log to flush to disk. | +| `wal_file_sync` | `BIGINT` | A server has waited for the write-ahead log to sync to disk (related to the `wal_sync_method` parameter which, by default, is 'fsync' - better performance can be gained by changing this parameter to `open_sync`). | +| `db_file_read` | `BIGINT` | A server has waited for the completion of a read (from disk). | +| `db_file_write` | `BIGINT` | A server has waited for the completion of a write (to disk). 
| +| `db_file_sync` | `BIGINT` | A server has waited for the operating system to flush all changes to disk. | +| `db_file_extend` | `BIGINT` | A server has waited for the operating system while adding a new page to the end of a file. | +| `sql_parse` | `BIGINT` | Currently Unused. | +| `query_plan` | `BIGINT` | A server has generated a query plan. | +| `other_lwlock_acquire` | `BIGINT` | A server has waited for other light-weight lock to protect data. | +| `shared_plan_cache_collision` | `BIGINT` | A server has waited for the completion of the `shared_plan_cache_collision` event. | +| `shared_plan_cache_insert` | `BIGINT` | A server has waited for the completion of the `shared_plan_cache_insert` event. | +| `shared_plan_cache_hit` | `BIGINT` | A server has waited for the completion of the `shared_plan_cache_hit` event. | +| `shared_plan_cache_miss` | `BIGINT` | A server has waited for the completion of the `shared_plan_cache_miss` event. | +| `shared_plan_cache_lock` | `BIGINT` | A server has waited for the completion of the `shared_plan_cache_lock` event. | +| `shared_plan_cache_busy` | `BIGINT` | A server has waited for the completion of the `shared_plan_cache_busy` event. | +| `shmemindexlock` | `BIGINT` | A server has waited to find or allocate space in the shared memory. | +| `oidgenlock` | `BIGINT` | A server has waited to allocate or assign an OID. | +| `xidgenlock` | `BIGINT` | A server has waited to allocate or assign a transaction ID. | +| `procarraylock` | `BIGINT` | A server has waited to get a snapshot or clearing a transaction ID at transaction end. | +| `sinvalreadlock` | `BIGINT` | A server has waited to retrieve or remove messages from shared invalidation queue. | +| `sinvalwritelock` | `BIGINT` | A server has waited to add a message to the shared invalidation queue. | +| `walbufmappinglock` | `BIGINT` | A server has waited to replace a page in WAL buffers. | +| `walwritelock` | `BIGINT` | A server has waited for WAL buffers to be written to disk. | +| `controlfilelock` | `BIGINT` | A server has waited to read or update the control file or creation of a new WAL file. | +| `checkpointlock` | `BIGINT` | A server has waited to perform a checkpoint. | +| `clogcontrollock` | `BIGINT` | A server has waited to read or update the transaction status. | +| `subtranscontrollock` | `BIGINT` | A server has waited to read or update the sub-transaction information. | +| `multixactgenlock` | `BIGINT` | A server has waited to read or update the shared multixact state. | +| `multixactoffsetcontrollock` | `BIGINT` | A server has waited to read or update multixact offset mappings. | +| `multixactmembercontrollock` | `BIGINT` | A server has waited to read or update multixact member mappings. | +| `relcacheinitlock` | `BIGINT` | A server has waited to read or write the relation cache initialization file. | +| `checkpointercommlock` | `BIGINT` | A server has waited to manage the fsync requests. | +| `twophasestatelock` | `BIGINT` | A server has waited to read or update the state of prepared transactions. | +| `tablespacecreatelock` | `BIGINT` | A server has waited to create or drop the tablespace. | +| `btreevacuumlock` | `BIGINT` | A server has waited to read or update the vacuum related information for a B-tree index. | +| `addinshmeminitlock` | `BIGINT` | A server has waited to manage space allocation in shared memory. | +| `autovacuumlock` | `BIGINT` | The autovacuum launcher waiting to read or update the current state of autovacuum workers. 
| +| `autovacuumschedulelock` | `BIGINT` | A server has waited to ensure that the table selected for a vacuum still needs vacuuming. | +| `syncscanlock` | `BIGINT` | A server has waited to get the start location of a scan on a table for synchronized scans. | +| `relationmappinglock` | `BIGINT` | A server has waited to update the relation map file used to store catalog to file node mapping. | +| `asyncctllock` | `BIGINT` | A server has waited to read or update shared notification state. | +| `asyncqueuelock` | `BIGINT` | A server has waited to read or update the notification messages. | +| `serializablexacthashlock` | `BIGINT` | A server has waited to retrieve or store information about serializable transactions. | +| `serializablefinishedlistlock` | `BIGINT` | A server has waited to access the list of finished serializable transactions. | +| `serializablepredicatelocklistlock` | `BIGINT` | A server has waited to perform an operation on a list of locks held by serializable transactions. | +| `oldserxidlock` | `BIGINT` | A server has waited to read or record the conflicting serializable transactions. | +| `syncreplock` | `BIGINT` | A server has waited to read or update information about synchronous replicas. | +| `backgroundworkerlock` | `BIGINT` | A server has waited to read or update the background worker state. | +| `dynamicsharedmemorycontrollock` | `BIGINT` | A server has waited to read or update the dynamic shared memory state. | +| `autofilelock` | `BIGINT` | A server has waited to update the `postgresql.auto.conf` file. | +| `replicationslotallocationlock` | `BIGINT` | A server has waited to allocate or free a replication slot. | +| `replicationslotcontrollock` | `BIGINT` | A server has waited to read or update replication slot state. | +| `committscontrollock` | `BIGINT` | A server has waited to read or update transaction commit timestamps. | +| `committslock` | `BIGINT` | A server has waited to read or update the last value set for the transaction timestamp. | +| `replicationoriginlock` | `BIGINT` | A server has waited to set up, drop, or use replication origin. | +| `multixacttruncationlock` | `BIGINT` | A server has waited to read or truncate multixact information. | +| `oldsnapshottimemaplock` | `BIGINT` | A server has waited to read or update old snapshot control information. | +| `backendrandomlock` | `BIGINT` | A server has waited to generate a random number. | +| `logicalrepworkerlock` | `BIGINT` | A server has waited for the action on logical replication worker to finish. | +| `clogtruncationlock` | `BIGINT` | A server has waited to truncate the write-ahead log or waiting for write-ahead log truncation to finish. | +| `bulkloadlock` | `BIGINT` | A server has waited for the `bulkloadlock` to bulk upload the data. | +| `edbresourcemanagerlock` | `BIGINT` | The `edbresourcemanagerlock` provides detail about edb resource manager lock module. | +| `wal_write_time` | `BIGINT` | The amount of time that the server has waited for a `wal_write` wait event to write to the write-ahead log buffer (expect this value to be high). | +| `wal_flush_time` | `BIGINT` | The amount of time that the server has waited for a `wal_flush` wait event to write-ahead log to flush to disk. | +| `wal_file_sync_time` | `BIGINT` | The amount of time that the server has waited for a `wal_file_sync` wait event to write-ahead log to sync to disk (related to the wal_sync_method parameter which, by default, is 'fsync' - better performance can be gained by changing this parameter to open_sync). 
| +| `db_file_read_time` | `BIGINT` | The amount of time that the server has waited for the `db_file_read` wait event for completion of a read (from disk). | +| `db_file_write_time` | `BIGINT` | The amount of time that the server has waited for the `db_file_write` wait event for completion of a write (to disk). | +| `db_file_sync_time` | `BIGINT` | The amount of time that the server has waited for the `db_file_sync` wait event to sync all changes to disk. | +| `db_file_extend_time` | `BIGINT` | The amount of time that the server has waited for the `db_file_extend` wait event while adding a new page to the end of a file. | +| `sql_parse_time` | `BIGINT` | The amount of time that the server has waited for the `sql_parse` wait event to parse a SQL statement. | +| `query_plan_time` | `BIGINT` | The amount of time that the server has waited for the `query_plan` wait event to compute the execution plan for a SQL statement. | +| `other_lwlock_acquire_time` | `BIGINT` | The amount of time that the server has waited for the `other_lwlock_acquire` wait event to protect data. | +| `shared_plan_cache_collision_time` | `BIGINT` | The amount of time that the server has waited for the `shared_plan_cache_collision` wait event. | +| `shared_plan_cache_insert_time` | `BIGINT` | The amount of time that the server has waited for the `shared_plan_cache_insert` wait event. | +| `shared_plan_cache_hit_time` | `BIGINT` | The amount of time that the server has waited for the `shared_plan_cache_hit` wait event. | +| `shared_plan_cache_miss_time` | `BIGINT` | The amount of time that the server has waited for the `shared_plan_cache_miss` wait event. | +| `shared_plan_cache_lock_time` | `BIGINT` | The amount of time that the server has waited for the `shared_plan_cache_lock` wait event. | +| `shared_plan_cache_busy_time` | `BIGINT` | The amount of time that the server has waited for the `shared_plan_cache_busy` wait event. | +| `shmemindexlock_time` | `BIGINT` | The amount of time that the server has waited for the `shmemindexlock` wait event to find or allocate space in the shared memory. | +| `oidgenlock_time` | `BIGINT` | The amount of time that the server has waited for the `oidgenlock` wait event to allocate or assign an OID. | +| `xidgenlock_time` | `BIGINT` | The amount of time that the server has waited for `xidgenlock` wait event to allocate or assign a transaction ID. | +| `procarraylock_time` | `BIGINT` | The amount of time that the server has waited for a `procarraylock` wait event to clear a transaction ID at transaction end. | +| `sinvalreadlock_time` | `BIGINT` | The amount of time that the server has waited for a `sinvalreadlock` wait event to retrieve or remove messages from shared invalidation queue. | +| `sinvalwritelock_time` | `BIGINT` | The amount of time that the server has waited for a `sinvalwritelock` wait event to add a message to the shared invalidation queue. | +| `walbufmappinglock_time` | `BIGINT` | The amount of time that the server has waited for a `walbufmappinglock` wait event to replace a page in WAL buffers. | +| `walwritelock_time` | `BIGINT` | The amount of time that the server has waited for a `walwritelock` wait event to write the WAL buffers to disk. | +| `controlfilelock_time` | `BIGINT` | The amount of time that the server has waited for a `controlfilelock` wait event to read or update the control file or to create a new WAL file. | +| `checkpointlock_time` | `BIGINT` | The amount of time that the server has waited for a `checkpointlock` wait event to perform a checkpoint. 
| +| `clogcontrollock_time` | `BIGINT` | The amount of time that the server has waited for a `clogcontrollock` wait event to read or update the transaction status. | +| `subtranscontrollock_time` | `BIGINT` | The amount of time that the server has waited for the `subtranscontrollock` wait event to read or update the sub-transaction information. | +| `multixactgenlock_time` | `BIGINT` | The amount of time that the server has waited for the `multixactgenlock` wait event to read or update the shared multixact state. | +| `multixactoffsetcontrollock_time` | `BIGINT` | The amount of time that the server has waited for the `multixactoffsetcontrollock` wait event to read or update multixact offset mappings. | +| `multixactmembercontrollock_time` | `BIGINT` | The amount of time that the server has waited for the `multixactmembercontrollock` wait event to read or update multixact member mappings. | +| `relcacheinitlock_time` | `BIGINT` | The amount of time that the server has waited for the `relcacheinitlock` wait event to read or write the relation cache initialization file. | +| `checkpointercommlock_time` | `BIGINT` | The amount of time that the server has waited for the `checkpointercommlock` wait event to manage the fsync requests. | +| `twophasestatelock_time` | `BIGINT` | The amount of time that the server has waited for the `twophasestatelock` wait event to read or update the state of prepared transactions. | +| `tablespacecreatelock_time` | `BIGINT` | The amount of time that the server has waited for the `tablespacecreatelock` wait event to create or drop the tablespace. | +| `btreevacuumlock_time` | `BIGINT` | The amount of time that the server has waited for the `btreevacuumlock` wait event to read or update the vacuum related information for a B-tree index. | +| `addinshmeminitlock_time` | `BIGINT` | The amount of time that the server has waited for the `addinshmeminitlock` wait event to manage space allocation in shared memory. | +| `autovacuumlock_time` | `BIGINT` | The amount of time that the server has waited for the `autovacuumlock` wait event to read or update the current state of autovacuum workers. | +| `autovacuumschedulelock_time` | `BIGINT` | The amount of time that the server has waited for the `autovacuumschedulelock` wait event to ensure that the table selected for a vacuum still needs vacuuming. | +| `syncscanlock_time` | `BIGINT` | The amount of time that the server has waited for the `syncscanlock` wait event to get the start location of a scan on a table for synchronized scans. | +| `relationmappinglock_time` | `BIGINT` | The amount of time that the server has waited for the `relationmappinglock` wait event to update the relation map file used to store catalog to file node mapping. | +| `asyncctllock_time` | `BIGINT` | The amount of time that the server has waited for the `asyncctllock` wait event to read or update shared notification state. | +| `asyncqueuelock_time` | `BIGINT` | The amount of time that the server has waited for the `asyncqueuelock` wait event to read or update the notification messages. | +| `serializablexacthashlock_time` | `BIGINT` | The amount of time that the server has waited for the `serializablexacthashlock` wait event to retrieve or store information about serializable transactions. | +| `serializablefinishedlistlock_time` | `BIGINT` | The amount of time that the server has waited for the `serializablefinishedlistlock` wait event to access the list of finished serializable transactions. 
| +| `serializablepredicatelocklistlock_time` | `BIGINT` | The amount of time that the server has waited for the `serializablepredicatelocklistlock` wait event to perform an operation on a list of locks held by serializable transactions. | +| `oldserxidlock_time` | `BIGINT` | The amount of time that the server has waited for the `oldserxidlock` wait event to read or record the conflicting serializable transactions. | +| `syncreplock_time` | `BIGINT` | The amount of time that the server has waited for the `syncreplock` wait event to read or update information about synchronous replicas. | +| `backgroundworkerlock_time` | `BIGINT` | The amount of time that the server has waited for the `backgroundworkerlock` wait event to read or update the background worker state. | +| `dynamicsharedmemorycontrollock_time` | `BIGINT` | The amount of time that the server has waited for the `dynamicsharedmemorycontrollock` wait event to read or update the dynamic shared memory state. | +| `autofilelock_time` | `BIGINT` | The amount of time that the server has waited for the `autofilelock` wait event to update the `postgresql.auto.conf` file. | +| `replicationslotallocationlock_time` | `BIGINT` | The amount of time that the server has waited for the `replicationslotallocationlock` wait event to allocate or free a replication slot. | +| `replicationslotcontrollock_time` | `BIGINT` | The amount of time that the server has waited for the `replicationslotcontrollock` wait event to read or update replication slot state. | +| `committscontrollock_time` | `BIGINT` | The amount of time that the server has waited for the `committscontrollock` wait event to read or update transaction commit timestamps. | +| `committslock_time` | `BIGINT` | The amount of time that the server has waited for the `committslock` wait event to read or update the last value set for the transaction timestamp. | +| `replicationoriginlock_time` | `BIGINT` | The amount of time that the server has waited for the `replicationoriginlock` wait event to set up, drop, or use replication origin. | +| `multixacttruncationlock_time` | `BIGINT` | The amount of time that the server has waited for the `multixacttruncationlock` wait event to read or truncate multixact information. | +| `oldsnapshottimemaplock_time` | `BIGINT` | The amount of time that the server has waited for the `oldsnapshottimemaplock` wait event to read or update old snapshot control information. | +| `backendrandomlock_time` | `BIGINT` | The amount of time that the server has waited for the `backendrandomlock` wait event to generate a random number. | +| `logicalrepworkerlock_time` | `BIGINT` | The amount of time that the server has waited for the `logicalrepworkerlock` wait event for an action on logical replication worker to finish. | +| `clogtruncationlock_time` | `BIGINT` | The amount of time that the server has waited for the `clogtruncationlock` wait event to truncate the write-ahead log or waiting for write-ahead log truncation to finish. | +| `bulkloadlock_time` | `BIGINT` | The amount of time that the server has waited for the `bulkloadlock` wait event to bulk upload the data. | +| `edbresourcemanagerlock_time` | `BIGINT` | The amount of time that the server has waited for the `edbresourcemanagerlock` wait event. | +| `totalwaits` | `BIGINT` | The total number of event waits. | +| `totalwaittime` | `BIGINT` | The total time spent waiting for an event. 
| diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/12_dbms_random.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/12_dbms_random.mdx new file mode 100644 index 00000000000..2a237c8fc29 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/12_dbms_random.mdx @@ -0,0 +1,235 @@ +--- +title: "DBMS_RANDOM" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_random.html" +--- + +The `DBMS_RANDOM` package provides a number of methods to generate random values. The procedures and functions available in the `DBMS_RANDOM` package are listed in the following table. + +| Function/Procedure | Return Type | Description | +| ------------------ | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| `INITIALIZE(val)` | n/a | Initializes the `DBMS_RANDOM` package with the specified seed `value`. Deprecated, but supported for backward compatibility. | +| `NORMAL()` | `NUMBER` | Returns a random `NUMBER`. | +| `RANDOM` | `INTEGER` | Returns a random `INTEGER` with a value greater than or equal to -2^31 and less than 2^31. Deprecated, but supported for backward compatibility. | +| `SEED(val)` | n/a | Resets the seed with the specified `value`. | +| `SEED(val)` | n/a | Resets the seed with the specified `value`. | +| `STRING(opt, len)` | `VARCHAR2` | Returns a random string. | +| `TERMINATE` | n/a | `TERMINATE` has no effect. Deprecated, but supported for backward compatibility. | +| `VALUE` | `NUMBER` | Returns a random number with a value greater than or equal to `0` and less than `1`, with 38 digit precision. | +| `VALUE(low, high)` | `NUMBER` | Returns a random number with a value greater than or equal to `low` and less than `high`. | + +## INITIALIZE + +The `INITIALIZE` procedure initializes the `DBMS_RANDOM` package with a seed value. The signature is: + +```text +INITIALIZE( IN INTEGER) +``` + +This procedure should be considered deprecated; it is included for backward compatibility only. + +**Parameters** + +`val` + + `val` is the seed value used by the `DBMS_RANDOM` package algorithm. + +**Example** + +The following code snippet demonstrates a call to the `INITIALIZE` procedure that initializes the `DBMS_RANDOM` package with the seed value, `6475`. + +```text +DBMS_RANDOM.INITIALIZE(6475); +``` + +## NORMAL + +The `NORMAL` function returns a random number of type `NUMBER`. The signature is: + +```text + NUMBER NORMAL() +``` + +**Parameters** + +`result` + + `result` is a random value of type `NUMBER`. + +**Example** + +The following code snippet demonstrates a call to the `NORMAL` function: + +```text +x:= DBMS_RANDOM.NORMAL(); +``` + +## RANDOM + +The `RANDOM` function returns a random `INTEGER` value that is greater than or equal to -2 ^31 and less than 2 ^31. The signature is: + +```text + INTEGER RANDOM() +``` + +This function should be considered deprecated; it is included for backward compatibility only. + +**Parameters** + +`result` + + `result` is a random value of type `INTEGER`. + +**Example** + +The following code snippet demonstrates a call to the `RANDOM` function. 
The call returns a random number: + +```text +x := DBMS_RANDOM.RANDOM(); +``` + +## SEED + +The first form of the `SEED` procedure resets the seed value for the `DBMS_RANDOM` package with an `INTEGER` value. The `SEED` procedure is available in two forms; the signature of the first form is: + +```text +SEED( IN INTEGER) +``` + +**Parameters** + +`val` + + `val` is the seed value used by the `DBMS_RANDOM` package algorithm. + +**Example** + +The following code snippet demonstrates a call to the `SEED` procedure; the call sets the seed value at `8495`. + +```text +DBMS_RANDOM.SEED(8495); +``` + +## SEED + +The second form of the `SEED` procedure resets the seed value for the `DBMS_RANDOM` package with a string value. The `SEED` procedure is available in two forms; the signature of the second form is: + +```text +SEED( IN VARCHAR2) +``` + +**Parameters** + +`val` + + `val` is the seed value used by the `DBMS_RANDOM` package algorithm. + +**Example** + +The following code snippet demonstrates a call to the `SEED` procedure; the call sets the seed value to `abc123`. + +```text +DBMS_RANDOM.SEED('abc123'); +``` + +## STRING + +The `STRING` function returns a random `VARCHAR2` string in a user-specified format. The signature of the `STRING` function is: + +```text + VARCHAR2 STRING( IN CHAR, IN NUMBER) +``` + +**Parameters** + +`opt` + + Formatting option for the returned string. `option` may be: + +| **Option** | **Specifies Formatting Option** | +| ---------- | ------------------------------- | +| `u` or `U` | Uppercase alpha string | +| `l` or `L` | Lowercase alpha string | +| `a` or `A` | Mixed case string | +| `x` or `X` | Uppercase alpha-numeric string | +| `p` or `P` | Any printable characters | + +`len` + + The length of the returned string. + +`result` + + `result` is a random value of type `VARCHAR2`. + +**Example** + +The following code snippet demonstrates a call to the `STRING` function; the call returns a random alpha-numeric character string that is 10 characters long. + +```text +x := DBMS_RANDOM.STRING('X', 10); +``` + +## TERMINATE + +The `TERMINATE` procedure has no effect. The signature is: + +```text +TERMINATE +``` + +The `TERMINATE` procedure should be considered deprecated; the procedure is supported for compatibility only. + +## VALUE + +The `VALUE` function returns a random `NUMBER` that is greater than or equal to 0, and less than 1, with 38 digit precision. The `VALUE` function has two forms; the signature of the first form is: + +```text + NUMBER VALUE() +``` + +**Parameters** + +`result` + + `result` is a random value of type `NUMBER`. + +**Example** + +The following code snippet demonstrates a call to the `VALUE` function. The call returns a random `NUMBER`: + +```text +x := DBMS_RANDOM.VALUE(); +``` + +## VALUE + +The `VALUE` function returns a random `NUMBER` with a value that is between user-specified boundaries. The `VALUE` function has two forms; the signature of the second form is: + +```text + NUMBER VALUE( IN NUMBER, IN NUMBER) +``` + +**Parameters** + +`low` + + `low` specifies the lower boundary for the random value. The random value may be equal to `low`. + +`high` + + `high` specifies the upper boundary for the random value; the random value will be less than `high`. + +`result` + + `result` is a random value of type `NUMBER`. + +**Example** + +The following code snippet demonstrates a call to the `VALUE` function. 
The call returns a random `NUMBER` with a value that is greater than or equal to 1 and less than 100: + +```text +x := DBMS_RANDOM.VALUE(1, 100); +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/13_dbms_redact.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/13_dbms_redact.mdx new file mode 100644 index 00000000000..a3e0d5d3981 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/13_dbms_redact.mdx @@ -0,0 +1,708 @@ +--- +title: "DBMS_REDACT" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_redact.html" +--- + +The `DBMS_REDACT` package enables the redacting or masking of data returned by a query. The `DBMS_REDACT` package provides procedures to create, alter, enable, disable, and drop data redaction policies. The procedures available in the `DBMS_REDACT` package are listed in the following table. + +| Function/Procedure | Function or Procedure | Return Type | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | ----------- | --------------------------------------------------------------------- | +| `ADD_POLICY(object_schema, object_name, policy_name, policy_description, column_name, column_description, function_type, function_parameters, expression, enable, regexp_pattern, regexp_replace_string, regexp_position, regexp_occurence, regexp_match_parameter, custom_function_expression)` | Procedure | n/a | Adds a data redaction policy. | +| `ALTER_POLICY(object_schema, object_name, policy_name, action, column_name, function_type, function_parameters, expression, regexp_pattern, regexp_replace_string, regexp_position, regexp_occurence, regexp_match_parameter, policy_description, column_description, custom_function_expression)` | Procedure | n/a | Alters the existing data redaction policy. | +| `DISABLE_POLICY(object_schema, object_name, policy_name)` | Procedure | n/a | Disables the existing data redaction policy. | +| `ENABLE_POLICY(object_schema, object_name, policy_name)` | Procedure | n/a | Enables a previously disabled data redaction policy. | +| `DROP_POLICY(object_schema, object_name, policy_name)` | Procedure | n/a | Drops a data redaction policy. | +| `UPDATE_FULL_REDACTION_VALUES(number_val, binfloat_val, bindouble_val, char_val, varchar_val, nchar_val, nvarchar_val, datecol_val, ts_val, tswtz_val, blob_val, clob_val, nclob_val)` | Procedure | n/a | Updates the full redaction default values for the specified datatype. | + +The data redaction feature uses the `DBMS_REDACT` package to define policies or conditions to redact data in a column based on the table column type and redaction type. + +You must be the owner of a table to create or change its data redaction policies. Users can be exempted from column redaction policies; by default, the table owner and superusers are exempt from all policies. + +## Using DBMS_REDACT Constants and Function Parameters + +The `DBMS_REDACT` package uses the following constants and redacts the column data by using any one of the data redaction types.
The redaction type can be decided based on the `function_type` parameter of the `dbms_redact.add_policy` and `dbms_redact.alter_policy` procedures. The following table lists the values for the `function_type` parameter of `dbms_redact.add_policy` and `dbms_redact.alter_policy`. + +| Constant | Type | Value | Description | +| --------- | --------- | ----- | ---------------------------------------------------------------------------------------------------------- | +| `NONE` | `INTEGER` | `0` | No redaction, zero effect on the result of a query against the table. | +| `FULL` | `INTEGER` | `1` | Full redaction, redacts full values of the column data. | +| `PARTIAL` | `INTEGER` | `2` | Partial redaction, redacts a portion of the column data. | +| `RANDOM` | `INTEGER` | `4` | Random redaction, each query results in a different random value depending on the datatype of the column. | +| `REGEXP` | `INTEGER` | `5` | Regular expression-based redaction, searches for the pattern of data to redact. | +| `CUSTOM` | `INTEGER` | `99` | Custom redaction type. | + +The following table shows the values for the `action` parameter of `dbms_redact.alter_policy`. + +| Constant | Type | Value | Description | +| ------------------------ | --------- | ----- | ---------------------------------------------------------------------------------------------------------------- | +| `ADD_COLUMN` | `INTEGER` | `1` | Adds a column to the redaction policy. | +| `DROP_COLUMN` | `INTEGER` | `2` | Drops a column from the redaction policy. | +| `MODIFY_EXPRESSION` | `INTEGER` | `3` | Modifies the expression of a redaction policy. The redaction is applied when the expression evaluates to `TRUE`. | +| `MODIFY_COLUMN` | `INTEGER` | `4` | Modifies a column in the redaction policy to change the redaction function type or function parameter. | +| `SET_POLICY_DESCRIPTION` | `INTEGER` | `5` | Sets the redaction policy description. | +| `SET_COLUMN_DESCRIPTION` | `INTEGER` | `6` | Sets a description for the redaction performed on the column. | + +Partial data redaction enables you to redact only a portion of the column data. To use partial redaction, you must set the `dbms_redact.add_policy` procedure `function_type` parameter to `dbms_redact.partial` and use the `function_parameters` parameter to specify the partial redaction behavior. + +The data redaction feature provides a predefined format to configure policies that use the following datatypes: + +- `Character` +- `Number` +- `Datetime` + +The following table describes the format descriptors for partial redaction for each datatype. The examples show how to perform a redaction for a string datatype (in this scenario, a Social Security Number (SSN)), a `Number` datatype, and a `DATE` datatype. + +| Datatype | Format Descriptor | Description | Examples | +| --------- | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Character | `REDACT_PARTIAL_INPUT_FORMAT` | Specifies the input format.
Enter `V` for each character from the input string to be possibly redacted. Enter `F` for each character from the input string that can be considered as a separator such as blank spaces or hyphens. | Consider `'VVVFVVFVVVV,VVV-VV-VVVV,X,1,5'` for masking first 5 digits of SSN strings such as `123-45-6789`, adding hyphen to format it and thereby resulting in strings such as `XXX-XX-6789.` The field value `VVVFVVFVVVV` for matching SSN strings such as `123-45-6789`. | +| | `REDACT_PARTIAL_OUTPUT_FORMAT` | Specifies the output format. Enter `V` for each character from the input string to be possibly redacted. Replace each `F` character from the input format with a character such as a hyphen or any other separator. | The field value `VVV-VV-VVVV` can be used to redact SSN strings into `XXX-XX-6789` where `X` comes from `REDACT_PARTIAL_MASKCHAR` field. | +| | `REDACT_PARTIAL_MASKCHAR` | Specifies the character to be used for redaction. | The value `X` for redacting SSN strings into `XXX-XX-6789`. | +| | `REDACT_PARTIAL_MASKFROM` | Specifies which `V` within the input format from which to start the redaction. | The value `1` for redacting SSN strings starting at the first `V` of the input format of `VVVFVVFVVVV` into strings such as `XXX-XX-6789`. | +| | `REDACT_PARTIAL_MASKTO` | Specifies which `V` within the input format at which to end the redaction. | The value `5` for redacting SSN strings up to and including the fifth `V` within the input format of `VVVFVVFVVVV` into strings such as `XXX-XX-6789`. | +| Number | `REDACT_PARTIAL_MASKCHAR` | Specifies the character to be displayed in the range between 0 and 9. | `‘9, 1, 5’` for redacting the first five digits of the Social Security Number `123456789` into `999996789`. | +| | `REDACT_PARTIAL_MASKFROM` | Specifies the start digit position for redaction. | | +| | `REDACT_PARTIAL_MASKTO` | Specifies the end digit position for redaction. | | +| Datetime | `REDACT_PARTIAL_DATE_MONTH` | `‘m’` redacts the month. To mask a specific month, specify `‘m#’` where # indicates the month specified by its number between `1` and `12`. | `m3` displays as March. | +| | `REDACT_PARTIAL_DATE_DAY` | `‘d’` redacts the day of the month. To mask with a day of the month, append `1-31` to a lowercase `d`. | `d3` displays as `03`. | +| | `REDACT_PARTIAL_DATE_YEAR` | `‘y’` redacts the year. To mask with a year, append `1-9999` to a lowercase `y`. | `y1960` displays as `60`. | +| | `REDACT_PARTIAL_DATE_HOUR` | `‘h’` redacts the hour. To mask with an hour, append `0-23` to a lowercase `h`. | `h18` displays as `18`. | +| | `REDACT_PARTIAL_DATE_MINUTE` | `‘m’` redacts the minute. To mask with a minute, append `0-59` to a lowercase `m`. | `m20` displays as `20`. | +| | `REDACT_PARTIAL_DATE_SECOND` | `‘s’` redacts the second. To mask with a second, append `0-59` to a lowercase `s`. | `s40` displays as `40`. | + +The following table represents `function_parameters` values that can be used in partial redaction. + +| Function Parameter | Data Type | Value | Description | +| ----------------------------- | ---------- | -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `REDACT_US_SSN_F5` | `VARCHAR2` | `'VVVFVVFVVVV,VVV-VV-VVVV,X,1,5'` | Redacts the first 5 numbers of SSN. **Example:** The number `123-45-6789` becomes `XXX-XX-6789`. 
| +| `REDACT_US_SSN_L4` | `VARCHAR2` | `'VVVFVVFVVVV,VVV-VV-VVVV,X,6,9'` | Redacts the last 4 numbers of SSN. **Example:** The number `123-45-6789` becomes `123-45-XXXX`. | +| `REDACT_US_SSN_ENTIRE` | `VARCHAR2` | `'VVVFVVFVVVV,VVV-VV-VVVV,X,1,9'` | Redacts the entire SSN. **Example:** The number `123-45-6789` becomes `XXX-XX-XXXX`. | +| `REDACT_NUM_US_SSN_F5` | `VARCHAR2` | `'9,1,5'` | Redacts the first 5 numbers of SSN when the column is a number datatype. **Example:** The number `123456789` becomes `999996789`. | +| `REDACT_NUM_US_SSN_L4` | `VARCHAR2` | `'9,6,9'` | Redacts the last 4 numbers of SSN when the column is a number datatype. **Example:** The number `123456789` becomes `123459999`. | +| `REDACT_NUM_US_SSN_ENTIRE` | `VARCHAR2` | `'9,1,9'` | Redacts the entire SSN when the column is a number datatype. **Example:** The number `123456789` becomes `999999999`. | +| `REDACT_ZIP_CODE` | `VARCHAR2` | `'VVVVV,VVVVV,X,1,5'` | Redacts a 5 digit zip code. **Example:** `12345` becomes `XXXXX`. | +| `REDACT_NUM_ZIP_CODE` | `VARCHAR2` | `'9,1,5'` | Redacts a 5 digit zip code when the column is a number datatype. **Example:** `12345` becomes `99999`. | +| `REDACT_CCN16_F12` | `VARCHAR2` | `'VVVVFVVVVFVVVVFVVVV,VVVV-VVVV-VVVV-VVVV,*,1,12'` | Redacts a 16 digit credit card number and displays only 4 digits. **Example:** `1234 5678 9000 2358` becomes `****-****-****-2358`. | +| `REDACT_DATE_MILLENNIUM` | `VARCHAR2` | `'m1d1y2000'` | Redacts a date that is in the `DD-MM-YY` format. **Example:** Redacts all dates to `01-JAN-2000`. | +| `REDACT_DATE_EPOCH` | `VARCHAR2` | `'m1d1y1970'` | Redacts all dates to `01-JAN-70`. | +| `REDACT_AMEX_CCN_FORMATTED` | `VARCHAR2` | `'VVVVFVVVVVVFVVVVV,VVVV-VVVVVV-VVVVV,*,1,10'` | Redacts the American Express credit card number and replaces the digits with `*` except for the last 5 digits. **Example:** The credit card number `1234 567890 34500` becomes `**** ****** 34500`. | +| `REDACT_AMEX_CCN_NUMBER` | `VARCHAR2` | `'0,1,10'` | Redacts the American Express credit card number and replaces the digits with `0` except for the last 5 digits. **Example:** The credit card number `1234 567890 34500` becomes `0000 000000 34500`. | +| `REDACT_SIN_FORMATTED` | `VARCHAR2` | `'VVVFVVVFVVV,VVV-VVV-VVV,*,1,6'` | Redacts the Social Insurance Number by replacing the first 6 digits by `*`. **Example:** `123-456-789` becomes `***-***-789`. | +| `REDACT_SIN_NUMBER` | `VARCHAR2` | `'9,1,6'` | Redacts the Social Insurance Number by replacing the first 6 digits by `9`. **Example:** `123456789` becomes `999999789`. | +| `REDACT_SIN_UNFORMATTED` | `VARCHAR2` | `'VVVVVVVVV,VVVVVVVVV,*,1,6'` | Redacts the Social Insurance Number by replacing the first 6 digits by `*`. **Example:** `123456789` becomes `******789`. | +| `REDACT_CCN_FORMATTED` | `VARCHAR2` | `'VVVVFVVVVFVVVVFVVVV,VVVV-VVVV-VVVV-VVVV,*,1,12'` | Redacts a credit card number by `*` and displays only 4 digits. **Example:** The credit card number `1234-5678-9000-4671` becomes `****-****-****-4671`. | +| `REDACT_CCN_NUMBER` | `VARCHAR2` | `'9,1,12'` | Redacts a credit card number by `0` except the last 4 digits. **Example:** The credit card number `1234567890004671` becomes `0000000000004671`. | +| `REDACT_NA_PHONE_FORMATTED` | `VARCHAR2` | `'VVVFVVVFVVVV,VVV-VVV-VVVV,X,4,10'` | Redacts the North American phone number by `X`, leaving the area code. **Example:** `123-456-7890` becomes `123-XXX-XXXX`. | +| `REDACT_NA_PHONE_NUMBER` | `VARCHAR2` | `'0,4,10'` | Redacts the North American phone number by `0`, leaving the area code.
**Example:** `1234567890` becomes `1230000000`. | +| `REDACT_NA_PHONE_UNFORMATTED` | `VARCHAR2` | `'VVVVVVVVVV,VVVVVVVVVV,X,4,10'` | Redacts the North American phone number by `X`, leaving the area code. **Example:** `1234567890` becomes `123XXXXXXX`. | +| `REDACT_UK_NIN_FORMATTED` | `VARCHAR2` | `'VVFVVFVVFVVFV,VV VV VV VV V,X,3,8'` | Redacts the UK National Insurance Number by `X`, leaving the alphabetic characters. **Example:** `NY 22 01 34 D` becomes `NY XX XX XX D`. | +| `REDACT_UK_NIN_UNFORMATTED` | `VARCHAR2` | `'VVVVVVVVV,VVVVVVVVV,X,3,8'` | Redacts the UK National Insurance Number by `X`, leaving the alphabetic characters. **Example:** `NY220134D` becomes `NYXXXXXXD`. | + +A regular expression-based redaction searches for patterns of data to redact. The `regexp_pattern` matches the values to be redacted, and the `regexp_replace_string` specifies how the matched values are changed. The following table illustrates the `regexp_pattern` values that you can use during `REGEXP` based redaction. + +| Function Parameter and Description | Data Type | Value | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :--------: | :----------------------------------------------------: | +| `RE_PATTERN_CC_L6_T4`: Searches for the middle digits of a credit card number that includes 6 leading digits and 4 trailing digits.
The `regexp_replace_string` setting to use with the format is `RE_REDACT_CC_MIDDLE_DIGITS` that replaces the identified pattern with the characters specified by the `RE_REDACT_CC_MIDDLE_DIGITS` parameter. | `VARCHAR2` | `'(\d\d\d\d\d\d)(\d\d\d*)(\d\d\d\d)'` | +| `RE_PATTERN_ANY_DIGIT`: Searches for any digit and replaces the identified pattern with the characters specified by the following values of the `regexp_replace_string` parameter.
`regexp_replace_string=> RE_REDACT_WITH_SINGLE_X`
(replaces any matched digit with the `X` character).
`regexp_replace_string=> RE_REDACT_WITH_SINGLE_1`
(replaces any matched digit with the `1` character). | `VARCHAR2` | `'\d'` | +| `RE_PATTERN_US_PHONE`: Searches for a U.S. phone number and replaces the identified pattern with the characters specified by the `regexp_replace_string` parameter.
`regexp_replace_string=> RE_REDACT_US_PHONE_L7`
(searches the phone number and then replaces the last 7 digits). | `VARCHAR2` | `'(\(\d\d\d\)\|\d\d\d)-(\d\d\d)-(\d\d\d\d)'` | +| `RE_PATTERN_EMAIL_ADDRESS`: Searches for the email address and replaces the identified pattern with the characters specified by the following values of the `regexp_replace_string` parameter.
`regexp_replace_string=> RE_REDACT_EMAIL_NAME`
(finds the email address and redacts the email username).
`regexp_replace_string=> RE_REDACT_EMAIL_DOMAIN`
(finds the email address and redacts the email domain).
`regexp_replace_string=> RE_REDACT_EMAIL_ENTIRE`
(finds the email address and redacts the entire email address). | `VARCHAR2` | `'([A-Za-z0-9._%+-]+)@([A-Za-z0-9.-]+\.[A-Za-z]{2,4})'` | +| `RE_PATTERN_IP_ADDRESS`: Searches for an IP address and replaces the identified pattern with the characters specified by the `regexp_replace_string` parameter. The `regexp_replace_string` parameter to be used is `RE_REDACT_IP_L3` that replaces the last section of an IP address with `999` and indicates it is redacted. | `VARCHAR2` | `'(\d{1,3}\.\d{1,3}\.\d{1,3})\.\d{1,3}'` | +| `RE_PATTERN_AMEX_CCN`: Searches for the American Express credit card number. The `regexp_replace_string` parameter to be used is `RE_REDACT_AMEX_CCN` that redacts all of the digits except the last 5. | `VARCHAR2` | `'.*(\d\d\d\d\d)$'` | +| `RE_PATTERN_CCN`: Searches for the credit card number other than American Express credit cards. The `regexp_replace_string` parameter to be used is `RE_REDACT_CCN` that redacts all of the digits except the last 4. | `VARCHAR2` | `'.*(\d\d\d\d)$'` | +| `RE_PATTERN_US_SSN`: Searches the SSN number and replaces the identified pattern with the characters specified by the `regexp_replace_string` parameter.
`'\1-XXX-XXXX'` or `'XXX-XXX-\3'` will return `123-XXX-XXXX` or `XXX-XXX-6789`, respectively, for the value `'123-45-6789'`. | `VARCHAR2` | `'(\d\d\d)-(\d\d)-(\d\d\d\d)'` | + +The following table illustrates the `regexp_replace_string` values that you can use during `REGEXP` based redaction. + +| Function Parameter | Data Type | Value | Description | +| ---------------------------- | ---------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `RE_REDACT_CC_MIDDLE_DIGITS` | `VARCHAR2` | `'\1XXXXXX\3'` | Redacts the middle digits of a credit card number according to the `regexp_pattern` parameter with the `RE_PATTERN_CC_L6_T4` format and replaces each redacted character with an `X`.

**Example:** The credit card number `1234 5678 9000 2490` becomes `1234 56XX XXXX 2490`. | +| `RE_REDACT_WITH_SINGLE_X` | `VARCHAR2` | `'X'` | Replaces the data with a single `X` character for each matching pattern as specified by setting the `regexp_pattern` parameter with the `RE_PATTERN_ANY_DIGIT` format.

**Example:** The credit card number `1234 5678 9000 2490` becomes `XXXX XXXX XXXX XXXX`. | +| `RE_REDACT_WITH_SINGLE_1` | `VARCHAR2` | `'1'` | Replaces the data with a single `1` digit for each of the data digits as specified by setting the `regexp_pattern` parameter with the `RE_PATTERN_ANY_DIGIT` format.

**Example:** The credit card number `1234 5678 9000 2490` becomes `1111 1111 1111 1111`. | +| `RE_REDACT_US_PHONE_L7` | `VARCHAR2` | `'\1-XXX-XXXX'` | Redacts the last 7 digits of a U.S. phone number according to the `regexp_pattern` parameter with the `RE_PATTERN_US_PHONE` format and replaces each redacted character with an `X`.

**Example:** The phone number `123-444-5900` becomes `123-XXX-XXXX`. | +| `RE_REDACT_EMAIL_NAME` | `VARCHAR2` | `'xxxx@\2'` | Redacts the email name according to the `regexp_pattern` parameter with the `RE_PATTERN_EMAIL_ADDRESS` format and replaces the email username with the four `x` characters.

**Example:** The email address `sjohn@example.com` becomes `xxxx@example.com`. | +| `RE_REDACT_EMAIL_DOMAIN` | `VARCHAR2` | `'\1@xxxxx.com'` | Redacts the email domain name according to the `regexp_pattern` parameter with the `RE_PATTERN_EMAIL_ADDRESS` format and replaces the domain with the five `x` characters.

**Example:** The email address `sjohn@example.com` becomes `sjohn@xxxxx.com`. | +| `RE_REDACT_EMAIL_ENTIRE` | `VARCHAR2` | `'xxxx@xxxxx.com'` | Redacts the entire email address according to the `regexp_pattern` parameter with the `RE_PATTERN_EMAIL_ADDRESS` format and replaces the email address with the `x` characters.

**Example:** The email address `sjohn@example.com` becomes `xxxx@xxxxx.com`. | +| `RE_REDACT_IP_L3` | `VARCHAR2` | `'\1.999'` | Redacts the last 3 digits of an IP address according to the `regexp_pattern` parameter with the `RE_PATTERN_IP_ADDRESS` format.

**Example:** The IP address `172.0.1.258` becomes `172.0.1.999`, which is an invalid IP address. | +| `RE_REDACT_AMEX_CCN` | `VARCHAR2` | `'**********\1'` | Redacts the first 10 digits of an American Express credit card number according to the `regexp_pattern` parameter with the `RE_PATTERN_AMEX_CCN` format.

**Example:** `123456789062816` becomes `**********62816`. | +| `RE_REDACT_CCN` | `VARCHAR2` | `'************\1'` | Redacts the first 12 digits of a credit card number as specified by the `regexp_pattern` parameter with the `RE_PATTERN_CCN` format.

**Example:** `8749012678345671` becomes `************5671`. | + +The following tables show the `regexp_position` value and `regexp_occurence` values that you can use during `REGEXP` based redaction. + +| Function Parameter | Data Type | Value | Description | +| ------------------ | --------- | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `RE_BEGINNING` | `INTEGER` | `1` | Specifies the position of a character where search must begin. By default, the value is `1` that indicates the search begins at the first character of `source_char`. | + +| Function Parameter | Data Type | Value | Description | +| ------------------ | --------- | ----- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `RE_ALL` | `INTEGER` | `0` | Specifies the replacement occurrence of a substring. If the value is `0`, then the replacement of each matching substring occurs. | +| `RE_FIRST` | `INTEGER` | `1` | Specifies the replacement occurrence of a substring. If the value is `1`, then the replacement of the first matching substring occurs. | + +The following table shows the `regexp_match_parameter` values that you can use during `REGEXP` based redaction which lets you change the default matching behavior of a function. + +| Function Parameter | Data Type | Value | Description | +| ---------------------- | ---------- | ----- | --------------------------------------------------------------------------------------------------------------- | +| `RE_CASE_SENSITIVE` | `VARCHAR2` | `'c'` | Specifies the case-sensitive matching. | +| `RE_CASE_INSENSITIVE` | `VARCHAR2` | `'i'` | Specifies the case-insensitive matching. | +| `RE_MULTIPLE_LINES` | `VARCHAR2` | `'m'` | Treats the source string as multiple lines but if you omit this parameter, then it indicates as a single line. | +| `RE_NEWLINE_WILDCARD` | `VARCHAR2` | `'n'` | Specifies the period (.), but if you omit this parameter, then the period does not match the newline character. | +| `RE_IGNORE_WHITESPACE` | `VARCHAR2` | `'x'` | Ignores the whitespace characters. | + +!!! Note + If you create a redaction policy based on a numeric type column, then make sure that the result after redaction is a number and accordingly set the replacement string to avoid runtime errors. + +!!! Note + If you create a redaction policy based on a character type column, then make sure that a length of the result after redaction is compatible with the column type and accordingly set the replacement string to avoid runtime errors. + +## ADD_POLICY + +The `add_policy` procedure creates a new data redaction policy for a table. + +```text +PROCEDURE add_policy ( + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2, + IN VARCHAR2, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN INTEGER DEFAULT DBMS_REDACT.FULL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2, + IN BOOLEAN DEFAULT TRUE, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN INTEGER DEFAULT DBMS_REDACT.RE_BEGINNING, + IN INTEGER DEFAULT DBMS_REDACT.RE_ALL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL + ) +``` + +**Parameters** + +`object_schema` + + Specifies the name of the schema in which the object resides and on which the data redaction policy will be applied. If you specify `NULL` then the given object is searched by the order specified by `search_path` setting. 
+ +`object_name` + + Name of the table on which the data redaction policy is created. + +`policy_name` + + Name of the policy to be added. Ensure that the `policy_name` is unique for the table on which the policy is created. + +`policy_description` + + Specifies the description of a redaction policy. + +`column_name` + + Name of the column to which the redaction policy applies. To redact more than one column, use the `alter_policy` procedure to add additional columns. + +`column_description` + + Description of the column to be redacted. The `column_description` is not supported, but if you specify a description for a column, you get a warning message. + +`function_type` + + The type of redaction function to be used. The possible values are `NONE, FULL, PARTIAL, RANDOM, REGEXP`, and `CUSTOM`. + +`function_parameters` + + Specifies the function parameters for partial redaction and is applicable only for partial redaction. + +`expression` + + Specifies the Boolean expression for the table and determines how the policy is to be applied. The redaction occurs if this policy expression evaluates to `TRUE`. + +`enable` + + When set to `TRUE`, the policy is enabled upon creation. The default is `TRUE`. When set to `FALSE`, the policy is disabled, but the policy can be enabled by calling the `enable_policy` procedure. + +`regexp_pattern` + + Specifies the regular expression pattern to redact data. If the `regexp_pattern` does not match, then the `NULL` value is returned. + +`regexp_replace_string` + + Specifies the replacement string value. + +`regexp_position` + + Specifies the position of a character where the search must begin. By default, the function parameter is `RE_BEGINNING`. + +`regexp_occurrence` + + Specifies the replacement occurrence of a substring. If the constant is `RE_ALL`, then the replacement of each matching substring occurs. If the constant is `RE_FIRST`, then the replacement of the first matching substring occurs. + +`regexp_match_parameter` + + Changes the default matching behavior of a function. The possible `regexp_match_parameter` constants are `'RE_CASE_SENSITIVE', 'RE_CASE_INSENSITIVE', 'RE_MULTIPLE_LINES', 'RE_NEWLINE_WILDCARD', 'RE_IGNORE_WHITESPACE'`. + + **Note**: For more information on constants, function parameters, or regular expressions, see Using `DBMS_REDACT` Constants and Function Parameters. + +`custom_function_expression` + + The `custom_function_expression` is applicable only for the `CUSTOM` redaction type. The `custom_function_expression` is a function expression, that is, a schema-qualified function with parameters, such as `schema_name.function_name (argument1, …)`, that allows a user to apply their own redaction logic to redact the column data. + +**Example** + +The following example illustrates how to create a policy and use full redaction for values in the `payment_details_tab` table `customer_id` column.
+ +```text +edb=# CREATE TABLE payment_details_tab ( +customer_id NUMBER NOT NULL, +card_string VARCHAR2(19) NOT NULL); +CREATE TABLE + +edb=# BEGIN + INSERT INTO payment_details_tab VALUES (4000, '1234-1234-1234-1234'); + INSERT INTO payment_details_tab VALUES (4001, '2345-2345-2345-2345'); +END; + +EDB-SPL Procedure successfully completed + +edb=# CREATE USER redact_user; +CREATE ROLE +edb=# GRANT SELECT ON payment_details_tab TO redact_user; +GRANT + +\c edb base_user + +BEGIN + DBMS_REDACT.add_policy( + object_schema => 'public', + object_name => 'payment_details_tab', + policy_name => 'redactPolicy_001', + policy_description => 'redactPolicy_001 for payment_details_tab table', + column_name => 'customer_id', + function_type => DBMS_REDACT.full, + expression => '1=1', + enable => TRUE); +END; +``` + +Redacted Result: + +```text +edb=# \c edb redact_user +You are now connected to database "edb" as user "redact_user". + +edb=> select customer_id from payment_details_tab order by 1; + customer_id +------------- + 0 + 0 +(2 rows) +``` + +## ALTER_POLICY + +The `alter_policy` procedure alters or modifies an existing data redaction policy for a table. + +```text +PROCEDURE alter_policy ( + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2, + IN VARCHAR2, + IN INTEGER DEFAULT DBMS_REDACT.ADD_COLUMN, + IN VARCHAR2 DEFAULT NULL, + IN INTEGER DEFAULT DBMS_REDACT.FULL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN INTEGER DEFAULT DBMS_REDACT.RE_BEGINNING, + IN INTEGER DEFAULT DBMS_REDACT.RE_ALL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL + ) +``` + +**Parameters** + +`object_schema` + + Specifies the name of the schema in which the object resides and on which the data redaction policy will be altered. If you specify `NULL` then the given object is searched by the order specified by `search_path` setting. + +`object_name` + + Name of the table to which to alter a data redaction policy. + +`policy_name` + + Name of the policy to be altered. + +`action` + + The action to perform. For more information about action parameters see, `DBMS_REDACT Constants and Function Parameters`. + +`column_name` + + Name of the column to which the redaction policy applies. + +`function_type` + + The type of redaction function to be used. The possible values are `NONE, FULL, PARTIAL, RANDOM, REGEXP`, and `CUSTOM`. + +`function_parameters` + + Specifies the function parameters for the redaction function. + +`expression` + + Specifies the Boolean expression for the table and determines how the policy is to be applied. The redaction occurs if this policy expression is evaluated to `TRUE`. + +`regexp_pattern` + + Enables the use of regular expressions to redact data. If the `regexp_pattern` does not match the data, then the `NULL` value is returned. + +`regexp_replace_string` + + Specifies the replacement string value. + +`regexp_position` + + Specifies the position of a character where search must begin. By default, the function parameter is `RE_BEGINNING`. + +`regexp_occurence` + + Specifies the replacement occurrence of a substring. If the constant is `RE_ALL`, then the replacement of each matching substring occurs. If the constant is `RE_FIRST`, then the replacement of the first matching substring occurs. + +`regexp_match_parameter` + + Changes the default matching behavior of a function. 
The possible `regexp_match_parameter` constants are `'RE_CASE_SENSITIVE', 'RE_CASE_INSENSITIVE', 'RE_MULTIPLE_LINES', 'RE_NEWLINE_WILDCARD', 'RE_IGNORE_WHITESPACE'`. + + **Note**: For more information on constants, function parameters, or regular expressions, see Using `DBMS_REDACT` Constants and Function Parameters. + +`policy_description` + + Specifies the description of a redaction policy. + +`column_description` + + Description of the column to be redacted. The `column_description` is not supported, but if you specify a description for a column, you get a warning message. + +`custom_function_expression` + + The `custom_function_expression` is applicable only for the `CUSTOM` redaction type. The `custom_function_expression` is a function expression, that is, a schema-qualified function with parameters, such as `schema_name.function_name (argument1, …)`, that allows a user to apply their own redaction logic to redact the column data. + +**Example** + +The following example illustrates how to alter a policy to use partial redaction for values in the `payment_details_tab` table `card_string` (usually a credit card number) column. + +```text +\c edb base_user + +BEGIN + DBMS_REDACT.alter_policy ( + object_schema => 'public', + object_name => 'payment_details_tab', + policy_name => 'redactPolicy_001', + action => DBMS_REDACT.ADD_COLUMN, + column_name => 'card_string', + function_type => DBMS_REDACT.partial, + function_parameters => DBMS_REDACT.REDACT_CCN16_F12); +END; +``` + +Redacted Result: + +```text +edb=# \c - redact_user +You are now connected to database "edb" as user "redact_user". +edb=> SELECT * FROM payment_details_tab; + customer_id | card_string +-------------+--------------------- + 0 | ****-****-****-1234 + 0 | ****-****-****-2345 +(2 rows) +``` + +## DISABLE_POLICY + +The `disable_policy` procedure disables an existing data redaction policy. + +```text +PROCEDURE disable_policy ( + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2, + IN VARCHAR2 + ) +``` + +**Parameters** + +`object_schema` + + Specifies the name of the schema in which the object resides and on which the data redaction policy will be applied. If you specify `NULL` then the given object is searched by the order specified by the `search_path` setting. + +`object_name` + + Name of the table for which to disable a data redaction policy. + +`policy_name` + + Name of the policy to be disabled. + +**Example** + +The following example illustrates how to disable a policy. + +```text +\c edb base_user + +BEGIN + DBMS_REDACT.disable_policy( + object_schema => 'public', + object_name => 'payment_details_tab', + policy_name => 'redactPolicy_001'); +END; +``` + +Redacted Result: Data is no longer redacted after disabling a policy. + +## ENABLE_POLICY + +The `enable_policy` procedure enables a previously disabled data redaction policy. + +```text +PROCEDURE enable_policy ( + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2, + IN VARCHAR2 + ) +``` + +**Parameters** + +`object_schema` + + Specifies the name of the schema in which the object resides and on which the data redaction policy will be applied. If you specify `NULL` then the given object is searched by the order specified by the `search_path` setting. + +`object_name` + + Name of the table for which to enable a data redaction policy. + +`policy_name` + + Name of the policy to be enabled. + +**Example** + +The following example illustrates how to enable a policy.
+ +```text +\c edb base_user + +BEGIN + DBMS_REDACT.enable_policy( + object_schema => 'public', + object_name => 'payment_details_tab', + policy_name => 'redactPolicy_001'); +END; +``` + +Redacted Result: Data is redacted after enabling a policy. + +## DROP_POLICY + +The `drop_policy` procedure drops a data redaction policy by removing the masking policy from a table. + +```text +PROCEDURE drop_policy ( + IN VARCHAR2 DEFAULT NULL, + IN VARCHAR2, + IN VARCHAR2 + ) +``` + +**Parameters** + +`object_schema` + + Specifies the name of the schema in which the object resides and on which the data redaction policy will be applied. If you specify `NULL` then the given object is searched by the order specified by `search_path` setting. + +`object_name` + + Name of the table from which to drop a data redaction policy. + +`policy_name` + + Name of the policy to be dropped. + +**Example** + +The following example illustrates how to drop a policy. + +```text +\c edb base_user + +BEGIN + DBMS_REDACT.drop_policy( + object_schema => 'public', + object_name => 'payment_details_tab', + policy_name => 'redactPolicy_001'); +END; +``` + +Redacted Result: The server drops the specified policy. + +## UPDATE_FULL_REDACTION_VALUES + +The `update_full_redaction_values` procedure updates the default displayed values for a data redaction policy and these default values can be viewed using the `redaction_values_for_type_full` view that use the full redaction type. + +```TEXT +PROCEDURE update_full_redaction_values ( + IN NUMBER DEFAULT NULL, + IN FLOAT4 DEFAULT NULL, + IN FLOAT8 DEFAULT NULL, + IN CHAR DEFAULT NULL, + IN VARCHAR2 DEFAULT NULL, + IN NCHAR DEFAULT NULL, + IN NVARCHAR2 DEFAULT NULL, + IN DATE DEFAULT NULL, + IN TIMESTAMP DEFAULT NULL, + IN TIMESTAMPTZ DEFAULT NULL, + IN BLOB DEFAULT NULL, + IN CLOB DEFAULT NULL, + IN CLOB DEFAULT NULL + ) +``` + +**Parameters** + +`number_val` + + Updates the default value for columns of the `NUMBER` datatype. + +`binfloat_val` + + The `FLOAT4` datatype is a random value. The binary float datatype is not supported. + +`bindouble_val` + + The `FLOAT8` datatype is a random value. The binary double datatype is not supported. + +`char_val` + + Updates the default value for columns of the `CHAR` datatype. + +`varchar_val` + + Updates the default value for columns of the `VARCHAR2` datatype. + +`nchar_val` + + The `nchar_val` is mapped to `CHAR` datatype and returns the `CHAR` value. + +`nvarchar_val` + + The `nvarchar_val` is mapped to `VARCHAR2` datatype and returns the `VARCHAR` value. + +`datecol_val` + + Updates the default value for columns of the `DATE` datatype. + +`ts_val` + + Updates the default value for columns of the `TIMESTAMP` datatype. + +`tswtz_val` + + Updates the default value for columns of the `TIMESTAMPTZ` datatype. + +`blob_val` + + Updates the default value for columns of the `BLOB` datatype. + +`clob_val` + + Updates the default value for columns of the `CLOB` datatype. + +`nclob_val` + + The `nclob_val` is mapped to `CLOB` datatype and returns the `CLOB` value. + +**Example** + +The following example illustrates how to update the full redaction values but before updating the values, you can: + +View the default values using `redaction_values_for_type_full` view as shown below: + +```text +edb=# \x +Expanded display is on. 
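+-- The SELECT below is run before calling update_full_redaction_values;
+-- it lists the current (default) full-redaction values for each datatype.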
+edb=# SELECT number_value, char_value, varchar_value, date_value, + timestamp_value, timestamp_with_time_zone_value, blob_value, +clob_value +FROM redaction_values_for_type_full; +-[ RECORD 1 ]------------------+-------------------------- +number_value | 0 +char_value | +varchar_value | +date_value | 01-JAN-01 00:00:00 +timestamp_value | 01-JAN-01 01:00:00 +timestamp_with_time_zone_value | 31-DEC-00 20:00:00 -05:00 +blob_value | \x5b72656461637465645d +clob_value | [redacted] +(1 row) +``` + +Now, update the default values for full redaction type. The `NULL` values will be ignored. + +```text +\c edb base_user + +edb=# BEGIN + DBMS_REDACT.update_full_redaction_values ( + number_val => 9999999, + char_val => 'Z', + varchar_val => 'V', + datecol_val => to_date('17/10/2018', 'DD/MM/YYYY'), + ts_val => to_timestamp('17/10/2018 11:12:13', 'DD/MM/YYYY HH24:MI:SS'), + tswtz_val => NULL, + blob_val => 'NEW REDACTED VALUE', + clob_val => 'NEW REDACTED VALUE'); +END; +``` + +You can now see the updated values using `redaction_values_for_type_full` view. + +```text +EDB-SPL Procedure successfully completed +edb=# SELECT number_value, char_value, varchar_value, date_value, + timestamp_value, timestamp_with_time_zone_value, blob_value, +clob_value +FROM redaction_values_for_type_full; +-[ RECORD 1 ]------------------+--------------------------------------- +number_value | 9999999 +char_value | Z +varchar_value | V +date_value | 17-OCT-18 00:00:00 +timestamp_value | 17-OCT-18 11:12:13 +timestamp_with_time_zone_value | 31-DEC-00 20:00:00 -05:00 +blob_value | \x4e45572052454441435445442056414c5545 +clob_value | NEW REDACTED VALUE +(1 row) +``` + +Redacted Result: + +```text +edb=# \c edb redact_user +You are now connected to database "edb" as user "redact_user". + +edb=> select * from payment_details_tab order by 1; + customer_id | card_string +-------------+------------- + 9999999 | V + 9999999 | V +(2 rows) +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/14_dbms_rls.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/14_dbms_rls.mdx new file mode 100644 index 00000000000..7fdb3ab36e5 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/14_dbms_rls.mdx @@ -0,0 +1,498 @@ +--- +title: "DBMS_RLS" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_rls.html" +--- + +The `DBMS_RLS` package enables the implementation of Virtual Private Database on certain Advanced Server database objects. + +| Function/Procedure | Function or Procedure | Return Type | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | ----------- | ------------------------------------------------ | +| `ADD_POLICY(object_schema, object_name, policy_name, function_schema, policy_function [, statement_types [, update_check [, enable [, static_policy [, policy_type [, long_predicate [, sec_relevant_cols [, sec_relevant_cols_opt ]]]]]]]])` | Procedure | n/a | Add a security policy to a database object. 
| +| `DROP_POLICY(object_schema, object_name, policy_name)` | Procedure | n/a | Remove a security policy from a database object. | +| `ENABLE_POLICY(object_schema, object_name, policy_name, enable)` | Procedure | n/a | Enable or disable a security policy. | + +Advanced Server's implementation of `DBMS_RLS` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported. + +*Virtual Private Database* is a type of fine-grained access control using security policies. *Fine-grained access control* in Virtual Private Database means that access to data can be controlled down to specific rows as defined by the security policy. + +The rules that encode a security policy are defined in a *policy function*, which is an SPL function with certain input parameters and return value. The *security policy* is the named association of the policy function to a particular database object, typically a table. + +!!! Note + In Advanced Server, the policy function can be written in any language supported by Advanced Server such as SQL, PL/pgSQL and SPL. + +!!! Note + The database objects currently supported by Advanced Server Virtual Private Database are tables. Policies cannot be applied to views or synonyms. + +The advantages of using Virtual Private Database are the following: + +- Provides a fine-grained level of security. Database object level privileges given by the `GRANT` command determine access privileges to the entire instance of a database object, while Virtual Private Database provides access control for the individual rows of a database object instance. +- A different security policy can be applied depending upon the type of SQL command (`INSERT, UPDATE, DELETE`, or `SELECT`). +- The security policy can vary dynamically for each applicable SQL command affecting the database object depending upon factors such as the session user of the application accessing the database object. +- Invocation of the security policy is transparent to all applications that access the database object and thus, individual applications do not have to be modified to apply the security policy. +- Once a security policy is enabled, it is not possible for any application (including new applications) to circumvent the security policy except by the system privilege noted by the following. +- Even superusers cannot circumvent the security policy except by the system privilege noted by the following. + +!!! Note + The only way security policies can be circumvented is if the `EXEMPT ACCESS POLICY` system privilege has been granted to a user. The `EXEMPT ACCESS POLICY` privilege should be granted with extreme care as a user with this privilege is exempted from all policies in the database. + +The `DBMS_RLS` package provides procedures to create policies, remove policies, enable policies, and disable policies. + +The process for implementing Virtual Private Database is as follows: + +- Create a policy function. The function must have two input parameters of type `VARCHAR2`. The first input parameter is for the schema containing the database object to which the policy is to apply and the second input parameter is for the name of that database object. The function must have a `VARCHAR2` return type. The function must return a string in the form of a `WHERE` clause predicate. This predicate is dynamically appended as an `AND` condition to the SQL command that acts upon the database object. 
Thus, rows that do not satisfy the policy function predicate are filtered out of the SQL command result set.
+- Use the `ADD_POLICY` procedure to define a new policy, which is the association of a policy function with a database object. With the `ADD_POLICY` procedure, you can also specify the types of SQL commands (`INSERT`, `UPDATE`, `DELETE`, or `SELECT`) to which the policy applies, whether to enable the policy at the time of its creation, and whether the policy should apply to newly inserted rows or the modified image of updated rows.
+- Use the `ENABLE_POLICY` procedure to disable or enable an existing policy.
+- Use the `DROP_POLICY` procedure to remove an existing policy. The `DROP_POLICY` procedure does not drop the policy function or the associated database object.
+
+Once policies are created, they can be viewed in the Oracle-compatible catalog views `ALL_POLICIES`, `DBA_POLICIES`, and `USER_POLICIES`. The supported compatible views are listed in the *Database Compatibility for Oracle Developers Catalog Views Guide*, available at the EDB website at:
+
+[https://www.enterprisedb.com/docs/](/epas/11/epas_compat_cat_views/)
+
+The `SYS_CONTEXT` function is often used with `DBMS_RLS`. The signature is:
+
+```text
+SYS_CONTEXT(namespace, attribute)
+```
+
+Where:
+
+    `namespace` is a `VARCHAR2`; the only accepted value is `USERENV`. Any other value will return `NULL`.
+
+    `attribute` is a `VARCHAR2`. `attribute` may be:
+
+| `attribute` Value | Equivalent Value              |
+| ----------------- | ----------------------------- |
+| `SESSION_USER`    | `pg_catalog.session_user`     |
+| `CURRENT_USER`    | `pg_catalog.current_user`     |
+| `CURRENT_SCHEMA`  | `pg_catalog.current_schema`   |
+| `HOST`            | `pg_catalog.inet_host`        |
+| `IP_ADDRESS`      | `pg_catalog.inet_client_addr` |
+| `SERVER_HOST`     | `pg_catalog.inet_server_addr` |
+
+!!! Note
+    The examples used to illustrate the `DBMS_RLS` package are based on a modified copy of the sample `emp` table provided with Advanced Server, along with a role named `salesmgr` that is granted all privileges on the table.
You can create the modified copy of the `emp` table, named `vpemp`, and the `salesmgr` role as shown by the following:
+
+```text
+CREATE TABLE public.vpemp AS SELECT empno, ename, job, sal, comm, deptno
+FROM emp;
+ALTER TABLE vpemp ADD authid VARCHAR2(12);
+UPDATE vpemp SET authid = 'researchmgr' WHERE deptno = 20;
+UPDATE vpemp SET authid = 'salesmgr' WHERE deptno = 30;
+SELECT * FROM vpemp;
+
+ empno | ename  |    job    |   sal   |  comm   | deptno |   authid
+-------+--------+-----------+---------+---------+--------+-------------
+  7782 | CLARK  | MANAGER   | 2450.00 |         |     10 |
+  7839 | KING   | PRESIDENT | 5000.00 |         |     10 |
+  7934 | MILLER | CLERK     | 1300.00 |         |     10 |
+  7369 | SMITH  | CLERK     |  800.00 |         |     20 | researchmgr
+  7566 | JONES  | MANAGER   | 2975.00 |         |     20 | researchmgr
+  7788 | SCOTT  | ANALYST   | 3000.00 |         |     20 | researchmgr
+  7876 | ADAMS  | CLERK     | 1100.00 |         |     20 | researchmgr
+  7902 | FORD   | ANALYST   | 3000.00 |         |     20 | researchmgr
+  7499 | ALLEN  | SALESMAN  | 1600.00 |  300.00 |     30 | salesmgr
+  7521 | WARD   | SALESMAN  | 1250.00 |  500.00 |     30 | salesmgr
+  7654 | MARTIN | SALESMAN  | 1250.00 | 1400.00 |     30 | salesmgr
+  7698 | BLAKE  | MANAGER   | 2850.00 |         |     30 | salesmgr
+  7844 | TURNER | SALESMAN  | 1500.00 |    0.00 |     30 | salesmgr
+  7900 | JAMES  | CLERK     |  950.00 |         |     30 | salesmgr
+(14 rows)
+
+CREATE ROLE salesmgr WITH LOGIN PASSWORD 'password';
+GRANT ALL ON vpemp TO salesmgr;
+```
+
+## ADD_POLICY
+
+The `ADD_POLICY` procedure creates a new policy by associating a policy function with a database object.
+
+You must be a superuser to execute this procedure.
+
+```text
+ADD_POLICY(object_schema VARCHAR2, object_name VARCHAR2,
+  policy_name VARCHAR2, function_schema VARCHAR2,
+  policy_function VARCHAR2
+  [, statement_types VARCHAR2
+  [, update_check BOOLEAN
+  [, enable BOOLEAN
+  [, static_policy BOOLEAN
+  [, policy_type INTEGER
+  [, long_predicate BOOLEAN
+  [, sec_relevant_cols VARCHAR2
+  [, sec_relevant_cols_opt INTEGER ]]]]]]]])
+```
+
+**Parameters**
+
+`object_schema`
+
+    Name of the schema containing the database object to which the policy is to be applied.
+
+`object_name`
+
+    Name of the database object to which the policy is to be applied. A given database object may have more than one policy applied to it.
+
+`policy_name`
+
+    Name assigned to the policy. The combination of database object (identified by `object_schema` and `object_name`) and policy name must be unique within the database.
+
+`function_schema`
+
+    Name of the schema containing the policy function.
+
+    **Note**: The policy function may belong to a package, in which case `function_schema` must contain the name of the schema in which the package is defined.
+
+`policy_function`
+
+    Name of the SPL function that defines the rules of the security policy. The same function may be specified in more than one policy.
+
+    **Note**: The policy function may belong to a package, in which case `policy_function` must also contain the package name in dot notation (that is, `package_name.function_name`).
+
+`statement_types`
+
+    Comma-separated list of SQL commands to which the policy applies. Valid SQL commands are `INSERT`, `UPDATE`, `DELETE`, and `SELECT`. The default is `INSERT,UPDATE,DELETE,SELECT`.
+
+    **Note**: Advanced Server accepts `INDEX` as a statement type, but it is ignored. Policies are not applied to index operations in Advanced Server.
+
+`update_check`
+
+    Applies to `INSERT` and `UPDATE` SQL commands only.
+
+- When set to `TRUE`, the policy is applied to newly inserted rows and to the modified image of updated rows.
If any of the new or modified rows do not qualify according to the policy function predicate, then the `INSERT` or `UPDATE` command throws an exception and no rows are inserted or modified by the `INSERT` or `UPDATE` command. + +- When set to `FALSE`, the policy is not applied to newly inserted rows or the modified image of updated rows. Thus, a newly inserted row may not appear in the result set of a subsequent SQL command that invokes the same policy. Similarly, rows which qualified according to the policy prior to an `UPDATE` command may not appear in the result set of a subsequent SQL command that invokes the same policy. + +- The default is `FALSE`. + +`enable` + + When set to `TRUE`, the policy is enabled and applied to the SQL commands given by the `statement_types` parameter. When set to `FALSE` the policy is disabled and not applied to any SQL commands. The policy can be enabled using the `ENABLE_POLICY` procedure. The default is `TRUE`. + +`static_policy` + + In Oracle, when set to `TRUE`, the policy is *static*, which means the policy function is evaluated once per database object the first time it is invoked by a policy on that database object. The resulting policy function predicate string is saved in memory and reused for all invocations of that policy on that database object while the database server instance is running. + +- When set to `FALSE`, the policy is *dynamic*, which means the policy function is re-evaluated and the policy function predicate string regenerated for all invocations of the policy. + +- The default is `FALSE`. + + !!! Note + In Oracle 10g, the `policy_type` parameter was introduced, which is intended to replace the `static_policy` parameter. In Oracle, if the `policy_type` parameter is not set to its default value of `NULL`, the `policy_type` parameter setting overrides the `static_policy` setting. + + !!! Note + The setting of `static_policy` is ignored by Advanced Server. Advanced Server implements only the dynamic policy, regardless of the setting of the `static_policy` parameter. + +`policy_type` + + In Oracle, determines when the policy function is re-evaluated, and hence, if and when the predicate string returned by the policy function changes. The default is `NULL`. + + **Note**: The setting of this parameter is ignored by Advanced Server. Advanced Server always assumes a dynamic policy. + +`long_predicate` + + In Oracle, allows predicates up to 32K bytes if set to `TRUE`, otherwise predicates are limited to 4000 bytes. The default is `FALSE`. + + **Note**: The setting of this parameter is ignored by Advanced Server. An Advanced Server policy function can return a predicate of unlimited length for all practical purposes. + +`sec_relevant_cols` + + Comma-separated list of columns of `object_name`. Provides *column-level Virtual Private Database* for the listed columns. The policy is enforced if any of the listed columns are referenced in a SQL command of a type listed in `statement_types`. The policy is not enforced if no such columns are referenced. + + The default is `NULL`, which has the same effect as if all of the database object’s columns were included in `sec_relevant_cols`. + +`sec_relevant_cols_opt` + + In Oracle, if `sec_relevant_cols_opt` is set to `DBMS_RLS.ALL_ROWS (INTEGER` constant of value 1), then the columns listed in `sec_relevant_cols` return `NULL` on all rows where the applied policy predicate is false. (If `sec_relevant_cols_opt` is not set to `DBMS_RLS.ALL_ROWS`, these rows would not be returned at all in the result set.) 
The default is `NULL`. + + **Note**: Advanced Server does not support the `DBMS_RLS.ALL_ROWS` functionality. Advanced Server throws an error if `sec_relevant_cols_opt` is set to `DBMS_RLS.ALL_ROWS (INTEGER` value of 1). + +**Examples** + +This example uses the following policy function: + +```text +CREATE OR REPLACE FUNCTION verify_session_user ( + p_schema VARCHAR2, + p_object VARCHAR2 +) +RETURN VARCHAR2 +IS +BEGIN + RETURN 'authid = SYS_CONTEXT(''USERENV'', ''SESSION_USER'')'; +END; +``` + +This function generates the predicate `authid = SYS_CONTEXT('USERENV', 'SESSION_USER')`, which is added to the `WHERE` clause of any SQL command of the type specified in the `ADD_POLICY` procedure. + +This limits the effect of the SQL command to those rows where the content of the `authid` column is the same as the session user. + +!!! Note + This example uses the `SYS_CONTEXT` function to return the login user name. In Oracle the `SYS_CONTEXT` function is used to return attributes of an *application context*. The first parameter of the `SYS_CONTEXT` function is the name of an application context while the second parameter is the name of an attribute set within the application context. `USERENV` is a special built-in namespace that describes the current session. Advanced Server does not support application contexts, but only this specific usage of the `SYS_CONTEXT` function. + +The following anonymous block calls the `ADD_POLICY` procedure to create a policy named `secure_update` to be applied to the `vpemp` table using function `verify_session_user` whenever an `INSERT, UPDATE`, or `DELETE` SQL command is given referencing the `vpemp` table. + +```text +DECLARE + v_object_schema VARCHAR2(30) := 'public'; + v_object_name VARCHAR2(30) := 'vpemp'; + v_policy_name VARCHAR2(30) := 'secure_update'; + v_function_schema VARCHAR2(30) := 'enterprisedb'; + v_policy_function VARCHAR2(30) := 'verify_session_user'; + v_statement_types VARCHAR2(30) := 'INSERT,UPDATE,DELETE'; + v_update_check BOOLEAN := TRUE; + v_enable BOOLEAN := TRUE; +BEGIN + DBMS_RLS.ADD_POLICY( + v_object_schema, + v_object_name, + v_policy_name, + v_function_schema, + v_policy_function, + v_statement_types, + v_update_check, + v_enable + ); +END; +``` + +After successful creation of the policy, a terminal session is started by user `salesmgr`. The following query shows the content of the `vpemp` table: + +```text +edb=# \c edb salesmgr +Password for user salesmgr: +You are now connected to database "edb" as user "salesmgr". 
+edb=> SELECT * FROM vpemp; + empno | ename | job | sal | comm | deptno | authid +-------+--------+-----------+---------+---------+--------+------------- + 7782 | CLARK | MANAGER | 2450.00 | | 10 | + 7839 | KING | PRESIDENT | 5000.00 | | 10 | + 7934 | MILLER | CLERK | 1300.00 | | 10 | + 7369 | SMITH | CLERK | 800.00 | | 20 | researchmgr + 7566 | JONES | MANAGER | 2975.00 | | 20 | researchmgr + 7788 | SCOTT | ANALYST | 3000.00 | | 20 | researchmgr + 7876 | ADAMS | CLERK | 1100.00 | | 20 | researchmgr + 7902 | FORD | ANALYST | 3000.00 | | 20 | researchmgr + 7499 | ALLEN | SALESMAN | 1600.00 | 300.00 | 30 | salesmgr + 7521 | WARD | SALESMAN | 1250.00 | 500.00 | 30 | salesmgr + 7654 | MARTIN | SALESMAN | 1250.00 | 1400.00 | 30 | salesmgr + 7698 | BLAKE | MANAGER | 2850.00 | | 30 | salesmgr + 7844 | TURNER | SALESMAN | 1500.00 | 0.00 | 30 | salesmgr + 7900 | JAMES | CLERK | 950.00 | | 30 | salesmgr +(14 rows) +``` + +An unqualified `UPDATE` command (no `WHERE` clause) is issued by the `salesmgr` user: + +```text +edb=> UPDATE vpemp SET comm = sal * .75; +UPDATE 6 +``` + +Instead of updating all rows in the table, the policy restricts the effect of the update to only those rows where the `authid` column contains the value `salesmgr` as specified by the policy function predicate `authid = SYS_CONTEXT('USERENV', 'SESSION_USER')`. + +The following query shows that the `comm` column has been changed only for those rows where `authid` contains `salesmgr`. All other rows are unchanged. + +```text +edb=> SELECT * FROM vpemp; + empno | ename | job | sal | comm | deptno | authid +-------+--------+-----------+---------+---------+--------+------------- + 7782 | CLARK | MANAGER | 2450.00 | | 10 | + 7839 | KING | PRESIDENT | 5000.00 | | 10 | + 7934 | MILLER | CLERK | 1300.00 | | 10 | + 7369 | SMITH | CLERK | 800.00 | | 20 | researchmgr + 7566 | JONES | MANAGER | 2975.00 | | 20 | researchmgr + 7788 | SCOTT | ANALYST | 3000.00 | | 20 | researchmgr + 7876 | ADAMS | CLERK | 1100.00 | | 20 | researchmgr + 7902 | FORD | ANALYST | 3000.00 | | 20 | researchmgr + 7499 | ALLEN | SALESMAN | 1600.00 | 1200.00 | 30 | salesmgr + 7521 | WARD | SALESMAN | 1250.00 | 937.50 | 30 | salesmgr + 7654 | MARTIN | SALESMAN | 1250.00 | 937.50 | 30 | salesmgr + 7698 | BLAKE | MANAGER | 2850.00 | 2137.50 | 30 | salesmgr + 7844 | TURNER | SALESMAN | 1500.00 | 1125.00 | 30 | salesmgr + 7900 | JAMES | CLERK | 950.00 | 712.50 | 30 | salesmgr +(14 rows) +``` + +Furthermore, since the `update_check` parameter was set to `TRUE` in the `ADD_POLICY` procedure, the following `INSERT` command throws an exception since the value given for the `authid` column, `researchmgr`, does not match the session user, which is `salesmgr`, and hence, fails the policy. + +```text +edb=> INSERT INTO vpemp VALUES (9001,'SMITH','ANALYST',3200.00,NULL,20, +'researchmgr'); +ERROR: policy with check option violation +DETAIL: Policy predicate was evaluated to FALSE with the updated values +``` + +If `update_check` was set to `FALSE`, the preceding `INSERT` command would have succeeded. + +The following example illustrates the use of the `sec_relevant_cols` parameter to apply a policy only when certain columns are referenced in the SQL command. The following policy function is used for this example, which selects rows where the employee salary is less than `2000`. 
+
+```text
+CREATE OR REPLACE FUNCTION sal_lt_2000 (
+    p_schema        VARCHAR2,
+    p_object        VARCHAR2
+)
+RETURN VARCHAR2
+IS
+BEGIN
+    RETURN 'sal < 2000';
+END;
+```
+
+The policy is created so that it is enforced only if a `SELECT` command includes the `sal` or `comm` columns:
+
+```text
+DECLARE
+    v_object_schema         VARCHAR2(30) := 'public';
+    v_object_name           VARCHAR2(30) := 'vpemp';
+    v_policy_name           VARCHAR2(30) := 'secure_salary';
+    v_function_schema       VARCHAR2(30) := 'enterprisedb';
+    v_policy_function       VARCHAR2(30) := 'sal_lt_2000';
+    v_statement_types       VARCHAR2(30) := 'SELECT';
+    v_sec_relevant_cols     VARCHAR2(30) := 'sal,comm';
+BEGIN
+    DBMS_RLS.ADD_POLICY(
+        v_object_schema,
+        v_object_name,
+        v_policy_name,
+        v_function_schema,
+        v_policy_function,
+        v_statement_types,
+        sec_relevant_cols => v_sec_relevant_cols
+    );
+END;
+```
+
+If a query does not reference the `sal` or `comm` columns, the policy is not applied. The following query returns all 14 rows of table `vpemp`:
+
+```text
+edb=# SELECT empno, ename, job, deptno, authid FROM vpemp;
+ empno | ename  |    job    | deptno |   authid
+-------+--------+-----------+--------+-------------
+  7782 | CLARK  | MANAGER   |     10 |
+  7839 | KING   | PRESIDENT |     10 |
+  7934 | MILLER | CLERK     |     10 |
+  7369 | SMITH  | CLERK     |     20 | researchmgr
+  7566 | JONES  | MANAGER   |     20 | researchmgr
+  7788 | SCOTT  | ANALYST   |     20 | researchmgr
+  7876 | ADAMS  | CLERK     |     20 | researchmgr
+  7902 | FORD   | ANALYST   |     20 | researchmgr
+  7499 | ALLEN  | SALESMAN  |     30 | salesmgr
+  7521 | WARD   | SALESMAN  |     30 | salesmgr
+  7654 | MARTIN | SALESMAN  |     30 | salesmgr
+  7698 | BLAKE  | MANAGER   |     30 | salesmgr
+  7844 | TURNER | SALESMAN  |     30 | salesmgr
+  7900 | JAMES  | CLERK     |     30 | salesmgr
+(14 rows)
+```
+
+If the query references the `sal` or `comm` columns, the policy is applied to the query, eliminating any rows where `sal` is greater than or equal to `2000`, as shown by the following:
+
+```text
+edb=# SELECT empno, ename, job, sal, comm, deptno, authid FROM vpemp;
+ empno | ename  |   job    |   sal   |  comm   | deptno |   authid
+-------+--------+----------+---------+---------+--------+-------------
+  7934 | MILLER | CLERK    | 1300.00 |         |     10 |
+  7369 | SMITH  | CLERK    |  800.00 |         |     20 | researchmgr
+  7876 | ADAMS  | CLERK    | 1100.00 |         |     20 | researchmgr
+  7499 | ALLEN  | SALESMAN | 1600.00 | 1200.00 |     30 | salesmgr
+  7521 | WARD   | SALESMAN | 1250.00 |  937.50 |     30 | salesmgr
+  7654 | MARTIN | SALESMAN | 1250.00 |  937.50 |     30 | salesmgr
+  7844 | TURNER | SALESMAN | 1500.00 | 1125.00 |     30 | salesmgr
+  7900 | JAMES  | CLERK    |  950.00 |  712.50 |     30 | salesmgr
+(8 rows)
+```
+
+## DROP_POLICY
+
+The `DROP_POLICY` procedure deletes an existing policy. The policy function and database object associated with the policy are not deleted by the `DROP_POLICY` procedure.
+
+You must be a superuser to execute this procedure.
+
+```text
+DROP_POLICY(object_schema VARCHAR2, object_name VARCHAR2,
+  policy_name VARCHAR2)
+```
+
+**Parameters**
+
+`object_schema`
+
+    Name of the schema containing the database object to which the policy applies.
+
+`object_name`
+
+    Name of the database object to which the policy applies.
+
+`policy_name`
+
+    Name of the policy to be deleted.
+
+**Examples**
+
+The following example deletes policy `secure_update` on table `public.vpemp`:
+
+```text
+DECLARE
+    v_object_schema         VARCHAR2(30) := 'public';
+    v_object_name           VARCHAR2(30) := 'vpemp';
+    v_policy_name           VARCHAR2(30) := 'secure_update';
+BEGIN
+    DBMS_RLS.DROP_POLICY(
+        v_object_schema,
+        v_object_name,
+        v_policy_name
+    );
+END;
+```
+
+## ENABLE_POLICY
+
+The `ENABLE_POLICY` procedure enables or disables an existing policy on the specified database object.
+
+You must be a superuser to execute this procedure.
+
+```text
+ENABLE_POLICY(object_schema VARCHAR2, object_name VARCHAR2,
+  policy_name VARCHAR2, enable BOOLEAN)
+```
+
+**Parameters**
+
+`object_schema`
+
+    Name of the schema containing the database object to which the policy applies.
+
+`object_name`
+
+    Name of the database object to which the policy applies.
+
+`policy_name`
+
+    Name of the policy to be enabled or disabled.
+
+`enable`
+
+    When set to `TRUE`, the policy is enabled. When set to `FALSE`, the policy is disabled.
+
+**Examples**
+
+The following example disables policy `secure_update` on table `public.vpemp`:
+
+```text
+DECLARE
+    v_object_schema         VARCHAR2(30) := 'public';
+    v_object_name           VARCHAR2(30) := 'vpemp';
+    v_policy_name           VARCHAR2(30) := 'secure_update';
+    v_enable                BOOLEAN := FALSE;
+BEGIN
+    DBMS_RLS.ENABLE_POLICY(
+        v_object_schema,
+        v_object_name,
+        v_policy_name,
+        v_enable
+    );
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/01_using_calendar_syntax_to_specify_a_repeating_interval.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/01_using_calendar_syntax_to_specify_a_repeating_interval.mdx
new file mode 100644
index 00000000000..ab386d98016
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/01_using_calendar_syntax_to_specify_a_repeating_interval.mdx
@@ -0,0 +1,38 @@
+---
+title: "Using Calendar Syntax to Specify a Repeating Interval"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/using_calendar_syntax_to_specify_a_repeating_interval.html"
+---
+
+The `CREATE_JOB` and `CREATE_SCHEDULE` procedures use Oracle-styled calendar syntax to define the interval with which a job or schedule is repeated. You provide the scheduling information in the `repeat_interval` parameter of each procedure.
+
+`repeat_interval` is a value (or series of values) that defines the interval between the executions of the scheduled job. Each value is composed of a token, followed by an equal sign, followed by the unit (or units) on which the schedule will execute. Multiple token values must be separated by a semi-colon (;).
+
+For example, the following value defines a schedule that executes each weeknight at 5:45 pm:
+
+  `FREQ=DAILY;BYDAY=MON,TUE,WED,THU,FRI;BYHOUR=17;BYMINUTE=45`
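+
+As a second illustration (a hypothetical schedule, shown only to demonstrate the syntax using the tokens described in the table below), the following value defines a schedule that executes at 8:30 am on the first day of each month:
+
+  `FREQ=MONTHLY;BYMONTHDAY=1;BYHOUR=8;BYMINUTE=30`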
+ +The token types and syntax described in the table below are supported by Advanced Server: + +| Token type | Syntax | Valid Values | +| ------------ | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `FREQ` | `FREQ=predefined_interval` | Where `predefined_interval` is one of the following: `YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY`. The `SECONDLY` keyword is not supported. | +| `BYMONTH` | `BYMONTH=month(, month)...` | Where `month` is the three-letter abbreviation of the month name: `JAN \| FEB \| MAR \| APR \| MAY \| JUN \| JUL \| AUG \| SEP \| OCT \| NOV \| DEC` | +| `BYMONTH` | `BYMONTH=month (, month)...` | Where `month` is the numeric value representing the month: `1 \| 2 \| 3 \| 4 \| 5 \| 6 \| 7 \| 8 \| 9 \| 10 \| 11 \| 12` | +| `BYMONTHDAY` | `BYMONTHDAY=day_of_month` | Where `day_of_month` is a value from `1` through `31` | +| `BYDAY` | `BYDAY=weekday` | Where `weekday` is a three-letter abbreviation or single-digit value representing the day of the week. | +| | | `Monday` \| `MON` \| `1` \| | +| | | `Tuesday` \| `TUE` \| `2` \| | +| | | `Wednesday` \| `WED` \| `3` \| | +| | | `Thursday` \| `THU` \| `4` \| | +| | | `Friday` \| `FRI` \| `5` \| | +| | | `Saturday` \| `SAT` \| `6` \| | +| | | `Sunday` \| `SUN` \| `7` \| | +| `BYDATE` | `BYDATE=date(, date)...` | Where date is `YYYYMMDD`.

`YYYY` is a four-digit representation of the year,
`MM` is a two-digit representation of the month,
and `DD` is a two-digit representation of the day. |
+| `BYDATE` | `BYDATE=date(, date)...` | Where date is `MMDD`.

`MM` is a two-digit representation of the month,
and `DD` is a two-digit representation of the day. |
+| `BYHOUR` | `BYHOUR=hour` | Where `hour` is a value from `0` through `23`. |
+| `BYMINUTE` | `BYMINUTE=minute` | Where `minute` is a value from `0` through `59`. |
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/02_create_job.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/02_create_job.mdx
new file mode 100644
index 00000000000..96f287ba26b
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/02_create_job.mdx
@@ -0,0 +1,115 @@
+---
+title: "CREATE_JOB"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/create_job.html"
+---
+
+Use the `CREATE_JOB` procedure to create a job. The procedure comes in two forms; the first form of the procedure specifies a schedule within the job definition, as well as a job action that will be invoked when the job executes:
+
+```text
+CREATE_JOB(
+    job_name IN VARCHAR2,
+    job_type IN VARCHAR2,
+    job_action IN VARCHAR2,
+    number_of_arguments IN PLS_INTEGER DEFAULT 0,
+    start_date IN TIMESTAMP WITH TIME ZONE DEFAULT NULL,
+    repeat_interval IN VARCHAR2 DEFAULT NULL,
+    end_date IN TIMESTAMP WITH TIME ZONE DEFAULT NULL,
+    job_class IN VARCHAR2 DEFAULT 'DEFAULT_JOB_CLASS',
+    enabled IN BOOLEAN DEFAULT FALSE,
+    auto_drop IN BOOLEAN DEFAULT TRUE,
+    comments IN VARCHAR2 DEFAULT NULL)
+```
+
+The second form uses a job schedule to specify the schedule on which the job will execute, and specifies the name of a program that will execute when the job runs:
+
+```text
+CREATE_JOB(
+    job_name IN VARCHAR2,
+    program_name IN VARCHAR2,
+    schedule_name IN VARCHAR2,
+    job_class IN VARCHAR2 DEFAULT 'DEFAULT_JOB_CLASS',
+    enabled IN BOOLEAN DEFAULT FALSE,
+    auto_drop IN BOOLEAN DEFAULT TRUE,
+    comments IN VARCHAR2 DEFAULT NULL)
+```
+
+**Parameters**
+
+`job_name`
+
+    `job_name` specifies the optionally schema-qualified name of the job being created.
+
+`job_type`
+
+    `job_type` specifies the type of job. The current implementation of `CREATE_JOB` supports a job type of `PLSQL_BLOCK` or `STORED_PROCEDURE`.
+
+`job_action`
+
+- If `job_type` is `PLSQL_BLOCK`, `job_action` specifies the content of the PL/SQL block that will be invoked when the job executes. The block must be terminated with a semi-colon (;).
+
+- If `job_type` is `STORED_PROCEDURE`, `job_action` specifies the optionally schema-qualified name of the procedure.
+
+`number_of_arguments`
+
+    `number_of_arguments` is an `INTEGER` value that specifies the number of arguments expected by the job. The default is `0`.
+
+`start_date`
+
+    `start_date` is a `TIMESTAMP WITH TIME ZONE` value that specifies the first time that the job is scheduled to execute. The default value is `NULL`, indicating that the job should be scheduled to execute when the job is enabled.
+
+`repeat_interval`
+
+    `repeat_interval` is a `VARCHAR2` value that specifies how often the job will repeat. If a `repeat_interval` is not specified, the job will execute only once. The default value is `NULL`.
+
+`end_date`
+
+    `end_date` is a `TIMESTAMP WITH TIME ZONE` value that specifies a time after which the job will no longer execute. If a date is specified, the `end_date` must be after `start_date`. The default value is `NULL`.
+
+    Note that if an `end_date` is not specified and a `repeat_interval` is specified, the job will repeat indefinitely until it is disabled.
+
+`program_name`
+
+    `program_name` is the name of a program that will be executed by the job.
+
+`schedule_name`
+
+    `schedule_name` is the name of the schedule associated with the job.
+
+`job_class`
+
+    `job_class` is accepted for compatibility and ignored.
+
+`enabled`
+
+    `enabled` is a `BOOLEAN` value that specifies if the job is enabled when created. By default, a job is created in a disabled state, with `enabled` set to `FALSE`. To enable a job, specify a value of `TRUE` when creating the job, or enable the job with the `DBMS_SCHEDULER.ENABLE` procedure.
+
+`auto_drop`
+
+    The `auto_drop` parameter is accepted for compatibility and is ignored. By default, a job's status will be changed to `DISABLED` after the time specified in `end_date`.
+
+`comments`
+
+    Use the `comments` parameter to specify a comment about the job.
+
+**Example**
+
+The following example demonstrates a call to the `CREATE_JOB` procedure:
+
+```text
+EXEC
+  DBMS_SCHEDULER.CREATE_JOB (
+    job_name        => 'update_log',
+    job_type        => 'PLSQL_BLOCK',
+    job_action      => 'BEGIN INSERT INTO my_log VALUES(current_timestamp);
+                        END;',
+    start_date      => '01-JUN-15 09:00:00.000000',
+    repeat_interval => 'FREQ=DAILY;BYDAY=MON,TUE,WED,THU,FRI;BYHOUR=17;',
+    end_date        => NULL,
+    enabled         => TRUE,
+    comments        => 'This job adds a row to the my_log table.');
+```
+
+The code fragment creates a job named `update_log` that executes each weeknight at 5:00 pm. The job executes a PL/SQL block that inserts the current timestamp into a logfile (`my_log`). Since no `end_date` is specified, the job will execute until it is disabled by the `DBMS_SCHEDULER.DISABLE` procedure.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/03_create_program.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/03_create_program.mdx
new file mode 100644
index 00000000000..639fbb972d0
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/03_create_program.mdx
@@ -0,0 +1,71 @@
+---
+title: "CREATE_PROGRAM"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/create_program.html"
+---
+
+Use the `CREATE_PROGRAM` procedure to create a `DBMS_SCHEDULER` program. The signature is:
+
+```text
+CREATE_PROGRAM(
+    program_name IN VARCHAR2,
+    program_type IN VARCHAR2,
+    program_action IN VARCHAR2,
+    number_of_arguments IN PLS_INTEGER DEFAULT 0,
+    enabled IN BOOLEAN DEFAULT FALSE,
+    comments IN VARCHAR2 DEFAULT NULL)
+```
+
+**Parameters**
+
+`program_name`
+
+    `program_name` specifies the name of the program that is being created.
+
+`program_type`
+
+    `program_type` specifies the type of program. The current implementation of `CREATE_PROGRAM` supports a `program_type` of `PLSQL_BLOCK` or `PROCEDURE`.
+
+`program_action`
+
+- If `program_type` is `PLSQL_BLOCK`, `program_action` contains the PL/SQL block that will execute when the program is invoked. The PL/SQL block must be terminated with a semi-colon (;).
+
+- If `program_type` is `PROCEDURE`, `program_action` contains the name of the stored procedure.
+
+`number_of_arguments`
+
+- If `program_type` is `PLSQL_BLOCK`, this argument is ignored.
+
+- If `program_type` is `PROCEDURE`, `number_of_arguments` specifies the number of arguments required by the procedure. The default value is `0`.
+
+`enabled`
+
+    `enabled` specifies if the program is created enabled or disabled:
+
+- If `enabled` is `TRUE`, the program is created enabled.
+- If `enabled` is `FALSE`, the program is created disabled; use the `DBMS_SCHEDULER.ENABLE` procedure to enable a disabled program.
+
+    The default value is `FALSE`.
+
+`comments`
+
+    Use the `comments` parameter to specify a comment about the program; by default, this parameter is `NULL`.
+
+**Example**
+
+The following call to the `CREATE_PROGRAM` procedure creates a program named `update_log`:
+
+```text
+EXEC
+  DBMS_SCHEDULER.CREATE_PROGRAM (
+    program_name    => 'update_log',
+    program_type    => 'PLSQL_BLOCK',
+    program_action  => 'BEGIN INSERT INTO my_log VALUES(current_timestamp);
+                        END;',
+    enabled         => TRUE,
+    comments        => 'This program adds a row to the my_log table.');
+```
+
+`update_log` is a PL/SQL block that adds a row containing the current date and time to the `my_log` table. The program will be enabled when the `CREATE_PROGRAM` procedure executes.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/04_create_schedule.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/04_create_schedule.mdx
new file mode 100644
index 00000000000..5cc8d9e5c42
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/04_create_schedule.mdx
@@ -0,0 +1,59 @@
+---
+title: "CREATE_SCHEDULE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/create_schedule.html"
+---
+
+Use the `CREATE_SCHEDULE` procedure to create a job schedule. The signature of the `CREATE_SCHEDULE` procedure is:
+
+```text
+CREATE_SCHEDULE(
+    schedule_name IN VARCHAR2,
+    start_date IN TIMESTAMP WITH TIME ZONE DEFAULT NULL,
+    repeat_interval IN VARCHAR2,
+    end_date IN TIMESTAMP WITH TIME ZONE DEFAULT NULL,
+    comments IN VARCHAR2 DEFAULT NULL)
+```
+
+**Parameters**
+
+`schedule_name`
+
+    `schedule_name` specifies the name of the schedule.
+
+`start_date`
+
+    `start_date` is a `TIMESTAMP WITH TIME ZONE` value that specifies the date and time that the schedule is eligible to execute. If a `start_date` is not specified, the date that the job is enabled is used as the `start_date`. By default, `start_date` is `NULL`.
+
+`repeat_interval`
+
+    `repeat_interval` is a `VARCHAR2` value that specifies how often the job will repeat. If a `repeat_interval` is not specified, the job will execute only once, on the date specified by `start_date`.
+
+    **Note**: You must provide a value for either `start_date` or `repeat_interval`; if both `start_date` and `repeat_interval` are `NULL`, the server will return an error.
+
+`end_date`
+
+    `end_date` is a `TIMESTAMP WITH TIME ZONE` value that specifies a time after which the schedule will no longer execute. If a date is specified, the `end_date` must be after the `start_date`. The default value is `NULL`.
+
+    **Note**: If a `repeat_interval` is specified and an `end_date` is not specified, the schedule will repeat indefinitely until it is disabled.
+
+`comments`
+
+    Use the `comments` parameter to specify a comment about the schedule; by default, this parameter is `NULL`.
+
+**Example**
+
+The following code fragment calls `CREATE_SCHEDULE` to create a schedule named `weeknights_at_5`:
+
+```text
+EXEC
+  DBMS_SCHEDULER.CREATE_SCHEDULE (
+    schedule_name   => 'weeknights_at_5',
+    start_date      => '01-JUN-13 09:00:00.000000',
+    repeat_interval => 'FREQ=DAILY;BYDAY=MON,TUE,WED,THU,FRI;BYHOUR=17;',
+    comments        => 'This schedule executes each weeknight at 5:00');
+```
+
+The schedule executes each weeknight at 5:00 pm, effective June 1, 2013. Since no `end_date` is specified, the schedule will execute indefinitely until it is disabled with `DBMS_SCHEDULER.DISABLE`.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/05_define_program_argument.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/05_define_program_argument.mdx
new file mode 100644
index 00000000000..b023f316224
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/05_define_program_argument.mdx
@@ -0,0 +1,78 @@
+---
+title: "DEFINE_PROGRAM_ARGUMENT"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/define_program_argument.html"
+---
+
+Use the `DEFINE_PROGRAM_ARGUMENT` procedure to define a program argument. The `DEFINE_PROGRAM_ARGUMENT` procedure comes in two forms; the first form defines an argument with a default value:
+
+```text
+DEFINE_PROGRAM_ARGUMENT(
+    program_name IN VARCHAR2,
+    argument_position IN PLS_INTEGER,
+    argument_name IN VARCHAR2 DEFAULT NULL,
+    argument_type IN VARCHAR2,
+    default_value IN VARCHAR2,
+    out_argument IN BOOLEAN DEFAULT FALSE)
+```
+
+The second form defines an argument without a default value:
+
+```text
+DEFINE_PROGRAM_ARGUMENT(
+    program_name IN VARCHAR2,
+    argument_position IN PLS_INTEGER,
+    argument_name IN VARCHAR2 DEFAULT NULL,
+    argument_type IN VARCHAR2,
+    out_argument IN BOOLEAN DEFAULT FALSE)
+```
+
+**Parameters**
+
+`program_name`
+
+    `program_name` is the name of the program to which the arguments belong.
+
+`argument_position`
+
+    `argument_position` specifies the position of the argument as it is passed to the program.
+
+`argument_name`
+
+    `argument_name` specifies the optional name of the argument. By default, `argument_name` is `NULL`.
+
+`argument_type`
+
+    `argument_type` specifies the data type of the argument.
+
+`default_value`
+
+    `default_value` specifies the default value assigned to the argument. `default_value` will be overridden by a value specified by the job when the job executes.
+
+`out_argument`
+
+    `out_argument` is not currently used; if specified, the value must be `FALSE`.
+
+**Example**
+
+The following code fragment uses the `DEFINE_PROGRAM_ARGUMENT` procedure to define the first and second arguments in a program named `add_emp`:
+
+```text
+EXEC
+  DBMS_SCHEDULER.DEFINE_PROGRAM_ARGUMENT(
+    program_name      => 'add_emp',
+    argument_position => 1,
+    argument_name     => 'dept_no',
+    argument_type     => 'INTEGER',
+    default_value     => '20');
+EXEC
+  DBMS_SCHEDULER.DEFINE_PROGRAM_ARGUMENT(
+    program_name      => 'add_emp',
+    argument_position => 2,
+    argument_name     => 'emp_name',
+    argument_type     => 'VARCHAR2');
+```
+
+The first argument is an `INTEGER` value named `dept_no` that has a default value of `20`. The second argument is a `VARCHAR2` value named `emp_name`; the second argument does not have a default value.
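+
+For context, `DEFINE_PROGRAM_ARGUMENT` only describes the arguments; assuming the program was created with `program_type => 'PROCEDURE'`, its `program_action` must name a stored procedure whose signature matches them. The following is a minimal sketch (a hypothetical procedure, not shipped with Advanced Server) of what a matching `add_emp` procedure might look like:
+
+```text
+CREATE OR REPLACE PROCEDURE add_emp (
+    dept_no   INTEGER,
+    emp_name  VARCHAR2
+)
+IS
+BEGIN
+    -- Illustrative body only: report the values the scheduler passed in.
+    DBMS_OUTPUT.PUT_LINE('Adding employee ' || emp_name ||
+                         ' to department ' || dept_no);
+END;
+```
+
+When a job that uses the `add_emp` program runs, the value supplied for `dept_no` (or its default of `20`) and the value supplied for `emp_name` are passed to the procedure in the positions defined above.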
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/06_dbms_scheduler_disable.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/06_dbms_scheduler_disable.mdx
new file mode 100644
index 00000000000..dc0981c03fe
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/06_dbms_scheduler_disable.mdx
@@ -0,0 +1,42 @@
+---
+title: "DISABLE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_scheduler_disable.html"
+---
+
+
+
+Use the `DISABLE` procedure to disable a program or a job. The signature of the `DISABLE` procedure is:
+
+```text
+DISABLE(
+    name IN VARCHAR2,
+    force IN BOOLEAN DEFAULT FALSE,
+    commit_semantics IN VARCHAR2 DEFAULT 'STOP_ON_FIRST_ERROR')
+```
+
+**Parameters**
+
+`name`
+
+    `name` specifies the name of the program or job that is being disabled.
+
+`force`
+
+    `force` is accepted for compatibility, and ignored.
+
+`commit_semantics`
+
+    `commit_semantics` instructs the server how to handle an error encountered while disabling a program or job. By default, `commit_semantics` is set to `STOP_ON_FIRST_ERROR`, instructing the server to stop when it encounters an error. Any programs or jobs that were successfully disabled prior to the error will be committed to disk.
+
+    The `TRANSACTIONAL` and `ABSORB_ERRORS` keywords are accepted for compatibility, and ignored.
+
+**Example**
+
+The following call to the `DISABLE` procedure disables a program named `update_emp`:
+
+```text
+DBMS_SCHEDULER.DISABLE('update_emp');
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/07_drop_job.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/07_drop_job.mdx
new file mode 100644
index 00000000000..05f4d823e4c
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/07_drop_job.mdx
@@ -0,0 +1,45 @@
+---
+title: "DROP_JOB"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/drop_job.html"
+---
+
+Use the `DROP_JOB` procedure to drop a job, drop any arguments that belong to the job, and eliminate any future executions of the job. The signature of the procedure is:
+
+```text
+DROP_JOB(
+    job_name IN VARCHAR2,
+    force IN BOOLEAN DEFAULT FALSE,
+    defer IN BOOLEAN DEFAULT FALSE,
+    commit_semantics IN VARCHAR2 DEFAULT 'STOP_ON_FIRST_ERROR')
+```
+
+**Parameters**
+
+`job_name`
+
+    `job_name` specifies the name of the job that is being dropped.
+
+`force`
+
+    `force` is accepted for compatibility, and ignored.
+
+`defer`
+
+    `defer` is accepted for compatibility, and ignored.
+
+`commit_semantics`
+
+    `commit_semantics` instructs the server how to handle an error encountered while dropping a program or job. By default, `commit_semantics` is set to `STOP_ON_FIRST_ERROR`, instructing the server to stop when it encounters an error.
+
+    The `TRANSACTIONAL` and `ABSORB_ERRORS` keywords are accepted for compatibility, and ignored.
+
+**Example**
+
+The following call to `DROP_JOB` drops a job named `update_log`:
+
+```text
+DBMS_SCHEDULER.DROP_JOB('update_log');
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/08_drop_program.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/08_drop_program.mdx
new file mode 100644
index 00000000000..d51e87cafef
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/08_drop_program.mdx
@@ -0,0 +1,39 @@
+---
+title: "DROP_PROGRAM"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/drop_program.html"
+---
+
+Use the `DROP_PROGRAM` procedure to drop a program. The signature of the `DROP_PROGRAM` procedure is:
+
+```text
+DROP_PROGRAM(
+    program_name IN VARCHAR2,
+    force IN BOOLEAN DEFAULT FALSE)
+```
+
+**Parameters**
+
+`program_name`
+
+    `program_name` specifies the name of the program that is being dropped.
+
+`force`
+
+    `force` is a `BOOLEAN` value that instructs the server how to handle programs with dependent jobs.
+
+- Specify `FALSE` to instruct the server to return an error if the program is referenced by a job.
+
+- Specify `TRUE` to instruct the server to disable any jobs that reference the program before dropping the program.
+
+    The default value is `FALSE`.
+
+**Example**
+
+The following call to `DROP_PROGRAM` drops a program named `update_emp`:
+
+```text
+DBMS_SCHEDULER.DROP_PROGRAM('update_emp');
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/09_drop_program_argument.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/09_drop_program_argument.mdx
new file mode 100644
index 00000000000..2b7e5cf43e6
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/09_drop_program_argument.mdx
@@ -0,0 +1,51 @@
+---
+title: "DROP_PROGRAM_ARGUMENT"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/drop_program_argument.html"
+---
+
+Use the `DROP_PROGRAM_ARGUMENT` procedure to drop a program argument. The `DROP_PROGRAM_ARGUMENT` procedure comes in two forms; the first form uses an argument position to specify which argument to drop:
+
+```text
+DROP_PROGRAM_ARGUMENT(
+    program_name IN VARCHAR2,
+    argument_position IN PLS_INTEGER)
+```
+
+The second form takes the argument name:
+
+```text
+DROP_PROGRAM_ARGUMENT(
+    program_name IN VARCHAR2,
+    argument_name IN VARCHAR2)
+```
+
+**Parameters**
+
+`program_name`
+
+    `program_name` specifies the name of the program that is being modified.
+
+`argument_position`
+
+    `argument_position` specifies the position of the argument that is being dropped.
+
+`argument_name`
+
+    `argument_name` specifies the name of the argument that is being dropped.
+
+**Examples**
+
+The following call to `DROP_PROGRAM_ARGUMENT` drops the first argument in the `update_emp` program:
+
+```text
+DBMS_SCHEDULER.DROP_PROGRAM_ARGUMENT('update_emp', 1);
+```
+
+The following call to `DROP_PROGRAM_ARGUMENT` drops an argument named `emp_name`:
+
+```text
+DBMS_SCHEDULER.DROP_PROGRAM_ARGUMENT('update_emp', 'emp_name');
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/10_drop_schedule.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/10_drop_schedule.mdx
new file mode 100644
index 00000000000..0079e41d2ca
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/10_drop_schedule.mdx
@@ -0,0 +1,38 @@
+---
+title: "DROP_SCHEDULE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/drop_schedule.html"
+---
+
+Use the `DROP_SCHEDULE` procedure to drop a schedule. The signature is:
+
+```text
+DROP_SCHEDULE(
+    schedule_name IN VARCHAR2,
+    force IN BOOLEAN DEFAULT FALSE)
+```
+
+**Parameters**
+
+`schedule_name`
+
+    `schedule_name` specifies the name of the schedule that is being dropped.
+
+`force`
+
+    `force` specifies the behavior of the server if the specified schedule is referenced by any job:
+
+- Specify `FALSE` to instruct the server to return an error if the specified schedule is referenced by a job. This is the default behavior.
+- Specify `TRUE` to instruct the server to disable any jobs that use the specified schedule before dropping the schedule. Any running jobs will be allowed to complete before the schedule is dropped.
+
+**Example**
+
+The following call to `DROP_SCHEDULE` drops a schedule named `weeknights_at_5`:
+
+```text
+DBMS_SCHEDULER.DROP_SCHEDULE('weeknights_at_5', TRUE);
+```
+
+The server will disable any jobs that use the schedule before dropping the schedule.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/11_dbms_scheduler_enable.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/11_dbms_scheduler_enable.mdx
new file mode 100644
index 00000000000..71c15a3825c
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/11_dbms_scheduler_enable.mdx
@@ -0,0 +1,39 @@
+---
+title: "ENABLE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_scheduler_enable.html"
+---
+
+
+
+Use the `ENABLE` procedure to enable a disabled program or job.
+
+The signature of the `ENABLE` procedure is:
+
+```text
+ENABLE(
+    name IN VARCHAR2,
+    commit_semantics IN VARCHAR2 DEFAULT 'STOP_ON_FIRST_ERROR')
+```
+
+**Parameters**
+
+`name`
+
+    `name` specifies the name of the program or job that is being enabled.
+
+`commit_semantics`
+
+    `commit_semantics` instructs the server how to handle an error encountered while enabling a program or job. By default, `commit_semantics` is set to `STOP_ON_FIRST_ERROR`, instructing the server to stop when it encounters an error.
+
+    The `TRANSACTIONAL` and `ABSORB_ERRORS` keywords are accepted for compatibility, and ignored.
+
+**Example**
+
+The following call to `DBMS_SCHEDULER.ENABLE` enables the `update_emp` program:
+
+```text
+DBMS_SCHEDULER.ENABLE('update_emp');
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/12_evaluate_calendar_string.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/12_evaluate_calendar_string.mdx
new file mode 100644
index 00000000000..ffd2132e69d
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/12_evaluate_calendar_string.mdx
@@ -0,0 +1,63 @@
+---
+title: "EVALUATE_CALENDAR_STRING"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/evaluate_calendar_string.html"
+---
+
+Use the `EVALUATE_CALENDAR_STRING` procedure to evaluate the `repeat_interval` value specified when creating a schedule with the `CREATE_SCHEDULE` procedure. The `EVALUATE_CALENDAR_STRING` procedure will return the date and time that a specified schedule will execute without actually scheduling the job.
+
+The signature of the `EVALUATE_CALENDAR_STRING` procedure is:
+
+```text
+EVALUATE_CALENDAR_STRING(
+    calendar_string IN VARCHAR2,
+    start_date IN TIMESTAMP WITH TIME ZONE,
+    return_date_after IN TIMESTAMP WITH TIME ZONE,
+    next_run_date OUT TIMESTAMP WITH TIME ZONE)
+```
+
+**Parameters**
+
+`calendar_string`
+
+    `calendar_string` is the calendar string that describes the `repeat_interval` that is being evaluated.
+
+`start_date`
+
+    `start_date` is a `TIMESTAMP WITH TIME ZONE` value that specifies the date and time after which the `repeat_interval` will become valid.
+
+`return_date_after`
+
+    Use the `return_date_after` parameter to specify the date and time that `EVALUATE_CALENDAR_STRING` should use as a starting date when evaluating the `repeat_interval`.
+
+    For example, if you specify a `return_date_after` value of `01-APR-13 09.00.00.000000`, `EVALUATE_CALENDAR_STRING` will return the date and time of the first iteration of the schedule after April 1st, 2013.
+
+`next_run_date`
+
+    `next_run_date` is an `OUT` parameter of type `TIMESTAMP WITH TIME ZONE` that will contain the first occurrence of the schedule after the date specified by the `return_date_after` parameter.
+
+**Example**
+
+The following example evaluates a calendar string and returns the first date and time that the schedule will be executed after June 15, 2013:
+
+```text
+DECLARE
+    result          TIMESTAMP;
+BEGIN
+
+    DBMS_SCHEDULER.EVALUATE_CALENDAR_STRING
+    (
+        'FREQ=DAILY;BYDAY=MON,TUE,WED,THU,FRI;BYHOUR=17;',
+        '15-JUN-2013', NULL, result
+    );
+
+    DBMS_OUTPUT.PUT_LINE('next_run_date: ' || result);
+END;
+/
+
+next_run_date: 17-JUN-13 05.00.00.000000 PM
+```
+
+June 15, 2013 is a Saturday; the schedule will not execute until Monday, June 17, 2013 at 5:00 pm.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/13_run_job.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/13_run_job.mdx
new file mode 100644
index 00000000000..5e00845397e
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/13_run_job.mdx
@@ -0,0 +1,35 @@
+---
+title: "RUN_JOB"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/run_job.html"
+---
+
+Use the `RUN_JOB` procedure to execute a job immediately. The signature of the `RUN_JOB` procedure is:
+
+```text
+RUN_JOB(
+   IN VARCHAR2,
+   IN BOOLEAN DEFAULT TRUE)
+```
+
+**Parameters**
+
+`job_name`
+
+ `job_name` specifies the name of the job that will execute.
+
+`use_current_session`
+
+ By default, the job will execute in the current session. If specified, `use_current_session` must be set to `TRUE`; if `use_current_session` is set to `FALSE`, Advanced Server will return an error.
+
+**Example**
+
+The following call to `RUN_JOB` executes a job named `update_log`:
+
+```text
+DBMS_SCHEDULER.RUN_JOB('update_log', TRUE);
+```
+
+Passing a value of `TRUE` as the second argument instructs the server to invoke the job in the current session.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/14_set_job_argument_value.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/14_set_job_argument_value.mdx
new file mode 100644
index 00000000000..f3151869b7a
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/14_set_job_argument_value.mdx
@@ -0,0 +1,59 @@
+---
+title: "SET_JOB_ARGUMENT_VALUE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/set_job_argument_value.html"
+---
+
+Use the `SET_JOB_ARGUMENT_VALUE` procedure to specify a value for an argument. The `SET_JOB_ARGUMENT_VALUE` procedure comes in two forms; the first form specifies which argument should be modified by position:
+
+```text
+SET_JOB_ARGUMENT_VALUE(
+   IN VARCHAR2,
+   IN PLS_INTEGER,
+   IN VARCHAR2)
+```
+
+The second form uses an argument name to specify which argument to modify:
+
+```text
+SET_JOB_ARGUMENT_VALUE(
+   IN VARCHAR2,
+   IN VARCHAR2,
+   IN VARCHAR2)
+```
+
+Argument values set by the `SET_JOB_ARGUMENT_VALUE` procedure override any values set by default.
+
+**Parameters**
+
+`job_name`
+
+ `job_name` specifies the name of the job to which the modified argument belongs.
+
+`argument_position`
+
+ Use `argument_position` to specify the argument position for which the value will be set.
+
+`argument_name`
+
+ Use `argument_name` to specify the argument by name for which the value will be set.
+
+`argument_value`
+
+ `argument_value` specifies the new value of the argument.
+
+**Examples**
+
+The following example assigns a value of `30` to the first argument in the `update_emp` job:
+
+```text
+DBMS_SCHEDULER.SET_JOB_ARGUMENT_VALUE('update_emp', 1, '30');
+```
+
+The following example sets the `emp_name` argument to `SMITH`:
+
+```text
+DBMS_SCHEDULER.SET_JOB_ARGUMENT_VALUE('update_emp', 'emp_name', 'SMITH');
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/index.mdx
new file mode 100644
index 00000000000..81823c2e9a9
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/15_dbms_scheduler/index.mdx
@@ -0,0 +1,49 @@
+---
+title: "DBMS_SCHEDULER"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_scheduler.html"
+---
+
+The `DBMS_SCHEDULER` package provides a way to create and manage Oracle-styled jobs, programs and job schedules. The `DBMS_SCHEDULER` package implements the following functions and procedures:
+
+| Function/Procedure | Return Type | Description |
+| ------------------ | ----------- | ----------- |
+| `CREATE_JOB(job_name, job_type, job_action, number_of_arguments, start_date, repeat_interval, end_date, job_class, enabled, auto_drop, comments)` | n/a | Use the first form of the `CREATE_JOB` procedure to create a job, specifying program and schedule details by means of parameters. |
+| `CREATE_JOB(job_name, program_name, schedule_name, job_class, enabled, auto_drop, comments)` | n/a | Use the second form of `CREATE_JOB` to create a job that uses a named program and named schedule. |
+| `CREATE_PROGRAM(program_name, program_type, program_action, number_of_arguments, enabled, comments)` | n/a | Use `CREATE_PROGRAM` to create a program. |
+| `CREATE_SCHEDULE(schedule_name, start_date, repeat_interval, end_date, comments)` | n/a | Use the `CREATE_SCHEDULE` procedure to create a schedule. |
+| `DEFINE_PROGRAM_ARGUMENT(program_name, argument_position, argument_name, argument_type, default_value, out_argument)` | n/a | Use the first form of the `DEFINE_PROGRAM_ARGUMENT` procedure to define a program argument that has a default value. |
+| `DEFINE_PROGRAM_ARGUMENT(program_name, argument_position, argument_name, argument_type, out_argument)` | n/a | Use the second form of the `DEFINE_PROGRAM_ARGUMENT` procedure to define a program argument that does not have a default value. |
+| `DISABLE(name, force, commit_semantics)` | n/a | Use the `DISABLE` procedure to disable a job or program. |
+| `DROP_JOB(job_name, force, defer, commit_semantics)` | n/a | Use the `DROP_JOB` procedure to drop a job. |
+| `DROP_PROGRAM(program_name, force)` | n/a | Use the `DROP_PROGRAM` procedure to drop a program. |
+| `DROP_PROGRAM_ARGUMENT(program_name, argument_position)` | n/a | Use the first form of `DROP_PROGRAM_ARGUMENT` to drop a program argument by specifying the argument position. |
+| `DROP_PROGRAM_ARGUMENT(program_name, argument_name)` | n/a | Use the second form of `DROP_PROGRAM_ARGUMENT` to drop a program argument by specifying the argument name. |
+| `DROP_SCHEDULE(schedule_name, force)` | n/a | Use the `DROP_SCHEDULE` procedure to drop a schedule. |
+| `ENABLE(name, commit_semantics)` | n/a | Use the `ENABLE` procedure to enable a program or job. |
+| `EVALUATE_CALENDAR_STRING(calendar_string, start_date, return_date_after, next_run_date)` | n/a | Use `EVALUATE_CALENDAR_STRING` to review the execution date described by a user-defined calendar schedule. |
+| `RUN_JOB(job_name, use_current_session)` | n/a | Use the `RUN_JOB` procedure to execute a job immediately. |
+| `SET_JOB_ARGUMENT_VALUE(job_name, argument_position, argument_value)` | n/a | Use the first form of `SET_JOB_ARGUMENT_VALUE` to set the value of a job argument described by the argument's position. |
+| `SET_JOB_ARGUMENT_VALUE(job_name, argument_name, argument_value)` | n/a | Use the second form of `SET_JOB_ARGUMENT_VALUE` to set the value of a job argument described by the argument's name. |
+
+Advanced Server's implementation of `DBMS_SCHEDULER` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported.
+
+The `DBMS_SCHEDULER` package is dependent on the pgAgent service; you must have a pgAgent service installed and running on your server before using `DBMS_SCHEDULER`.
+
+Before using `DBMS_SCHEDULER`, a database superuser must create the catalog tables in which the `DBMS_SCHEDULER` programs, schedules and jobs are stored. Use the `psql` client to connect to the database, and invoke the command:
+
+```text
+CREATE EXTENSION dbms_scheduler;
+```
+
+By default, the `dbms_scheduler` extension resides in the `contrib/dbms_scheduler_ext` subdirectory (under the Advanced Server installation).
+
+Note that after creating the `DBMS_SCHEDULER` tables, only a superuser will be able to perform a dump or reload of the database.
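+
+Putting the pieces together, the following block sketches a typical workflow: it creates a program, a schedule, and a job that combines them, using the second form of `CREATE_JOB`. The object names and the program action are illustrative only, and the calls assume that the trailing arguments of each procedure can be left at their defaults:
+
+```text
+BEGIN
+    -- A program that runs an inline SPL block (names are hypothetical).
+    DBMS_SCHEDULER.CREATE_PROGRAM('update_emp_prog', 'PLSQL_BLOCK',
+        'BEGIN UPDATE emp SET sal = sal; END;', 0, TRUE,
+        'Example program');
+    -- A schedule that fires at 5:00 pm on weeknights.
+    DBMS_SCHEDULER.CREATE_SCHEDULE('weeknights_at_5', SYSTIMESTAMP,
+        'FREQ=DAILY;BYDAY=MON,TUE,WED,THU,FRI;BYHOUR=17;', NULL,
+        'Example schedule');
+    -- A job that ties the named program to the named schedule.
+    DBMS_SCHEDULER.CREATE_JOB('update_emp_job', 'update_emp_prog',
+        'weeknights_at_5');
+END;
+```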
+ +using_calendar_syntax_to_specify_a_repeating_interval create_job create_program create_schedule define_program_argument dbms_scheduler_disable drop_job drop_program drop_program_argument drop_schedule dbms_scheduler_enable evaluate_calendar_string run_job set_job_argument_value + +
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/16_dbms_session.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/16_dbms_session.mdx new file mode 100644 index 00000000000..d3bd0b4ca40 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/16_dbms_session.mdx @@ -0,0 +1,39 @@ +--- +title: "DBMS_SESSION" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_session.html" +--- + +Advanced Server provides support for the following `DBMS_SESSION.SET_ROLE` procedure: + +| Function/Procedure | Return Type | Description | +| -------------------- | ----------- | ------------------------------------------------------------------------------------- | +| `SET_ROLE(role_cmd)` | n/a | Executes a `SET ROLE` statement followed by the string value specified in `role_cmd`. | + +Advanced Server's implementation of `DBMS_SESSION` is a partial implementation when compared to Oracle's version. Only `DBMS_SESSION.SET_ROLE` is supported. + +## SET_ROLE + +The `SET_ROLE` procedure sets the current session user to the role specified in `role_cmd`. After invoking the `SET_ROLE` procedure, the current session will use the permissions assigned to the specified role. The signature of the procedure is: + +```text +SET_ROLE() +``` + +The `SET_ROLE` procedure appends the value specified for `role_cmd` to the `SET ROLE` statement, and then invokes the statement. + +**Parameters** + +`role_cmd` + + `role_cmd` specifies a role name in the form of a string value. + +**Example** + +The following call to the `SET_ROLE` procedure invokes the `SET ROLE` command to set the identity of the current session user to manager: + +```text +edb=# exec DBMS_SESSION.SET_ROLE('manager'); +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/01_bind_variable.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/01_bind_variable.mdx new file mode 100644 index 00000000000..624e43d670b --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/01_bind_variable.mdx @@ -0,0 +1,79 @@ +--- +title: "BIND_VARIABLE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/bind_variable.html" +--- + +The `BIND_VARIABLE` procedure provides the capability to associate a value with an `IN` or `IN OUT` bind variable in a SQL command. + +```text +BIND_VARIABLE( INTEGER, VARCHAR2, + { BLOB | CLOB | DATE | FLOAT | INTEGER | NUMBER | TIMESTAMP | VARCHAR2 } + [, INTEGER ]) +``` + +**Parameters** + +`c` + + Cursor ID of the cursor for the SQL command with bind variables. + +`name` + + Name of the bind variable in the SQL command. + +`value` + + Value to be assigned. + +`out_value_size` + + If `name` is an `IN OUT` variable, defines the maximum length of the output value. If not specified, the length of `value` is assumed. + +**Examples** + +The following anonymous block uses bind variables to insert a row into the `emp` table. 
+ +```text +DECLARE + curid INTEGER; + v_sql VARCHAR2(150) := 'INSERT INTO emp VALUES ' || + '(:p_empno, :p_ename, :p_job, :p_mgr, ' || + ':p_hiredate, :p_sal, :p_comm, :p_deptno)'; + v_empno emp.empno%TYPE; + v_ename emp.ename%TYPE; + v_job emp.job%TYPE; + v_mgr emp.mgr%TYPE; + v_hiredate emp.hiredate%TYPE; + v_sal emp.sal%TYPE; + v_comm emp.comm%TYPE; + v_deptno emp.deptno%TYPE; + v_status INTEGER; +BEGIN + curid := DBMS_SQL.OPEN_CURSOR; + DBMS_SQL.PARSE(curid,v_sql,DBMS_SQL.native); + v_empno := 9001; + v_ename := 'JONES'; + v_job := 'SALESMAN'; + v_mgr := 7369; + v_hiredate := TO_DATE('13-DEC-07','DD-MON-YY'); + v_sal := 8500.00; + v_comm := 1500.00; + v_deptno := 40; + DBMS_SQL.BIND_VARIABLE(curid,':p_empno',v_empno); + DBMS_SQL.BIND_VARIABLE(curid,':p_ename',v_ename); + DBMS_SQL.BIND_VARIABLE(curid,':p_job',v_job); + DBMS_SQL.BIND_VARIABLE(curid,':p_mgr',v_mgr); + DBMS_SQL.BIND_VARIABLE(curid,':p_hiredate',v_hiredate); + DBMS_SQL.BIND_VARIABLE(curid,':p_sal',v_sal); + DBMS_SQL.BIND_VARIABLE(curid,':p_comm',v_comm); + DBMS_SQL.BIND_VARIABLE(curid,':p_deptno',v_deptno); + v_status := DBMS_SQL.EXECUTE(curid); + DBMS_OUTPUT.PUT_LINE('Number of rows processed: ' || v_status); + DBMS_SQL.CLOSE_CURSOR(curid); +END; + +Number of rows processed: 1 +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/02_bind_variable_char.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/02_bind_variable_char.mdx new file mode 100644 index 00000000000..29ee06e5dd7 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/02_bind_variable_char.mdx @@ -0,0 +1,32 @@ +--- +title: "BIND_VARIABLE_CHAR" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/bind_variable_char.html" +--- + +The `BIND_VARIABLE_CHAR` procedure provides the capability to associate a `CHAR` value with an `IN` or `IN OUT` bind variable in a SQL command. + +```text +BIND_VARIABLE_CHAR( INTEGER, VARCHAR2, CHAR + [, INTEGER ]) +``` + +**Parameters** + +`c` + + Cursor ID of the cursor for the SQL command with bind variables. + +`name` + + Name of the bind variable in the SQL command. + +`value` + + Value of type `CHAR` to be assigned. + +`out_value_size` + + If `name` is an `IN OUT` variable, defines the maximum length of the output value. If not specified, the length of `value` is assumed. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/03_bind_variable_raw.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/03_bind_variable_raw.mdx new file mode 100644 index 00000000000..03b58486c96 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/03_bind_variable_raw.mdx @@ -0,0 +1,32 @@ +--- +title: "BIND_VARIABLE_RAW" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/bind_variable_raw.html" +--- + +The `BIND_VARIABLE_RAW` procedure provides the capability to associate a `RAW` value with an `IN` or `IN OUT` bind variable in a SQL command. 
+ +```text +BIND_VARIABLE_RAW( INTEGER, VARCHAR2, RAW + [, INTEGER ]) +``` + +**Parameters** + +`c` + + Cursor ID of the cursor for the SQL command with bind variables. + +`name` + + Name of the bind variable in the SQL command. + +`value` + + Value of type `RAW` to be assigned. + +`out_value_size` + + If `name` is an `IN OUT` variable, defines the maximum length of the output value. If not specified, the length of `value` is assumed. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/04_close_cursor.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/04_close_cursor.mdx new file mode 100644 index 00000000000..fb7feb134c2 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/04_close_cursor.mdx @@ -0,0 +1,35 @@ +--- +title: "CLOSE_CURSOR" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/close_cursor.html" +--- + +The `CLOSE_CURSOR` procedure closes an open cursor. The resources allocated to the cursor are released and it can no longer be used. + +```text +CLOSE_CURSOR( IN OUT INTEGER) +``` + +**Parameters** + +`c` + + Cursor ID of the cursor to be closed. + +**Examples** + +The following example closes a previously opened cursor: + +```text +DECLARE + curid INTEGER; +BEGIN + curid := DBMS_SQL.OPEN_CURSOR; + . + . + . + DBMS_SQL.CLOSE_CURSOR(curid); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/05_column_value.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/05_column_value.mdx new file mode 100644 index 00000000000..eca9c17e056 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/05_column_value.mdx @@ -0,0 +1,74 @@ +--- +title: "COLUMN_VALUE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/column_value.html" +--- + +The `COLUMN_VALUE` procedure defines a variable to receive a value from a cursor. + +```text +COLUMN_VALUE( INTEGER, INTEGER, OUT { BLOB | + CLOB | DATE | FLOAT | INTEGER | NUMBER | TIMESTAMP | VARCHAR2 } + [, OUT NUMBER [, OUT INTEGER ]]) +``` + +**Parameters** + +`c` + + Cursor id of the cursor returning data to the variable being defined. + +`position` + + Position within the cursor of the returned data. The first value in the cursor is position 1. + +`value` + + Variable receiving the data returned in the cursor by a prior fetch call. + +`column_error` + + Error number associated with the column, if any. + +`actual_length` + + Actual length of the data prior to any truncation. + +**Examples** + +The following example shows the portion of an anonymous block that receives the values from a cursor using the `COLUMN_VALUE` procedure. + +```text +DECLARE + curid INTEGER; + v_empno NUMBER(4); + v_ename VARCHAR2(10); + v_hiredate DATE; + v_sal NUMBER(7,2); + v_comm NUMBER(7,2); + v_sql VARCHAR2(50) := 'SELECT empno, ename, hiredate, sal, ' || + 'comm FROM emp'; + v_status INTEGER; +BEGIN + . + . + . 
+
+    LOOP
+        v_status := DBMS_SQL.FETCH_ROWS(curid);
+        EXIT WHEN v_status = 0;
+        DBMS_SQL.COLUMN_VALUE(curid,1,v_empno);
+        DBMS_SQL.COLUMN_VALUE(curid,2,v_ename);
+        DBMS_SQL.COLUMN_VALUE(curid,3,v_hiredate);
+        DBMS_SQL.COLUMN_VALUE(curid,4,v_sal);
+        DBMS_SQL.COLUMN_VALUE(curid,5,v_comm);
+        DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || RPAD(v_ename,10) || ' ' ||
+            TO_CHAR(v_hiredate,'yyyy-mm-dd') || ' ' ||
+            TO_CHAR(v_sal,'9,999.99') || ' ' ||
+            TO_CHAR(NVL(v_comm,0),'9,999.99'));
+    END LOOP;
+    DBMS_SQL.CLOSE_CURSOR(curid);
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/06_column_value_char.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/06_column_value_char.mdx
new file mode 100644
index 00000000000..45d646c68f0
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/06_column_value_char.mdx
@@ -0,0 +1,36 @@
+---
+title: "COLUMN_VALUE_CHAR"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/column_value_char.html"
+---
+
+The `COLUMN_VALUE_CHAR` procedure defines a variable to receive a `CHAR` value from a cursor.
+
+```text
+COLUMN_VALUE_CHAR( INTEGER, INTEGER, OUT CHAR
+  [, OUT NUMBER [, OUT INTEGER ]])
+```
+
+**Parameters**
+
+`c`
+
+ Cursor id of the cursor returning data to the variable being defined.
+
+`position`
+
+ Position within the cursor of the returned data. The first value in the cursor is position 1.
+
+`value`
+
+ Variable of data type `CHAR` receiving the data returned in the cursor by a prior fetch call.
+
+`column_error`
+
+ Error number associated with the column, if any.
+
+`actual_length`
+
+ Actual length of the data prior to any truncation.
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/07_column_value_raw.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/07_column_value_raw.mdx
new file mode 100644
index 00000000000..90fcefa99b7
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/07_column_value_raw.mdx
@@ -0,0 +1,37 @@
+---
+title: "COLUMN_VALUE_RAW"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/column_value_raw.html"
+---
+
+The `COLUMN_VALUE_RAW` procedure defines a variable to receive a `RAW` value from a cursor.
+
+```text
+COLUMN_VALUE_RAW( INTEGER, INTEGER, OUT RAW
+  [, OUT NUMBER [, OUT INTEGER ]])
+```
+
+**Parameters**
+
+`c`
+
+ Cursor id of the cursor returning data to the variable being defined.
+
+`position`
+
+ Position within the cursor of the returned data. The first value in the cursor is position 1.
+
+`value`
+
+ Variable of data type `RAW` receiving the data returned in the cursor by a prior fetch call.
+
+`column_error`
+
+ Error number associated with the column, if any.
+
+`actual_length`
+
+ Actual length of the data prior to any truncation.
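+
+**Examples**
+
+As a minimal sketch of how `COLUMN_VALUE_RAW` pairs with `DEFINE_COLUMN_RAW` and `FETCH_ROWS`, the following block reads a `RAW` column; the table `packets` and its `payload` column are hypothetical:
+
+```text
+DECLARE
+    curid           INTEGER;
+    v_raw           RAW(1024);
+    v_status        INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    DBMS_SQL.PARSE(curid, 'SELECT payload FROM packets', DBMS_SQL.native);
+    DBMS_SQL.DEFINE_COLUMN_RAW(curid, 1, v_raw, 1024);
+    v_status := DBMS_SQL.EXECUTE(curid);
+    LOOP
+        EXIT WHEN DBMS_SQL.FETCH_ROWS(curid) = 0;
+        -- Retrieve the RAW value fetched by the prior FETCH_ROWS call;
+        -- printing it assumes an implicit RAW-to-text conversion.
+        DBMS_SQL.COLUMN_VALUE_RAW(curid, 1, v_raw);
+        DBMS_OUTPUT.PUT_LINE('payload: ' || v_raw);
+    END LOOP;
+    DBMS_SQL.CLOSE_CURSOR(curid);
+END;
+```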
+ diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/08_define_column.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/08_define_column.mdx new file mode 100644 index 00000000000..cd005bbb71f --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/08_define_column.mdx @@ -0,0 +1,87 @@ +--- +title: "DEFINE_COLUMN" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/define_column.html" +--- + +The `DEFINE_COLUMN` procedure defines a column or expression in the `SELECT` list that is to be returned and retrieved in a cursor. + +```text +DEFINE_COLUMN( INTEGER, INTEGER, { BLOB | + CLOB | DATE | FLOAT | INTEGER | NUMBER | TIMESTAMP | VARCHAR2 } + [, INTEGER ]) +``` + +**Parameters** + +`c` + + Cursor id of the cursor associated with the `SELECT` command. + +`position` + + Position of the column or expression in the `SELECT` list that is being defined. + +`column` + + A variable that is of the same data type as the column or expression in position `position` of the `SELECT` list. + +`column_size` + + The maximum length of the returned data. `column_size` must be specified only if `column` is `VARCHAR2`. Returned data exceeding `column_size` is truncated to `column_size` characters. + +**Examples** + +The following shows how the `empno, ename, hiredate, sal`, and `comm` columns of the `emp` table are defined with the `DEFINE_COLUMN` procedure. + +```text +DECLARE + curid INTEGER; + v_empno NUMBER(4); + v_ename VARCHAR2(10); + v_hiredate DATE; + v_sal NUMBER(7,2); + v_comm NUMBER(7,2); + v_sql VARCHAR2(50) := 'SELECT empno, ename, hiredate, sal, ' || + 'comm FROM emp'; + v_status INTEGER; +BEGIN + curid := DBMS_SQL.OPEN_CURSOR; + DBMS_SQL.PARSE(curid,v_sql,DBMS_SQL.native); + DBMS_SQL.DEFINE_COLUMN(curid,1,v_empno); + DBMS_SQL.DEFINE_COLUMN(curid,2,v_ename,10); + DBMS_SQL.DEFINE_COLUMN(curid,3,v_hiredate); + DBMS_SQL.DEFINE_COLUMN(curid,4,v_sal); + DBMS_SQL.DEFINE_COLUMN(curid,5,v_comm); + . + . + . +END; +``` + +The following shows an alternative to the prior example that produces the exact same results. Note that the lengths of the data types are irrelevant – the `empno, sal`, and `comm` columns will still return data equivalent to `NUMBER(4)` and `NUMBER(7,2)`, respectively, even though `v_num` is defined as `NUMBER(1)` (assuming the declarations in the `COLUMN_VALUE` procedure are of the appropriate maximum sizes). The `ename` column will return data up to ten characters in length as defined by the `length` parameter in the `DEFINE_COLUMN` call, not by the data type declaration, `VARCHAR2(1)` declared for `v_varchar`. The actual size of the returned data is dictated by the `COLUMN_VALUE` procedure. + +```text +DECLARE + curid INTEGER; + v_num NUMBER(1); + v_varchar VARCHAR2(1); + v_date DATE; + v_sql VARCHAR2(50) := 'SELECT empno, ename, hiredate, sal, ' || + 'comm FROM emp'; + v_status INTEGER; +BEGIN + curid := DBMS_SQL.OPEN_CURSOR; + DBMS_SQL.PARSE(curid,v_sql,DBMS_SQL.native); + DBMS_SQL.DEFINE_COLUMN(curid,1,v_num); + DBMS_SQL.DEFINE_COLUMN(curid,2,v_varchar,10); + DBMS_SQL.DEFINE_COLUMN(curid,3,v_date); + DBMS_SQL.DEFINE_COLUMN(curid,4,v_num); + DBMS_SQL.DEFINE_COLUMN(curid,5,v_num); + . + . + . 
+END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/09_define_column_char.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/09_define_column_char.mdx new file mode 100644 index 00000000000..df66ccdc881 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/09_define_column_char.mdx @@ -0,0 +1,32 @@ +--- +title: "DEFINE_COLUMN_CHAR" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/define_column_char.html" +--- + +The `DEFINE_COLUMN_CHAR` procedure defines a `CHAR` column or expression in the `SELECT` list that is to be returned and retrieved in a cursor. + +```text +DEFINE_COLUMN_CHAR( INTEGER, INTEGER, +CHAR, INTEGER) +``` + +**Parameters** + +`c` + + Cursor id of the cursor associated with the `SELECT` command. + +`position` + + Position of the column or expression in the `SELECT` list that is being defined. + +`column` + + A `CHAR` variable. + +`column_size` + + The maximum length of the returned data. Returned data exceeding `column_size` is truncated to `column_size` characters. diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/10_define_column_raw.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/10_define_column_raw.mdx new file mode 100644 index 00000000000..5882316926a --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/10_define_column_raw.mdx @@ -0,0 +1,34 @@ +--- +title: "DEFINE_COLUMN_RAW" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/define_column_raw.html" +--- + +The `DEFINE_COLUMN_RAW` procedure defines a `RAW` column or expression in the `SELECT` list that is to be returned and retrieved in a cursor. + +```text +DEFINE_COLUMN_RAW( INTEGER, INTEGER, RAW, + INTEGER) +``` + +**Parameters** + +`c` + + Cursor id of the cursor associated with the `SELECT` command. + +`position` + + Position of the column or expression in the `SELECT` list that is being defined. + +`column` + + A `RAW` variable. + +`column_size` + + The maximum length of the returned data. Returned data exceeding `column_size` is truncated to `column_size` characters. + + diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/11_describe_columns.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/11_describe_columns.mdx new file mode 100644 index 00000000000..2497f5296d6 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/11_describe_columns.mdx @@ -0,0 +1,42 @@ +--- +title: "DESCRIBE_COLUMNS" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/describe_columns.html" +--- + +The `DESCRIBE_COLUMNS` procedure describes the columns returned by a cursor. 
+ +```text +DESCRIBE_COLUMNS( INTEGER, OUT INTEGER, OUT + DESC_TAB); +``` + +**Parameters** + +`c` + + The cursor ID of the cursor. + +`col_cnt` + + The number of columns in cursor result set. + +`desc_tab` + + The table that contains a description of each column returned by the cursor. The descriptions are of type `DESC_REC`, and contain the following values: + +| Column Name | Type | +| --------------------- | --------------- | +| `col_type` | `INTEGER` | +| `col_max_len` | `INTEGER` | +| `col_name` | `VARCHAR2(128)` | +| `col_name_len` | `INTEGER` | +| `col_schema_name` | `VARCHAR2(128)` | +| `col_schema_name_len` | `INTEGER` | +| `col_precision` | `INTEGER` | +| `col_scale` | `INTEGER` | +| `col_charsetid` | `INTEGER` | +| `col_charsetform` | `INTEGER` | +| `col_null_ok` | `BOOLEAN` | diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/12_execute.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/12_execute.mdx new file mode 100644 index 00000000000..f61a096de6d --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/12_execute.mdx @@ -0,0 +1,42 @@ +--- +title: "EXECUTE" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/execute.html" +--- + +The `EXECUTE` function executes a parsed SQL command or SPL block. + +```text + INTEGER EXECUTE( INTEGER) +``` + +**Parameters** + +`c` + + Cursor ID of the parsed SQL command or SPL block to be executed. + +`status` + + Number of rows processed if the SQL command was `DELETE, INSERT`, or `UPDATE`. `status` is meaningless for all other commands. + +**Examples** + +The following anonymous block inserts a row into the `dept` table. + +```text +DECLARE + curid INTEGER; + v_sql VARCHAR2(50); + v_status INTEGER; +BEGIN + curid := DBMS_SQL.OPEN_CURSOR; + v_sql := 'INSERT INTO dept VALUES (50, ''HR'', ''LOS ANGELES'')'; + DBMS_SQL.PARSE(curid, v_sql, DBMS_SQL.native); + v_status := DBMS_SQL.EXECUTE(curid); + DBMS_OUTPUT.PUT_LINE('Number of rows processed: ' || v_status); + DBMS_SQL.CLOSE_CURSOR(curid); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/13_execute_and_fetch.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/13_execute_and_fetch.mdx new file mode 100644 index 00000000000..a5ae0eb400a --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/13_execute_and_fetch.mdx @@ -0,0 +1,98 @@ +--- +title: "EXECUTE_AND_FETCH" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/execute_and_fetch.html" +--- + +Function `EXECUTE_AND_FETCH` executes a parsed `SELECT` command and fetches one row. + +```text + INTEGER EXECUTE_AND_FETCH( INTEGER [, BOOLEAN ]) +``` + +**Parameters** + +`c` + + Cursor id of the cursor for the `SELECT` command to be executed. + +`exact` + + If set to `TRUE`, an exception is thrown if the number of rows in the result set is not exactly equal to 1. If set to `FALSE`, no exception is thrown. The default is `FALSE`. 
A `NO_DATA_FOUND` exception is thrown if `exact` is `TRUE` and there are no rows in the result set. A `TOO_MANY_ROWS` exception is thrown if `exact` is `TRUE` and there is more than one row in the result set. + +`status` + + Returns 1 if a row was successfully fetched, 0 if no rows to fetch. If an exception is thrown, no value is returned. + +**Examples** + +The following stored procedure uses the `EXECUTE_AND_FETCH` function to retrieve one employee using the employee’s name. An exception will be thrown if the employee is not found, or there is more than one employee with the same name. + +```text +CREATE OR REPLACE PROCEDURE select_by_name( + p_ename emp.ename%TYPE +) +IS + curid INTEGER; + v_empno emp.empno%TYPE; + v_hiredate emp.hiredate%TYPE; + v_sal emp.sal%TYPE; + v_comm emp.comm%TYPE; + v_dname dept.dname%TYPE; + v_disp_date VARCHAR2(10); + v_sql VARCHAR2(120) := 'SELECT empno, hiredate, sal, ' || + 'NVL(comm, 0), dname ' || + 'FROM emp e, dept d ' || + 'WHERE ename = :p_ename ' || + 'AND e.deptno = d.deptno'; + v_status INTEGER; +BEGIN + curid := DBMS_SQL.OPEN_CURSOR; + DBMS_SQL.PARSE(curid,v_sql,DBMS_SQL.native); + DBMS_SQL.BIND_VARIABLE(curid,':p_ename',UPPER(p_ename)); + DBMS_SQL.DEFINE_COLUMN(curid,1,v_empno); + DBMS_SQL.DEFINE_COLUMN(curid,2,v_hiredate); + DBMS_SQL.DEFINE_COLUMN(curid,3,v_sal); + DBMS_SQL.DEFINE_COLUMN(curid,4,v_comm); + DBMS_SQL.DEFINE_COLUMN(curid,5,v_dname,14); + v_status := DBMS_SQL.EXECUTE_AND_FETCH(curid,TRUE); + DBMS_SQL.COLUMN_VALUE(curid,1,v_empno); + DBMS_SQL.COLUMN_VALUE(curid,2,v_hiredate); + DBMS_SQL.COLUMN_VALUE(curid,3,v_sal); + DBMS_SQL.COLUMN_VALUE(curid,4,v_comm); + DBMS_SQL.COLUMN_VALUE(curid,5,v_dname); + v_disp_date := TO_CHAR(v_hiredate, 'MM/DD/YYYY'); + DBMS_OUTPUT.PUT_LINE('Number : ' || v_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || UPPER(p_ename)); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || v_disp_date); + DBMS_OUTPUT.PUT_LINE('Salary : ' || v_sal); + DBMS_OUTPUT.PUT_LINE('Commission: ' || v_comm); + DBMS_OUTPUT.PUT_LINE('Department: ' || v_dname); + DBMS_SQL.CLOSE_CURSOR(curid); +EXCEPTION + WHEN NO_DATA_FOUND THEN + DBMS_OUTPUT.PUT_LINE('Employee ' || p_ename || ' not found'); + DBMS_SQL.CLOSE_CURSOR(curid); + WHEN TOO_MANY_ROWS THEN + DBMS_OUTPUT.PUT_LINE('Too many employees named, ' || + p_ename || ', found'); + DBMS_SQL.CLOSE_CURSOR(curid); + WHEN OTHERS THEN + DBMS_OUTPUT.PUT_LINE('The following is SQLERRM:'); + DBMS_OUTPUT.PUT_LINE(SQLERRM); + DBMS_OUTPUT.PUT_LINE('The following is SQLCODE:'); + DBMS_OUTPUT.PUT_LINE(SQLCODE); + DBMS_SQL.CLOSE_CURSOR(curid); +END; + +EXEC select_by_name('MARTIN') + +Number : 7654 +Name : MARTIN +Hire Date : 09/28/1981 +Salary : 1250 +Commission: 1400 +Department: SALES +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/14_fetch_rows.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/14_fetch_rows.mdx new file mode 100644 index 00000000000..ba50e238475 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/14_fetch_rows.mdx @@ -0,0 +1,86 @@ +--- +title: "FETCH_ROWS" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/fetch_rows.html" +--- + +The `FETCH_ROWS` function retrieves a row from a cursor. 
+
+```text
+ INTEGER FETCH_ROWS( INTEGER)
+```
+
+**Parameters**
+
+`c`
+
+ Cursor ID of the cursor from which to fetch a row.
+
+`status`
+
+ Returns `1` if a row was successfully fetched, `0` if no more rows to fetch.
+
+**Examples**
+
+The following example fetches the rows from the `emp` table and displays the results.
+
+```text
+DECLARE
+    curid           INTEGER;
+    v_empno         NUMBER(4);
+    v_ename         VARCHAR2(10);
+    v_hiredate      DATE;
+    v_sal           NUMBER(7,2);
+    v_comm          NUMBER(7,2);
+    v_sql           VARCHAR2(50) := 'SELECT empno, ename, hiredate, sal, ' ||
+                    'comm FROM emp';
+    v_status        INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    DBMS_SQL.PARSE(curid,v_sql,DBMS_SQL.native);
+    DBMS_SQL.DEFINE_COLUMN(curid,1,v_empno);
+    DBMS_SQL.DEFINE_COLUMN(curid,2,v_ename,10);
+    DBMS_SQL.DEFINE_COLUMN(curid,3,v_hiredate);
+    DBMS_SQL.DEFINE_COLUMN(curid,4,v_sal);
+    DBMS_SQL.DEFINE_COLUMN(curid,5,v_comm);
+
+    v_status := DBMS_SQL.EXECUTE(curid);
+    DBMS_OUTPUT.PUT_LINE('EMPNO ENAME      HIREDATE    SAL       COMM');
+    DBMS_OUTPUT.PUT_LINE('----- ---------- ---------- -------- ' ||
+        '--------');
+    LOOP
+        v_status := DBMS_SQL.FETCH_ROWS(curid);
+        EXIT WHEN v_status = 0;
+        DBMS_SQL.COLUMN_VALUE(curid,1,v_empno);
+        DBMS_SQL.COLUMN_VALUE(curid,2,v_ename);
+        DBMS_SQL.COLUMN_VALUE(curid,3,v_hiredate);
+        DBMS_SQL.COLUMN_VALUE(curid,4,v_sal);
+        DBMS_SQL.COLUMN_VALUE(curid,5,v_comm);
+        DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || RPAD(v_ename,10) || ' ' ||
+            TO_CHAR(v_hiredate,'yyyy-mm-dd') || ' ' ||
+            TO_CHAR(v_sal,'9,999.99') || ' ' ||
+            TO_CHAR(NVL(v_comm,0),'9,999.99'));
+    END LOOP;
+    DBMS_SQL.CLOSE_CURSOR(curid);
+END;
+
+EMPNO ENAME      HIREDATE    SAL       COMM
+----- ---------- ---------- -------- --------
+7369  SMITH      1980-12-17    800.00      .00
+7499  ALLEN      1981-02-20  1,600.00   300.00
+7521  WARD       1981-02-22  1,250.00   500.00
+7566  JONES      1981-04-02  2,975.00      .00
+7654  MARTIN     1981-09-28  1,250.00 1,400.00
+7698  BLAKE      1981-05-01  2,850.00      .00
+7782  CLARK      1981-06-09  2,450.00      .00
+7788  SCOTT      1987-04-19  3,000.00      .00
+7839  KING       1981-11-17  5,000.00      .00
+7844  TURNER     1981-09-08  1,500.00      .00
+7876  ADAMS      1987-05-23  1,100.00      .00
+7900  JAMES      1981-12-03    950.00      .00
+7902  FORD       1981-12-03  3,000.00      .00
+7934  MILLER     1982-01-23  1,300.00      .00
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/15_is_open.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/15_is_open.mdx
new file mode 100644
index 00000000000..a345784cda1
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/15_is_open.mdx
@@ -0,0 +1,23 @@
+---
+title: "IS_OPEN"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/is_open.html"
+---
+
+The `IS_OPEN` function provides the capability to test if the given cursor is open.
+
+```text
+ BOOLEAN IS_OPEN( INTEGER)
+```
+
+**Parameters**
+
+`c`
+
+ Cursor ID of the cursor to be tested.
+
+`status`
+
+ Set to `TRUE` if the cursor is open, set to `FALSE` if the cursor is not open.
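+
+**Examples**
+
+A common use of `IS_OPEN` is to guard cleanup code. The following sketch (the query text is illustrative) closes the cursor in an exception handler only if it is still open:
+
+```text
+DECLARE
+    curid           INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    DBMS_SQL.PARSE(curid, 'SELECT ename FROM emp', DBMS_SQL.native);
+    -- ... define columns, execute, and fetch here ...
+    DBMS_SQL.CLOSE_CURSOR(curid);
+EXCEPTION
+    WHEN OTHERS THEN
+        -- Only close the cursor if it was not already closed above.
+        IF DBMS_SQL.IS_OPEN(curid) THEN
+            DBMS_SQL.CLOSE_CURSOR(curid);
+        END IF;
+END;
+```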
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/16_last_row_count.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/16_last_row_count.mdx
new file mode 100644
index 00000000000..be85eb5de86
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/16_last_row_count.mdx
@@ -0,0 +1,86 @@
+---
+title: "LAST_ROW_COUNT"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/last_row_count.html"
+---
+
+The `LAST_ROW_COUNT` function returns the number of rows that have been fetched so far.
+
+```text
+ INTEGER LAST_ROW_COUNT
+```
+
+**Parameters**
+
+`rowcnt`
+
+ Number of rows fetched thus far.
+
+**Examples**
+
+The following example uses the `LAST_ROW_COUNT` function to display the total number of rows fetched in the query.
+
+```text
+DECLARE
+    curid           INTEGER;
+    v_empno         NUMBER(4);
+    v_ename         VARCHAR2(10);
+    v_hiredate      DATE;
+    v_sal           NUMBER(7,2);
+    v_comm          NUMBER(7,2);
+    v_sql           VARCHAR2(50) := 'SELECT empno, ename, hiredate, sal, ' ||
+                    'comm FROM emp';
+    v_status        INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    DBMS_SQL.PARSE(curid,v_sql,DBMS_SQL.native);
+    DBMS_SQL.DEFINE_COLUMN(curid,1,v_empno);
+    DBMS_SQL.DEFINE_COLUMN(curid,2,v_ename,10);
+    DBMS_SQL.DEFINE_COLUMN(curid,3,v_hiredate);
+    DBMS_SQL.DEFINE_COLUMN(curid,4,v_sal);
+    DBMS_SQL.DEFINE_COLUMN(curid,5,v_comm);
+
+    v_status := DBMS_SQL.EXECUTE(curid);
+    DBMS_OUTPUT.PUT_LINE('EMPNO ENAME      HIREDATE    SAL       COMM');
+    DBMS_OUTPUT.PUT_LINE('----- ---------- ---------- -------- ' ||
+        '--------');
+    LOOP
+        v_status := DBMS_SQL.FETCH_ROWS(curid);
+        EXIT WHEN v_status = 0;
+        DBMS_SQL.COLUMN_VALUE(curid,1,v_empno);
+        DBMS_SQL.COLUMN_VALUE(curid,2,v_ename);
+        DBMS_SQL.COLUMN_VALUE(curid,3,v_hiredate);
+        DBMS_SQL.COLUMN_VALUE(curid,4,v_sal);
+        DBMS_SQL.COLUMN_VALUE(curid,5,v_comm);
+        DBMS_OUTPUT.PUT_LINE(v_empno || ' ' || RPAD(v_ename,10) || ' ' ||
+            TO_CHAR(v_hiredate,'yyyy-mm-dd') || ' ' ||
+            TO_CHAR(v_sal,'9,999.99') || ' ' ||
+            TO_CHAR(NVL(v_comm,0),'9,999.99'));
+    END LOOP;
+    DBMS_OUTPUT.PUT_LINE('Number of rows: ' || DBMS_SQL.LAST_ROW_COUNT);
+    DBMS_SQL.CLOSE_CURSOR(curid);
+END;
+
+EMPNO ENAME      HIREDATE    SAL       COMM
+----- ---------- ---------- -------- --------
+7369  SMITH      1980-12-17    800.00      .00
+7499  ALLEN      1981-02-20  1,600.00   300.00
+7521  WARD       1981-02-22  1,250.00   500.00
+7566  JONES      1981-04-02  2,975.00      .00
+7654  MARTIN     1981-09-28  1,250.00 1,400.00
+7698  BLAKE      1981-05-01  2,850.00      .00
+7782  CLARK      1981-06-09  2,450.00      .00
+7788  SCOTT      1987-04-19  3,000.00      .00
+7839  KING       1981-11-17  5,000.00      .00
+7844  TURNER     1981-09-08  1,500.00      .00
+7876  ADAMS      1987-05-23  1,100.00      .00
+7900  JAMES      1981-12-03    950.00      .00
+7902  FORD       1981-12-03  3,000.00      .00
+7934  MILLER     1982-01-23  1,300.00      .00
+Number of rows: 14
+```
+
+
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/17_open_cursor.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/17_open_cursor.mdx
new file mode 100644
index 00000000000..e763dc58026
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/17_open_cursor.mdx
@@ -0,0 +1,34 @@
+---
+title: "OPEN_CURSOR"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/open_cursor.html"
+---
+
+The `OPEN_CURSOR` function creates a new cursor. A cursor must be used to parse and execute any dynamic SQL statement. Once a cursor has been opened, it can be re-used with the same or different SQL statements. The cursor does not have to be closed and re-opened in order to be re-used.
+
+```text
+ INTEGER OPEN_CURSOR
+```
+
+**Parameters**
+
+`c`
+
+ Cursor ID number associated with the newly created cursor.
+
+**Examples**
+
+The following example creates a new cursor:
+
+```text
+DECLARE
+    curid           INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    .
+    .
+    .
+END;
+```
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/18_parse.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/18_parse.mdx
new file mode 100644
index 00000000000..84b004c4d1f
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/18_parse.mdx
@@ -0,0 +1,85 @@
+---
+title: "PARSE"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/parse.html"
+---
+
+The `PARSE` procedure parses a SQL command or SPL block. If the SQL command is a DDL command, it is immediately executed and does not require running the `EXECUTE` function.
+
+```text
+PARSE( INTEGER, VARCHAR2, INTEGER)
+```
+
+**Parameters**
+
+`c`
+
+ Cursor ID of an open cursor.
+
+`statement`
+
+ SQL command or SPL block to be parsed. A SQL command must not end with the semi-colon terminator; however, an SPL block does require the semi-colon terminator.
+
+`language_flag`
+
+ Language flag provided for compatibility with Oracle syntax. Use `DBMS_SQL.V6, DBMS_SQL.V7` or `DBMS_SQL.native`. This flag is ignored, and all syntax is assumed to be in EnterpriseDB Advanced Server form.
+
+**Examples**
+
+The following anonymous block creates a table named `job`. Note that DDL statements are executed immediately by the `PARSE` procedure and do not require a separate `EXECUTE` step.
+
+```text
+DECLARE
+    curid           INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    DBMS_SQL.PARSE(curid, 'CREATE TABLE job (jobno NUMBER(3), ' ||
+        'jname VARCHAR2(9))',DBMS_SQL.native);
+    DBMS_SQL.CLOSE_CURSOR(curid);
+END;
+```
+
+The following inserts two rows into the `job` table.
+
+```text
+DECLARE
+    curid           INTEGER;
+    v_sql           VARCHAR2(50);
+    v_status        INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    v_sql := 'INSERT INTO job VALUES (100, ''ANALYST'')';
+    DBMS_SQL.PARSE(curid, v_sql, DBMS_SQL.native);
+    v_status := DBMS_SQL.EXECUTE(curid);
+    DBMS_OUTPUT.PUT_LINE('Number of rows processed: ' || v_status);
+    v_sql := 'INSERT INTO job VALUES (200, ''CLERK'')';
+    DBMS_SQL.PARSE(curid, v_sql, DBMS_SQL.native);
+    v_status := DBMS_SQL.EXECUTE(curid);
+    DBMS_OUTPUT.PUT_LINE('Number of rows processed: ' || v_status);
+    DBMS_SQL.CLOSE_CURSOR(curid);
+END;
+
+Number of rows processed: 1
+Number of rows processed: 1
+```
+
+The following anonymous block uses the `DBMS_SQL` package to execute a block containing two `INSERT` statements.
Note that the end of the block contains a terminating semi-colon, while in the prior example, each individual `INSERT` statement does not have a terminating semi-colon. + +```text +DECLARE + curid INTEGER; + v_sql VARCHAR2(100); + v_status INTEGER; +BEGIN + curid := DBMS_SQL.OPEN_CURSOR; + v_sql := 'BEGIN ' || + 'INSERT INTO job VALUES (300, ''MANAGER''); ' || + 'INSERT INTO job VALUES (400, ''SALESMAN''); ' || + 'END;'; + DBMS_SQL.PARSE(curid, v_sql, DBMS_SQL.native); + v_status := DBMS_SQL.EXECUTE(curid); + DBMS_SQL.CLOSE_CURSOR(curid); +END; +``` diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/index.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/index.mdx new file mode 100644 index 00000000000..67be14f4d83 --- /dev/null +++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/17_dbms_sql/index.mdx @@ -0,0 +1,48 @@ +--- +title: "DBMS_SQL" + +legacyRedirectsGenerated: + # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. + - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_sql.html" +--- + +The `DBMS_SQL` package provides an application interface compatible with Oracle databases to the EnterpriseDB dynamic SQL functionality. With `DBMS_SQL` you can construct queries and other commands at run time (rather than when you write the application). EnterpriseDB Advanced Server offers native support for dynamic SQL; `DBMS_SQL` provides a way to use dynamic SQL in a fashion compatible with Oracle databases without modifying your application. + +`DBMS_SQL` assumes the privileges of the current user when executing dynamic SQL statements. + +| Function/Procedure | Function or Procedure | Return Type | Description | +| --------------------------------------------------------------------------------------- | --------------------- | ----------- | ---------------------------------------------------------------------- | +| `BIND_VARIABLE(c, name, value [, out_value_size ])` | Procedure | n/a | Bind a value to a variable. | +| `BIND_VARIABLE_CHAR(c, name, value [, out_value_size ])` | Procedure | n/a | Bind a `CHAR` value to a variable. | +| `BIND_VARIABLE_RAW(c, name, value [, out_value_size ])` | Procedure | n/a | Bind a `RAW` value to a variable. | +| `CLOSE_CURSOR(c IN OUT)` | Procedure | n/a | Close a cursor. | +| `COLUMN_VALUE(c, position, value OUT [, column_error OUT [, actual_length OUT ]])` | Procedure | n/a | Return a column value into a variable. | +| `COLUMN_VALUE_CHAR(c, position, value OUT [, column_error OUT [, actual_length OUT ]])` | Procedure | n/a | Return a `CHAR` column value into a variable. | +| `COLUMN_VALUE_RAW(c, position, value OUT [, column_error OUT [, actual_length OUT ]])` | Procedure | n/a | Return a `RAW` column value into a variable. | +| `DEFINE_COLUMN(c, position, column [, column_size ])` | Procedure | n/a | Define a column in the `SELECT` list. | +| `DEFINE_COLUMN_CHAR(c, position, column, column_size)` | Procedure | n/a | Define a `CHAR` column in the `SELECT` list. | +| `DEFINE_COLUMN_RAW(c, position, column, column_size)` | Procedure | n/a | Define a `RAW` column in the `SELECT` list. | +| `DESCRIBE_COLUMNS` | Procedure | n/a | Defines columns to hold a cursor result set. | +| `EXECUTE(c)` | Function | `INTEGER` | Execute a cursor. | +| `EXECUTE_AND_FETCH(c [, exact ])` | Function | `INTEGER` | Execute a cursor and fetch a single row. 
|
+| `FETCH_ROWS(c)` | Function | `INTEGER` | Fetch rows from the cursor. |
+| `IS_OPEN(c)` | Function | `BOOLEAN` | Check if a cursor is open. |
+| `LAST_ROW_COUNT` | Function | `INTEGER` | Return cumulative number of rows fetched. |
+| `OPEN_CURSOR` | Function | `INTEGER` | Open a cursor. |
+| `PARSE(c, statement, language_flag)` | Procedure | n/a | Parse a statement. |
+
+Advanced Server's implementation of `DBMS_SQL` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported.
+
+The following table lists the public variables available in the `DBMS_SQL` package.
+
+| Public Variables | Data Type | Value | Description |
+| ---------------- | --------- | ----- | ----------- |
+| `native` | `INTEGER` | `1` | Provided for compatibility with Oracle syntax. See `DBMS_SQL.PARSE` for more information. |
+| `V6` | `INTEGER` | `2` | Provided for compatibility with Oracle syntax. See `DBMS_SQL.PARSE` for more information. |
+| `V7` | `INTEGER` | `3` | Provided for compatibility with Oracle syntax. See `DBMS_SQL.PARSE` for more information. |
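+
+To show how these calls fit together, the following sketch (the query text is illustrative) walks a result set end to end: open, parse, define, execute, fetch, close:
+
+```text
+DECLARE
+    curid           INTEGER;
+    v_ename         VARCHAR2(10);
+    v_status        INTEGER;
+BEGIN
+    curid := DBMS_SQL.OPEN_CURSOR;
+    DBMS_SQL.PARSE(curid, 'SELECT ename FROM emp', DBMS_SQL.native);
+    DBMS_SQL.DEFINE_COLUMN(curid, 1, v_ename, 10);
+    v_status := DBMS_SQL.EXECUTE(curid);
+    LOOP
+        EXIT WHEN DBMS_SQL.FETCH_ROWS(curid) = 0;
+        DBMS_SQL.COLUMN_VALUE(curid, 1, v_ename);
+        DBMS_OUTPUT.PUT_LINE(v_ename);
+    END LOOP;
+    DBMS_SQL.CLOSE_CURSOR(curid);
+END;
+```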
+ +bind_variable bind_variable_char bind_variable_raw close_cursor column_value column_value_char column_value_raw define_column define_column_char define_column_raw describe_columns execute execute_and_fetch fetch_rows is_open last_row_count open_cursor parse + +
diff --git a/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/18_dbms_utility.mdx b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/18_dbms_utility.mdx
new file mode 100644
index 00000000000..261e903530a
--- /dev/null
+++ b/product_docs/docs/epas/11/epas_compat_bip_guide/03_built-in_packages/18_dbms_utility.mdx
@@ -0,0 +1,783 @@
+---
+title: "DBMS_UTILITY"
+
+legacyRedirectsGenerated:
+  # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.
+  - "/edb-docs/d/edb-postgres-advanced-server/reference/database-compatibility-for-oracle-developers-built-in-package-guide/13/dbms_utility.html"
+---
+
+The `DBMS_UTILITY` package provides support for the following utility programs:
+
+| Function/Procedure | Function or Procedure | Return Type | Description |
+| ------------------ | --------------------- | ----------- | ----------- |
+| `ANALYZE_DATABASE(method [, estimate_rows [, estimate_percent [, method_opt ]]])` | Procedure | n/a | Analyze database tables. |
+| `ANALYZE_PART_OBJECT(schema, object_name [, object_type [, command_type [, command_opt [, sample_clause ]]]])` | Procedure | n/a | Analyze a partitioned table. |
+| `ANALYZE_SCHEMA(schema, method [, estimate_rows [, estimate_percent [, method_opt ]]])` | Procedure | n/a | Analyze schema tables. |
+| `CANONICALIZE(name, canon_name OUT, canon_len)` | Procedure | n/a | Canonicalizes a string – e.g., strips off white space. |
+| `COMMA_TO_TABLE(list, tablen OUT, tab OUT)` | Procedure | n/a | Convert a comma-delimited list of names to a table of names. |
+| `DB_VERSION(version OUT, compatibility OUT)` | Procedure | n/a | Get the database version. |
+| `EXEC_DDL_STATEMENT(parse_string)` | Procedure | n/a | Execute a DDL statement. |
+| `FORMAT_CALL_STACK` | Function | `TEXT` | Formats the current call stack. |
+| `GET_CPU_TIME` | Function | `NUMBER` | Get the current CPU time. |
+| `GET_DEPENDENCY(type, schema, name)` | Procedure | n/a | Get objects that are dependent upon the given object. |
+| `GET_HASH_VALUE(name, base, hash_size)` | Function | `NUMBER` | Compute a hash value. |
+| `GET_PARAMETER_VALUE(parnam, intval OUT, strval OUT)` | Procedure | `BINARY_INTEGER` | Get database initialization parameter settings. |
+| `GET_TIME` | Function | `NUMBER` | Get the current time. |
+| `NAME_TOKENIZE(name, a OUT, b OUT, c OUT, dblink OUT, nextpos OUT)` | Procedure | n/a | Parse the given name into its component parts. |
+| `TABLE_TO_COMMA(tab, tablen OUT, list OUT)` | Procedure | n/a | Convert a table of names to a comma-delimited list. |
+
+Advanced Server's implementation of `DBMS_UTILITY` is a partial implementation when compared to Oracle's version. Only those functions and procedures listed in the table above are supported.
+
+The following table lists the public variables available in the `DBMS_UTILITY` package.
+
+| Public Variables | Data Type | Value | Description |
+| ---------------- | --------- | ----- | ----------- |
+| `inv_error_on_restrictions` | `PLS_INTEGER` | `1` | Used by the `INVALIDATE` procedure. |
+| `lname_array` | `TABLE` | | For lists of long names. |
+| `uncl_array` | `TABLE` | | For lists of users and names. |
+
+
+
+## LNAME_ARRAY
+
+The `LNAME_ARRAY` is for storing lists of long names, including fully-qualified names.
+
+```text
+TYPE lname_array IS TABLE OF VARCHAR2(4000) INDEX BY BINARY_INTEGER;
+```
+
+
+
+## UNCL_ARRAY
+
+The `UNCL_ARRAY` is for storing lists of users and names.
+
+```text
+TYPE uncl_array IS TABLE OF VARCHAR2(227) INDEX BY BINARY_INTEGER;
+```
+
+## ANALYZE_DATABASE, ANALYZE_SCHEMA and ANALYZE_PART_OBJECT
+
+The `ANALYZE_DATABASE(), ANALYZE_SCHEMA() and ANALYZE_PART_OBJECT()` procedures provide the capability to gather statistics on tables in the database. When you execute the `ANALYZE` statement, Postgres samples the data in a table and records distribution statistics in the `pg_statistics` system table.
+
+`ANALYZE_DATABASE, ANALYZE_SCHEMA`, and `ANALYZE_PART_OBJECT` differ primarily in the number of tables that are processed:
+
+- `ANALYZE_DATABASE` analyzes all tables in all schemas within the current database.
+- `ANALYZE_SCHEMA` analyzes all tables in a given schema (within the current database).
+- `ANALYZE_PART_OBJECT` analyzes a single table.
+
+The syntax for the `ANALYZE` commands is:
+
+```text
+ANALYZE_DATABASE( VARCHAR2 [, NUMBER
+   [, NUMBER [, VARCHAR2 ]]])
+
+ANALYZE_SCHEMA( VARCHAR2, VARCHAR2
+   [, NUMBER [, NUMBER
+   [, VARCHAR2 ]]])
+
+ANALYZE_PART_OBJECT( VARCHAR2, VARCHAR2
+   [, CHAR [, CHAR
+   [, VARCHAR2 [, ]]]])
+```
+
+**Parameters** - `ANALYZE_DATABASE` and `ANALYZE_SCHEMA`
+
+`method`
+
+ `method` determines whether the `ANALYZE` procedure populates the `pg_statistics` table or removes entries from the `pg_statistics` table. If you specify a method of `DELETE`, the `ANALYZE` procedure removes the relevant rows from `pg_statistics`. If you specify a method of `COMPUTE` or `ESTIMATE`, the `ANALYZE` procedure analyzes a table (or multiple tables) and records the distribution information in `pg_statistics`. There is no difference between `COMPUTE` and `ESTIMATE`; both methods execute the Postgres `ANALYZE` statement. All other parameters are validated and then ignored.
+
+`estimate_rows`
+
+ Number of rows upon which to base estimated statistics. One of `estimate_rows` or `estimate_percent` must be specified if `method` is `ESTIMATE`.
+
+ This argument is ignored, but is included for compatibility.
+
+`estimate_percent`
+
+ Percentage of rows upon which to base estimated statistics. One of `estimate_rows` or `estimate_percent` must be specified if `method` is `ESTIMATE`.
+
+ This argument is ignored, but is included for compatibility.
+
+`method_opt`
+
+ Object types to be analyzed. Any combination of the following:
+
+```
+[ FOR TABLE ]
+
+[ FOR ALL [ INDEXED ] COLUMNS ] [ SIZE n ]
+
+[ FOR ALL INDEXES ]
+```
+
+ This argument is ignored, but is included for compatibility.
+
+**Parameters** - `ANALYZE_PART_OBJECT`
+
+`schema`
+
+ Name of the schema whose objects are to be analyzed.
+
+`object_name`
+
+ Name of the partitioned object to be analyzed.
+
+`object_type`
+
+ Type of object to be analyzed. Valid values are: `T` – table, `I` – index.
+
+ This argument is ignored, but is included for compatibility.
+
+`command_type`
+
+ Type of analyze functionality to perform. Valid values are: `E` - gather estimated statistics based upon a specified number of rows or a percentage of rows in the `sample_clause` clause; `C` - compute exact statistics; or `V` – validate the structure and integrity of the partitions.
+
+ This argument is ignored, but is included for compatibility.

## CANONICALIZE

The `CANONICALIZE` procedure performs the following operations on an input string:

- If the string is not double-quoted, verifies that it uses the characters of a legal identifier. If not, an exception is thrown. If the string is double-quoted, all characters are allowed.
- If the string is not double-quoted and does not contain periods, uppercases all alphabetic characters and eliminates leading and trailing spaces.
- If the string is double-quoted and does not contain periods, strips off the double quotes.
- If the string contains periods and no portion of the string is double-quoted, uppercases each portion of the string and encloses each portion in double quotes.
- If the string contains periods and portions of the string are double-quoted, returns the double-quoted portions unchanged including the double quotes and returns the non-double-quoted portions uppercased and enclosed in double quotes.

```text
CANONICALIZE(<name> VARCHAR2, <canon_name> OUT VARCHAR2,
  <canon_len> BINARY_INTEGER)
```

**Parameters**

`name`

 String to be canonicalized.

`canon_name`

 The canonicalized string.

`canon_len`

 Number of bytes in `name` to canonicalize, starting from the first character.

**Examples**

The following procedure applies the `CANONICALIZE` procedure to its input parameter and displays the results.

```text
CREATE OR REPLACE PROCEDURE canonicalize (
    p_name      VARCHAR2,
    p_length    BINARY_INTEGER DEFAULT 30
)
IS
    v_canon     VARCHAR2(100);
BEGIN
    DBMS_UTILITY.CANONICALIZE(p_name,v_canon,p_length);
    DBMS_OUTPUT.PUT_LINE('Canonicalized name ==>' || v_canon || '<==');
    DBMS_OUTPUT.PUT_LINE('Length: ' || LENGTH(v_canon));
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('SQLERRM: ' || SQLERRM);
        DBMS_OUTPUT.PUT_LINE('SQLCODE: ' || SQLCODE);
END;

EXEC canonicalize('Identifier')
Canonicalized name ==>IDENTIFIER<==
Length: 10

EXEC canonicalize('"Identifier"')
Canonicalized name ==>Identifier<==
Length: 10

EXEC canonicalize('"_+142%"')
Canonicalized name ==>_+142%<==
Length: 6

EXEC canonicalize('abc.def.ghi')
Canonicalized name ==>"ABC"."DEF"."GHI"<==
Length: 17

EXEC canonicalize('"abc.def.ghi"')
Canonicalized name ==>abc.def.ghi<==
Length: 11

EXEC canonicalize('"abc".def."ghi"')
Canonicalized name ==>"abc"."DEF"."ghi"<==
Length: 17

EXEC canonicalize('"abc.def".ghi')
Canonicalized name ==>"abc.def"."GHI"<==
Length: 15
```

## COMMA_TO_TABLE

The `COMMA_TO_TABLE` procedure converts a comma-delimited list of names into a table of names. Each entry in the list becomes a table entry. The names must be formatted as valid identifiers.

```text
COMMA_TO_TABLE(<list> VARCHAR2, <tablen> OUT BINARY_INTEGER,
  <tab> OUT { LNAME_ARRAY | UNCL_ARRAY })
```

**Parameters**

`list`

 Comma-delimited list of names.

`tablen`

 Number of entries in `tab`.

`tab`

 Table containing the individual names in `list`.

`LNAME_ARRAY`

 A `DBMS_UTILITY.LNAME_ARRAY` (as described in the [LNAME_ARRAY](#lname_array) section).

`UNCL_ARRAY`

 A `DBMS_UTILITY.UNCL_ARRAY` (as described in the [UNCL_ARRAY](#uncl_array) section).

**Examples**

The following procedure uses the `COMMA_TO_TABLE` procedure to convert a list of names to a table. The table entries are then displayed.

```text
CREATE OR REPLACE PROCEDURE comma_to_table (
    p_list      VARCHAR2
)
IS
    r_lname     DBMS_UTILITY.LNAME_ARRAY;
    v_length    BINARY_INTEGER;
BEGIN
    DBMS_UTILITY.COMMA_TO_TABLE(p_list,v_length,r_lname);
    FOR i IN 1..v_length LOOP
        DBMS_OUTPUT.PUT_LINE(r_lname(i));
    END LOOP;
END;

EXEC comma_to_table('edb.dept, edb.emp, edb.jobhist')

edb.dept
edb.emp
edb.jobhist
```

## DB_VERSION

The `DB_VERSION` procedure returns the version number of the database.

```text
DB_VERSION(<version> OUT VARCHAR2, <compatibility> OUT VARCHAR2)
```

**Parameters**

`version`

 Database version number.

`compatibility`

 Compatibility setting of the database. (Its meaning is implementation-defined.)

**Examples**

The following anonymous block displays the database version information.

```text
DECLARE
    v_version       VARCHAR2(150);
    v_compat        VARCHAR2(150);
BEGIN
    DBMS_UTILITY.DB_VERSION(v_version,v_compat);
    DBMS_OUTPUT.PUT_LINE('Version: ' || v_version);
    DBMS_OUTPUT.PUT_LINE('Compatibility: ' || v_compat);
END;

Version: EnterpriseDB 11.0.0 on i686-pc-linux-gnu, compiled by GCC gcc
(GCC) 4.1.2 20080704 (Red Hat 4.1.2-48), 32-bit
Compatibility: EnterpriseDB 11.0.0 on i686-pc-linux-gnu, compiled by GCC
gcc (GCC) 4.1.2 20080704 (Red Hat 4.1.2-48), 32-bit
```

## EXEC_DDL_STATEMENT

The `EXEC_DDL_STATEMENT` procedure provides the capability to execute a DDL command.

```text
EXEC_DDL_STATEMENT(<parse_string> VARCHAR2)
```

**Parameters**

`parse_string`

 The DDL command to be executed.

**Examples**

The following anonymous block creates the `job` table.

```text
BEGIN
    DBMS_UTILITY.EXEC_DDL_STATEMENT(
        'CREATE TABLE job (' ||
            'jobno NUMBER(3),' ||
            'jname VARCHAR2(9))'
    );
END;
```

If the `parse_string` does not include a valid DDL statement, Advanced Server returns the following error:

```text
edb=# exec dbms_utility.exec_ddl_statement('select rownum from dual');
ERROR:  EDB-20001: 'parse_string' must be a valid DDL statement
```

In this case, Advanced Server's behavior differs from Oracle's; Oracle accepts the invalid `parse_string` without complaint.

## FORMAT_CALL_STACK

The `FORMAT_CALL_STACK` function returns the formatted contents of the current call stack.

```text
DBMS_UTILITY.FORMAT_CALL_STACK
return VARCHAR2
```

You can use this function in a stored procedure, function or package to return the current call stack in a readable format, which is useful for debugging.
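
**Examples**

The following sketch shows a typical usage pattern: an inner procedure prints the call stack that led to it. The procedure names `inner_proc` and `outer_proc` are placeholders, and the exact formatting of the returned stack depends on the server version.

```text
CREATE OR REPLACE PROCEDURE inner_proc
IS
BEGIN
    -- Display the chain of calls that reached this point
    DBMS_OUTPUT.PUT_LINE(DBMS_UTILITY.FORMAT_CALL_STACK);
END;

CREATE OR REPLACE PROCEDURE outer_proc
IS
BEGIN
    inner_proc;
END;

EXEC outer_proc
```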

## GET_CPU_TIME

The `GET_CPU_TIME` function returns the CPU time in hundredths of a second from some arbitrary point in time.

```text
<cputime> NUMBER GET_CPU_TIME
```

**Parameters**

`cputime`

 Number of hundredths of a second of CPU time.

**Examples**

The following `SELECT` command retrieves the current CPU time, which is 603 hundredths of a second, or 6.03 seconds.

```text
SELECT DBMS_UTILITY.GET_CPU_TIME FROM DUAL;

get_cpu_time
--------------
         603
```

## GET_DEPENDENCY

The `GET_DEPENDENCY` procedure provides the capability to list the objects that are dependent upon the specified object. `GET_DEPENDENCY` does not show dependencies for functions or procedures.

```text
GET_DEPENDENCY(<type> VARCHAR2, <schema> VARCHAR2,
  <name> VARCHAR2)
```

**Parameters**

`type`

 The object type of `name`. Valid values are `INDEX`, `PACKAGE`, `PACKAGE BODY`, `SEQUENCE`, `TABLE`, `TRIGGER`, `TYPE` and `VIEW`.

`schema`

 Name of the schema in which `name` exists.

`name`

 Name of the object for which dependencies are to be obtained.

**Examples**

The following anonymous block finds dependencies on the `EMP` table.

```text
BEGIN
    DBMS_UTILITY.GET_DEPENDENCY('TABLE','public','EMP');
END;

DEPENDENCIES ON public.EMP
------------------------------------------------------------------
*TABLE public.EMP()
*   CONSTRAINT c public.emp()
*   CONSTRAINT f public.emp()
*   CONSTRAINT p public.emp()
*   TYPE public.emp()
*   CONSTRAINT c public.emp()
*   CONSTRAINT f public.jobhist()
*   VIEW .empname_view()
```

## GET_HASH_VALUE

The `GET_HASH_VALUE` function provides the capability to compute a hash value for a given string.

```text
<hash> NUMBER GET_HASH_VALUE(<name> VARCHAR2, <base> NUMBER,
  <hash_size> NUMBER)
```

**Parameters**

`name`

 The string for which a hash value is to be computed.

`base`

 Starting value at which hash values are to be generated.

`hash_size`

 The number of hash values for the desired hash table.

`hash`

 The generated hash value.

**Examples**

The following anonymous block creates a table of hash values using the `ename` column of the `emp` table and then displays the key along with the hash value. The hash values start at 100, with a maximum of 1024 distinct values.

```text
DECLARE
    v_hash          NUMBER;
    TYPE hash_tab IS TABLE OF NUMBER INDEX BY VARCHAR2(10);
    r_hash          HASH_TAB;
    CURSOR emp_cur IS SELECT ename FROM emp;
BEGIN
    FOR r_emp IN emp_cur LOOP
        r_hash(r_emp.ename) :=
            DBMS_UTILITY.GET_HASH_VALUE(r_emp.ename,100,1024);
    END LOOP;
    FOR r_emp IN emp_cur LOOP
        DBMS_OUTPUT.PUT_LINE(RPAD(r_emp.ename,10) || ' ' ||
            r_hash(r_emp.ename));
    END LOOP;
END;

SMITH      377
ALLEN      740
WARD       718
JONES      131
MARTIN     176
BLAKE      568
CLARK      621
SCOTT      1097
KING       235
TURNER     850
ADAMS      156
JAMES      942
FORD       775
MILLER     148
```

## GET_PARAMETER_VALUE

The `GET_PARAMETER_VALUE` procedure provides the capability to retrieve database initialization parameter settings.

```text
<status> BINARY_INTEGER GET_PARAMETER_VALUE(<parnam> VARCHAR2,
  <intval> OUT INTEGER, <strval> OUT VARCHAR2)
```

**Parameters**

`parnam`

 Name of the parameter whose value is to be returned. The parameters are listed in the `pg_settings` system view.

`intval`

 Value of an integer parameter or the length of `strval`.

`strval`

 Value of a string parameter.

`status`

 Returns 0 if the parameter value is `INTEGER` or `BOOLEAN`. Returns 1 if the parameter value is a string.

**Examples**

The following anonymous block shows the values of two initialization parameters.

```text
DECLARE
    v_intval        INTEGER;
    v_strval        VARCHAR2(80);
BEGIN
    DBMS_UTILITY.GET_PARAMETER_VALUE('max_fsm_pages', v_intval, v_strval);
    DBMS_OUTPUT.PUT_LINE('max_fsm_pages' || ': ' || v_intval);
    DBMS_UTILITY.GET_PARAMETER_VALUE('client_encoding', v_intval, v_strval);
    DBMS_OUTPUT.PUT_LINE('client_encoding' || ': ' || v_strval);
END;

max_fsm_pages: 72625
client_encoding: SQL_ASCII
```

## GET_TIME

The `GET_TIME` function returns the current time in hundredths of a second.

```text