From 50bad2310daada1f79660eb7cfbd4b456b4ca437 Mon Sep 17 00:00:00 2001 From: francoughlin Date: Thu, 3 Aug 2023 16:45:55 -0400 Subject: [PATCH 01/38] Edits for the Application programming branch Assorted edits for the Application programming branch, as well as some file restructure and topic re-location --- .../01_creating_a_procedure.mdx | 4 +- .../04_procedures_overview/index.mdx | 1 + .../01_creating_a_function.mdx | 6 +- .../05_functions_overview/index.mdx | 1 + ...positional_vs_named_parameter_notation.mdx | 6 +- .../02_parameter_modes.mdx | 8 +- .../03_using_default_values_in_parameters.mdx | 4 + .../declaring_parameters.mdx | 87 ++++++++++++++++++ .../index.mdx | 89 ++----------------- ...ion_errors_in_procedures_and_functions.mdx | 3 + .../01_oracle_compat_summary.mdx | 2 +- .../01_interval_range_partitioning.mdx | 4 +- .../02_automatic_list_partitioning.mdx | 2 +- .../02_selecting_a_partition_type/index.mdx | 4 + .../03_using_partition_pruning/index.mdx | 8 +- .../index.mdx | 2 +- .../01_default_optimization_modes.mdx | 4 +- .../02_access_method_hints.mdx | 10 +-- .../04_joining_relations_hints.mdx | 12 ++- .../05_optimizer_hints/05_global_hints.mdx | 8 +- .../06_using_the_append_optimizer_hint.mdx | 2 +- .../07_parallelism_hints.mdx | 24 ++--- .../about_optimizer_hints.mdx | 52 +++++++++++ .../05_optimizer_hints/index.mdx | 61 +++---------- .../optimizing_code/index.mdx | 2 +- .../optimizing_code/optimizing_code.mdx | 2 +- .../01_all_part_tables.mdx | 0 .../02_all_tab_partitions.mdx | 0 .../03_all_tab_subpartitions.mdx | 0 .../04_all_part_key_columns.mdx | 0 .../05_all_subpart_key_columns.mdx | 0 .../index.mdx | 3 + 32 files changed, 240 insertions(+), 171 deletions(-) create mode 100644 product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx create mode 100644 product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx rename product_docs/docs/epas/15/{application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table => reference/application_programmer_reference}/01_table_partitioning_views_reference/01_all_part_tables.mdx (100%) rename product_docs/docs/epas/15/{application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table => reference/application_programmer_reference}/01_table_partitioning_views_reference/02_all_tab_partitions.mdx (100%) rename product_docs/docs/epas/15/{application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table => reference/application_programmer_reference}/01_table_partitioning_views_reference/03_all_tab_subpartitions.mdx (100%) rename product_docs/docs/epas/15/{application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table => reference/application_programmer_reference}/01_table_partitioning_views_reference/04_all_part_key_columns.mdx (100%) rename product_docs/docs/epas/15/{application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table => reference/application_programmer_reference}/01_table_partitioning_views_reference/05_all_subpart_key_columns.mdx (100%) rename product_docs/docs/epas/15/{application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table => reference/application_programmer_reference}/01_table_partitioning_views_reference/index.mdx (88%) diff 
--git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx index 788d5942e65..903e48bb021 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/01_creating_a_procedure.mdx @@ -11,7 +11,9 @@ The `CREATE PROCEDURE` command defines and names a standalone procedure that's s If you include a schema name, then the procedure is created in the specified schema. Otherwise it's created in the current schema. The name of the new procedure must not match any existing procedure with the same input argument types in the same schema. However, procedures of different input argument types can share a name. This is called *overloading*. !!! Note - Overloading of procedures is an EDB Postgres Advanced Server feature. Overloading of stored, standalone procedures isn't compatible with Oracle databases. + Overloading of procedures is an EDB Postgres Advanced Server feature. **Overloading of stored, standalone procedures isn't compatible with Oracle databases.** + +## Updating the definition of an existing procedure To update the definition of an existing procedure, use `CREATE OR REPLACE PROCEDURE`. You can't change the name or argument types of a procedure this way. Attempting to do so creates a new, distinct procedure. When using `OUT` parameters, you can't change the types of any `OUT` parameters except by dropping the procedure. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/index.mdx index a3b1e4d7f7c..34ded5ab71e 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/04_procedures_overview/index.mdx @@ -1,5 +1,6 @@ --- title: "Procedures overview" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.052.html" diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function.mdx index 7bc083e10c9..7b38e4b6e46 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/01_creating_a_function.mdx @@ -10,8 +10,10 @@ The `CREATE FUNCTION` command defines and names a standalone function to store i If a schema name is included, then the function is created in the specified schema. Otherwise it's created in the current schema. 
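+
+As a minimal sketch of schema placement (it assumes a schema named `hr` already exists and that the sample `emp` table is available), the following statement creates the function in the `hr` schema rather than in the current schema:
+
+```sql
+CREATE OR REPLACE FUNCTION hr.dept_count (
+    p_deptno    NUMBER
+) RETURN NUMBER
+IS
+    v_count     NUMBER;
+BEGIN
+    -- Because the function name is schema-qualified, the function is stored
+    -- in the hr schema instead of the current schema.
+    SELECT COUNT(*) INTO v_count FROM emp WHERE deptno = p_deptno;
+    RETURN v_count;
+END;
+```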
The name of the new function must not match any existing function with the same input argument types in the same schema. However, functions of different input argument types can share a name. Sharing a name is called *overloading*. -!!! Note - Overloading functions is an EDB Postgres Advanced Server feature. Overloading stored, standalone functions isn't compatible with Oracle databases. +!!! Note + Overloading functions is an EDB Postgres Advanced Server feature. **Overloading stored, standalone functions isn't compatible with Oracle databases.** + +## Updating the definition of an existing function To update the definition of an existing function, use `CREATE OR REPLACE FUNCTION`. You can't change the name or argument types of a function this way. If you try to, you instead create a new, distinct function. Also, `CREATE OR REPLACE FUNCTION` doesn't let you change the return type of an existing function. To do that, you must drop and recreate the function. When using `OUT` parameters, you can't change the types of any `OUT` parameters except by dropping the function. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/index.mdx index 299ed4295ef..a93612da4d1 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/05_functions_overview/index.mdx @@ -1,5 +1,6 @@ --- title: "Functions overview" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.053.html" diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation.mdx index 70b35bb5fed..323e2d6eb98 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/01_positional_vs_named_parameter_notation.mdx @@ -6,9 +6,11 @@ redirects: -You can use either *positional* or *named* parameter notation when passing parameters to a function or procedure. If you specify parameters using positional notation, you must list the parameters in the order that they are declared. If you specify parameters with named notation, the order of the parameters doesn't matter. +You can use either *positional* or *named* parameter notation when passing parameters to a function or procedure. -To specify parameters using named notation, list the name of each parameter followed by an arrow (`=>`) and the parameter value. Named notation is more verbose but makes your code easier to read and maintain. +- If you specify parameters using *positional notation*, you must list the parameters in the order that they are declared. 
If you specify parameters with named notation, the order of the parameters doesn't matter. + +- If you specify parameters using *named notation*, list the name of each parameter followed by an arrow (`=>`) and the parameter value. Named notation is more verbose but makes your code easier to read and maintain. This example uses positional and named parameter notation: diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes.mdx index faffd96efff..7e6ff851154 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/02_parameter_modes.mdx @@ -23,8 +23,10 @@ The following table summarizes the behavior of parameters according to their mod | Actual parameter contains: (after a handled exception in the called program) | Original actual parameter value prior to the call | Last value of the formal parameter | Last value of the formal parameter | | Actual parameter contains: (after an unhandled exception in the called program) | Original actual parameter value prior to the call | Original actual parameter value prior to the call | Original actual parameter value prior to the call | -As shown by the table, an `IN` formal parameter is initialized to the actual parameter with which it's called unless it was explicitly initialized with a default value. You can reference the `IN` parameter in the called program. However, the called program can't assign a new value to the `IN` parameter. After control returns to the calling program, the actual parameter always contains the same value that it had prior to the call. +As shown by the table: -The `OUT` formal parameter is initialized to the actual parameter with which it's called. The called program can reference and assign new values to the formal parameter. If the called program ends without an exception, the actual parameter takes on the value last set in the formal parameter. If a handled exception occurs, the value of the actual parameter takes on the last value assigned to the formal parameter. If an unhandled exception occurs, the value of the actual parameter remains as it was prior to the call. +- The `IN` formal parameter is initialized to the actual parameter with which it's called unless it was explicitly initialized with a default value. You can reference the `IN` parameter in the called program. However, the called program can't assign a new value to the `IN` parameter. After control returns to the calling program, the actual parameter always contains the same value that it had prior to the call. -Like an `IN` parameter, an `IN OUT` formal parameter is initialized to the actual parameter with which it's called. Like an `OUT` parameter, an `IN OUT` formal parameter can be modifiwed by the called program. The last value in the formal parameter is passed to the calling program’s actual parameter if the called program ends without an exception. If a handled exception occurs, the value of the actual parameter takes on the last value assigned to the formal parameter. If an unhandled exception occurs, the value of the actual parameter remains as it was prior to the call. 
+- Like an `IN` parameter, an `IN OUT` formal parameter is initialized to the actual parameter with which it's called. Like an `OUT` parameter, an `IN OUT` formal parameter can be modified by the called program. The last value in the formal parameter is passed to the calling program’s actual parameter if the called program ends without an exception. If a handled exception occurs, the value of the actual parameter takes on the last value assigned to the formal parameter. If an unhandled exception occurs, the value of the actual parameter remains as it was prior to the call. + +- The `OUT` formal parameter is initialized to the actual parameter with which it's called. The called program can reference and assign new values to the formal parameter. If the called program ends without an exception, the actual parameter takes on the value last set in the formal parameter. If a handled exception occurs, the value of the actual parameter takes on the last value assigned to the formal parameter. If an unhandled exception occurs, the value of the actual parameter remains as it was prior to the call. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters.mdx index 8671c7734d6..cd713aaf133 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/03_using_default_values_in_parameters.mdx @@ -8,6 +8,8 @@ redirects: You can set a default value of a formal parameter by including the `DEFAULT` clause or using the assignment operator (`:=`) in the `CREATE PROCEDURE` or `CREATE FUNCTION` statement. +## Syntax + The general form of a formal parameter declaration is: ```text @@ -24,6 +26,8 @@ The general form of a formal parameter declaration is: The default value is evaluated every time you invoke the function or procedure. For example, assigning `SYSDATE` to a parameter of type `DATE` causes the parameter to have the time of the current invocation, not the time when the procedure or function was created. +## Example + This example uses the assignment operator to set a default value of `SYSDATE` into the parameter `hiredate:` ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx new file mode 100644 index 00000000000..807061bacd6 --- /dev/null +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/declaring_parameters.mdx @@ -0,0 +1,87 @@ +--- +title: "Declaring parameters" +--- + +Declare parameters in the procedure or function definition, and enclose them in parentheses following the procedure or function name. Parameters declared in the procedure or function definition are known as *formal parameters*.
When you invoke the procedure or function, the calling program supplies the actual data to use in the called program’s processing as well as the variables that receive the results of the called program’s processing. The data and variables supplied by the calling program when the procedure or function is called are referred to as the *actual parameters*. + +The following is the general format of a formal parameter declaration. + +```text +( [ IN | OUT | IN OUT ] [ DEFAULT ]) +``` + +- `name` is an identifier assigned to the formal parameter. +- Whether a parameter is `IN`, `OUT`, or `IN OUT` is referred to as the parameter’s *mode*. If specified, `IN` defines the parameter for receiving input data into the procedure or function. An `IN` parameter can also be initialized to a default value. If specified, `OUT` defines the parameter for returning data from the procedure or function. If specified, `IN OUT` allows the parameter to be used for both input and output. If all of `IN`, `OUT`, and `IN OUT` are omitted, then the parameter acts as if it were defined as `IN` by default. +- `data_type` defines the data type of the parameter. +- `value` is a default value assigned to an `IN` parameter in the called program if you don't specify an actual parameter in the call. + +This example shows a procedure that takes parameters: + +```sql +CREATE OR REPLACE PROCEDURE emp_query ( + p_deptno IN NUMBER, + p_empno IN OUT NUMBER, + p_ename IN OUT VARCHAR2, + p_job OUT VARCHAR2, + p_hiredate OUT DATE, + p_sal OUT NUMBER +) +IS +BEGIN + SELECT empno, ename, job, hiredate, sal + INTO p_empno, p_ename, p_job, p_hiredate, p_sal + FROM emp + WHERE deptno = p_deptno + AND (empno = p_empno + OR ename = UPPER(p_ename)); +END; +``` + +In this example, `p_deptno` is an `IN` formal parameter, `p_empno` and `p_ename` are `IN OUT` formal parameters, and `p_job, p_hiredate` and `p_sal` are `OUT` formal parameters. + +!!! Note + In the example, no maximum length was specified on the `VARCHAR2` parameters and no precision and scale were specified on the `NUMBER` parameters. It's illegal to specify a length, precision, scale, or other constraints on parameter declarations. These constraints are inherited from the actual parameters that are used when the procedure or function is called. + +The `emp_query` procedure can be called by another program, passing it the actual parameters. This example is another SPL program that calls `emp_query`. + +```sql +DECLARE + v_deptno NUMBER(2); + v_empno NUMBER(4); + v_ename VARCHAR2(10); + v_job VARCHAR2(9); + v_hiredate DATE; + v_sal NUMBER; +BEGIN + v_deptno := 30; + v_empno := 7900; + v_ename := ''; + emp_query(v_deptno, v_empno, v_ename, v_job, v_hiredate, v_sal); + DBMS_OUTPUT.PUT_LINE('Department : ' || v_deptno); + DBMS_OUTPUT.PUT_LINE('Employee No: ' || v_empno); + DBMS_OUTPUT.PUT_LINE('Name : ' || v_ename); + DBMS_OUTPUT.PUT_LINE('Job : ' || v_job); + DBMS_OUTPUT.PUT_LINE('Hire Date : ' || v_hiredate); + DBMS_OUTPUT.PUT_LINE('Salary : ' || v_sal); +END; +``` + +In this example, `v_deptno`, `v_empno`, `v_ename`, `v_job`, `v_hiredate`, and `v_sal` are the actual parameters. + +The output from the example is: + +```sql +__OUTPUT__ +Department : 30 +Employee No: 7900 +Name : JAMES +Job : CLERK +Hire Date : 03-DEC-81 +Salary : 950 +``` + +
+ +positional_vs_named_parameter_notation parameter_modes using_default_values_in_parameters + +
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx index 02e7e67a640..406fca1626d 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx @@ -1,5 +1,11 @@ --- title: "Procedure and function parameters" +indexCards: simple +navigation: +- declaring_parameters +- 01_positional_vs_named_parameter_notation +- 02_parameter_modes +- 03_using_default_values_in_parameters legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.054.html" @@ -12,86 +18,3 @@ redirects: An important aspect of using procedures and functions is the capability to pass data from the calling program to the procedure or function and to receive data back from the procedure or function. You do this by using *parameters*. -Declare parameters in the procedure or function definition, and enclose them in parentheses following the procedure or function name. Parameters declared in the procedure or function definition are known as *formal parameters*. When you invoke the procedure or function, the calling program supplies the actual data to use in the called program’s processing as well as the variables that receive the results of the called program’s processing. The data and variables supplied by the calling program when the procedure or function is called are referred to as the *actual parameters*. - -The following is the general format of a formal parameter declaration. - -```text -( [ IN | OUT | IN OUT ] [ DEFAULT ]) -``` - -- `name` is an identifier assigned to the formal parameter. -- Whether a parameter is `IN`, `OUT`, or `IN OUT` is referred to as the parameter’s *mode*. If specified, `IN` defines the parameter for receiving input data into the procedure or function. An `IN` parameter can also be initialized to a default value. If specified, `OUT` defines the parameter for returning data from the procedure or function. If specified, `IN OUT` allows the parameter to be used for both input and output. If all of `IN`, `OUT`, and `IN OUT` are omitted, then the parameter acts as if it were defined as `IN` by default. -- `data_type` defines the data type of the parameter. -- `value` is a default value assigned to an `IN` parameter in the called program if you don't specify an actual parameter in the call. - -This example shows a procedure that takes parameters: - -```sql -CREATE OR REPLACE PROCEDURE emp_query ( - p_deptno IN NUMBER, - p_empno IN OUT NUMBER, - p_ename IN OUT VARCHAR2, - p_job OUT VARCHAR2, - p_hiredate OUT DATE, - p_sal OUT NUMBER -) -IS -BEGIN - SELECT empno, ename, job, hiredate, sal - INTO p_empno, p_ename, p_job, p_hiredate, p_sal - FROM emp - WHERE deptno = p_deptno - AND (empno = p_empno - OR ename = UPPER(p_ename)); -END; -``` - -In this example, `p_deptno` is an `IN` formal parameter, `p_empno` and `p_ename` are `IN OUT` formal parameters, and `p_job, p_hiredate` and `p_sal` are `OUT` formal parameters. - -!!! 
Note - In the example, no maximum length was specified on the `VARCHAR2` parameters and no precision and scale were specified on the `NUMBER` parameters. It's illegal to specify a length, precision, scale, or other constraints on parameter declarations. These constraints are inherited from the actual parameters that are used when the procedure or function is called. - -The `emp_query` procedure can be called by another program, passing it the actual parameters. This example is another SPL program that calls `emp_query`. - -```sql -DECLARE - v_deptno NUMBER(2); - v_empno NUMBER(4); - v_ename VARCHAR2(10); - v_job VARCHAR2(9); - v_hiredate DATE; - v_sal NUMBER; -BEGIN - v_deptno := 30; - v_empno := 7900; - v_ename := ''; - emp_query(v_deptno, v_empno, v_ename, v_job, v_hiredate, v_sal); - DBMS_OUTPUT.PUT_LINE('Department : ' || v_deptno); - DBMS_OUTPUT.PUT_LINE('Employee No: ' || v_empno); - DBMS_OUTPUT.PUT_LINE('Name : ' || v_ename); - DBMS_OUTPUT.PUT_LINE('Job : ' || v_job); - DBMS_OUTPUT.PUT_LINE('Hire Date : ' || v_hiredate); - DBMS_OUTPUT.PUT_LINE('Salary : ' || v_sal); -END; -``` - -In this example, `v_deptno`, `v_empno`, `v_ename`, `v_job`, `v_hiredate`, and `v_sal` are the actual parameters. - -The output from the example is: - -```sql -__OUTPUT__ -Department : 30 -Employee No: 7900 -Name : JAMES -Job : CLERK -Hire Date : 03-DEC-81 -Salary : 950 -``` - -
- -positional_vs_named_parameter_notation parameter_modes using_default_values_in_parameters - -
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions.mdx index 4c081410cb5..b6f134fe171 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/08_compilation_errors_in_procedures_and_functions.mdx @@ -12,6 +12,8 @@ redirects: When the EDB Postgres Advanced Server parsers compile a procedure or function, they confirm that both the `CREATE` statement and the program body (the portion of the program that follows the `AS` keyword) conform to the grammar rules for SPL and SQL constructs. By default, the server stops compiling if a parser detects an error. The parsers detect syntax errors in expressions, but they don't detect semantic errors. Semantic errors include an expression referencing a nonexistent column, table, or function, or a value of incorrect type. +## Setting an error count compilation limit + `spl.max_error_count` instructs the server to stop parsing if it encounters the specified number of errors in SPL code or when it encounters an error in SQL code. The default value of `spl.max_error_count` is `10`. The maximum value is `1000`. Setting `spl.max_error_count` to a value of `1` instructs the server to stop parsing when it encounters the first error in either SPL or SQL code. You can use the `SET` command to specify a value for `spl.max_error_count` for your current session. The syntax is: @@ -25,6 +27,7 @@ Where `number_of_errors` specifies the number of SPL errors that can occur befor ```sql SET spl.max_error_count = 6 ``` +## Example The example instructs the server to continue past the first five SPL errors it encounters. When the server encounters the sixth error, it stops validating and prints six detailed error messages and one error summary. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/01_oracle_compat_summary.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/01_oracle_compat_summary.mdx index b5765f94643..9d857ff7329 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/01_oracle_compat_summary.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/01_oracle_compat_summary.mdx @@ -1,5 +1,5 @@ --- -title: "Oracle compatibility summary" +title: "Oracle table partitioning compatibility summary" --- EDB Postgres Advanced Server supports aspects of table partitioning that are compatible with Oracle databases. 
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning.mdx index 1fa1465cc48..b13049ca8de 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/01_interval_range_partitioning.mdx @@ -10,7 +10,9 @@ Interval range partitioning is an extension to range partitioning that allows a The high value of a range partition, also known as the transition point, is determined by the range partitioning key value. The database creates partitions for inserted data with values that are beyond that high value. -Suppose an interval is set to one month. If data is inserted for two months after the current transition point, only the partition for the second month is created and not the intervening partition. For example, you can create an interval-range-partitioned table with a monthly interval and a current transition point of February 15, 2019. If you try to insert data for May 10, 2019, then the required partition for April 15 to May 15, 2019 is created and data is inserted into that partition. The partition for February 15, 2019 to March 15, 2019 and March 15, 2019 to April 15, 2019 is skipped. +## Interval range partitioning example + +Suppose an interval is set to one month. If data is inserted for two months after the current transition point, only the partition for the second month is created and not the intervening partition. For example, you can create an interval-range-partitioned table with a monthly interval and a current transition point of February 15, 2023. If you try to insert data for May 10, 2023, then the required partition for April 15 to May 15, 2023 is created and data is inserted into that partition. The partition for February 15, 2023 to March 15, 2023 and March 15, 2023 to April 15, 2023 is skipped. For information about interval range partitioning syntax, see [CREATE TABLE...PARTITION BY](/../../../reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/01_create_table_partition_by/#create_table_partition_by). diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning.mdx index c3e10d5b067..1fde123939e 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/02_automatic_list_partitioning.mdx @@ -12,7 +12,7 @@ For example, consider a table named `sales` with a `sales_state` column that con For information about automatic list partitioning syntax, see [CREATE TABLE...PARTITION BY](/../../../reference/oracle_compatibility_reference/04_partitioning_commands_compatible_with_oracle_databases/01_create_table_partition_by/#create_table_partition_by). 
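+
+As a rough sketch of what that looks like in practice (the column list is illustrative and the partition names are arbitrary), the `sales` table described above might be declared with automatic list partitioning like this:
+
+```sql
+CREATE TABLE sales
+(
+    dept_no       NUMBER,
+    part_no       VARCHAR2(40),
+    sales_state   VARCHAR2(20),
+    date_of_sale  DATE,
+    amount        NUMBER
+)
+PARTITION BY LIST (sales_state) AUTOMATIC
+(
+    PARTITION p_ca VALUES ('CALIFORNIA'),
+    PARTITION p_fl VALUES ('FLORIDA')
+);
+
+-- Inserting a row with a new sales_state value, for example 'TEXAS',
+-- causes the server to create a partition for that value automatically.
+INSERT INTO sales VALUES (10, 'P-101', 'TEXAS', SYSDATE, 500);
+```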
-## Restrictions on automatic list partitioning +## Restrictions for automatic list partitioning The following restrictions apply to the `AUTOMATIC` clause: diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/index.mdx index d8db0547a5f..cb4a6854cb1 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/02_selecting_a_partition_type/index.mdx @@ -22,10 +22,14 @@ When you create a list-partitioned table, you specify a single partitioning key !!! Note List partitioning doesn't support multi-column list partitioning. +See [Automatic list partitioning](02_automatic_list_partitioning.mdx) for information about an extension to `LIST` partitioning that enables a database to automatically create a partition for any new distinct value of the list partitioning key. + ## Range partitioning When you create a range-partitioned table, you specify one or more partitioning key columns. When you add a row to the table, the server compares the value of the partitioning keys to the corresponding columns in a table entry. If the column values satisfy the conditions specified in the partitioning rule, the row is stored in the partition named in the rule. +See [Interval range partitioning](01_interval_range_partitioning.mdx) for information about an extension to range partitioning that enables a database to create a partition when the inserted data exceeds the range of an existing partition. + ## Hash partitioning When you create a hash-partitioned table, you specify one or more partitioning key columns. Data is divided into approximately equal-sized partitions among the specified partitions. When you add a row to a hash-partitioned table, the server computes a hash value for the data in the specified columns and stores the row in a partition according to the hash value. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/index.mdx index d07530a091b..d170df23830 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/03_using_partition_pruning/index.mdx @@ -19,15 +19,19 @@ EDB Postgres Advanced Server's query planner uses *partition pruning* to compute | Range-partitioned table | The `WHERE` clause compares a literal value to a partitioning key using operators such as equal (=), less than (<), or greater than (>). | Hash-partitioned table | The `WHERE` clause compares a literal value to the partitioning key using an operator such as equal (=). | +## Partition pruning techniques + The partition pruning mechanism uses two optimization techniques: -- Fast pruning - Constraint exclusion +- Fast pruning Partition pruning techniques limit the search for data only to those partitions where the values you're searching for might reside. Both pruning techniques remove partitions from a query's execution plan, improving performance. 
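+
+For instance (a sketch only: it assumes a hypothetical `sales` table list-partitioned on a `country` column, with a partition named `sales_europe` that holds `'FRANCE'`, and the plan output is illustrative), `EXPLAIN` shows the planner scanning just the one partition that can contain the requested rows:
+
+```sql
+EXPLAIN SELECT * FROM sales WHERE country = 'FRANCE';
+__OUTPUT__
+                          QUERY PLAN
+---------------------------------------------------------------
+ Seq Scan on sales_europe  (cost=0.00..23.38 rows=5 width=64)
+   Filter: ((country)::text = 'FRANCE'::text)
+(2 rows)
+```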
The difference between the fast pruning and constraint exclusion is that fast pruning understands the relationship between the partitions in an Oracle-partitioned table. Constraint exclusion doesn't. For example, when a query searches for a specific value in a list-partitioned table, fast pruning can reason that only a specific partition can hold that value. Constraint exclusion must examine the constraints defined for each partition. Fast pruning occurs early in the planning process to reduce the number of partitions that the planner must consider. Constraint exclusion occurs late in the planning process. +[This example](01_example_partition_pruning.mdx) shows the efficiency of partition pruning, using the `EXPLAIN` statement to confirm that EDB Postgres Advanced Server is pruning partitions from the execution plan of a query. + ## Using constraint exclusion The `constraint_exclusion` parameter controls constraint exclusion. The `constraint_exclusion` parameter can have a value of `on`, `off`, or `partition`. To enable constraint exclusion, you must set the parameter to either `partition` or `on`. By default, the parameter is set to `partition`. @@ -44,7 +48,7 @@ When you execute a `SELECT` statement that doesn't contain a `WHERE` clause, the If you aren't using partitioned tables, disabling constraint exclusion might improve performance. -## Fast pruning +## Using fast pruning Like constraint exclusion, fast pruning can optimize only queries that include a `WHERE` or join clause. However, the qualifiers in the `WHERE` clause must match a certain form. In both cases, the query planner avoids searching for data in partitions that can't hold the data required by the query. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/index.mdx index ecbba701c68..c022f2a9883 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/index.mdx @@ -23,7 +23,7 @@ You can query the following views to retrieve information about partitioned and - `ALL_PART_KEY_COLUMNS` - `ALL_SUBPART_KEY_COLUMNS` -The structure of each view is explained in [Table partitioning views reference](01_table_partitioning_views_reference/#table_partitioning_views_reference). If you're using the EDB-PSQL client, you can also learn about the structure of a view by entering: +The structure of each view is explained in [Table partitioning views reference](../../../reference/application_programmer_reference/01_table_partitioning_views_reference/#table_partitioning_views_reference). 
If you're using the EDB-PSQL client, you can also learn about the structure of a view by entering: `\d ` diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/01_default_optimization_modes.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/01_default_optimization_modes.mdx index 3f525f1d6a4..ecf09aa1770 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/01_default_optimization_modes.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/01_default_optimization_modes.mdx @@ -27,7 +27,7 @@ The table shows the possible values. These optimization modes are based on the assumption that the client submitting the SQL command is interested in viewing only the first *n* rows of the result set and not the remainder of the result set. Resources allocated to the query are adjusted as such. -## Examples +## Example: Specifying the number of rows to retrieve in the result set Alter the current session to optimize for retrieval of the first 10 rows of the result set: ```sql ALTER SESSION SET OPTIMIZER_MODE = FIRST_ROWS_10; ``` +## Example: Showing the current value of the OPTIMIZER_MODE parameter + You can show the current value of the `OPTIMIZER_MODE` parameter by using the `SHOW` command. This command depends on the utility. In PSQL, use the `SHOW` command as follows: ```sql diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/02_access_method_hints.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/02_access_method_hints.mdx index b626f6358fa..288edc199a3 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/02_access_method_hints.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/02_access_method_hints.mdx @@ -23,11 +23,11 @@ In addition, you can use the `ALL_ROWS`, `FIRST_ROWS`, and `FIRST_ROWS(n)` hints `INDEX` and `NO_INDEX` hints for the partitioned table internally expand to include the corresponding inherited child indexes and apply in later processing. -## Examples +## About the examples The sample application doesn't have enough data to show the effects of optimizer hints. Thus the remainder of these examples use a banking database created by the `pgbench` application located in the EDB Postgres Advanced Server `bin` subdirectory. -## Create a sample database and tables +## Example: Create a sample database and tables The following steps create a database named `bank` populated by the tables `pgbench_accounts, pgbench_branches, pgbench_tellers`, and `pgbench_history`. The `-s 20` option specifies a scaling factor of 20, which results in the creation of 20 branches. Each branch has 100,000 accounts. The result is a total of 2,000,000 rows in the `pgbench_accounts` table and 20 rows in the `pgbench_branches` table. Ten tellers are assigned to each branch resulting in a total of 200 rows in the `pgbench_tellers` table.
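+
+Once the tables are loaded, one quick way to confirm that scale (a sketch; the expected counts below assume the `-s 20` run described above) is to count the rows:
+
+```sql
+SELECT count(*) FROM pgbench_branches;   -- expected:       20
+SELECT count(*) FROM pgbench_tellers;    -- expected:      200
+SELECT count(*) FROM pgbench_accounts;   -- expected: 2000000
+```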
@@ -152,7 +152,7 @@ rows=1 width=97) (2 rows) ``` -## FULL hint example +## Example: FULL hint The `FULL` hint forces a full sequential scan instead of using the index: @@ -167,7 +167,7 @@ Seq Scan on pgbench_accounts (cost=0.00..58781.69 rows=1 width=97) (2 rows) ``` -## NO_INDEX hint example +## Example: NO_INDEX hint The `NO_INDEX` hint forces a parallel sequential scan instead of using the index: @@ -230,7 +230,7 @@ rows=1 width=97) (2 rows) ``` -## INDEX hint example for the partitioned table +## Example: INDEX hint for the partitioned table ```sql diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/04_joining_relations_hints.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/04_joining_relations_hints.mdx index 8b7425c866d..c5c082196bb 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/04_joining_relations_hints.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/04_joining_relations_hints.mdx @@ -13,9 +13,11 @@ redirects: When you join two tables, you can use any of three plans to perform the join. -- Nested loop join — A table is scanned once for every row in the other joined table. -- Merge sort join — Each table is sorted on the join attributes before the join starts. The two tables are then scanned in parallel, and the matching rows are combined to form the join rows. -- Hash join — A table is scanned and its join attributes are loaded into a hash table using its join attributes as hash keys. The other joined table is then scanned and its join attributes are used as hash keys to locate the matching rows from the first table. +- **Nested loop join** — A table is scanned once for every row in the other joined table. +- **Merge sort join** — Each table is sorted on the join attributes before the join starts. The two tables are then scanned in parallel, and the matching rows are combined to form the join rows. +- **Hash join** — A table is scanned and its join attributes are loaded into a hash table using its join attributes as hash keys. The other joined table is then scanned and its join attributes are used as hash keys to locate the matching rows from the first table. + +## List of optimizer hints for join plans The following table lists the optimizer hints that you can use to influence the planner to use one type of join plan over another. @@ -28,7 +30,7 @@ The following table lists the optimizer hints that you can use to influence the | `USE_NL(table [...])` | Use a nested loop join for `table`. | | `NO_USE_NL(table [...])` | Don't use a nested loop join for `table`. | -## Examples +## Example: Hash join In this example, the `USE_HASH` hint is used for a join on the `pgbench_branches` and `pgbench_accounts` tables. The query plan shows that a hash join is used by creating a hash table from the join attribute of the `pgbench_branches` table: @@ -89,6 +91,8 @@ __OUTPUT__ (9 rows) ``` +## Example: Three-table join + In this three-table join example, the planner first performs a hash join on the `pgbench_branches` and `pgbench_history` tables. Then it performs a hash join of the result with the `pgbench_accounts` table. 
```sql diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/05_global_hints.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/05_global_hints.mdx index 36d34cecebf..c550258daa7 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/05_global_hints.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/05_global_hints.mdx @@ -11,7 +11,7 @@ redirects: -In addition to applying hits directly to tables that are referenced in the SQL command, you can apply hints to tables that appear in a view when the view is referenced in the SQL command. The hint doesn't appear in the view but in the SQL command that references the view. +In addition to applying hints directly to tables that are referenced in the SQL command, you can apply hints to tables that appear in a view when the view is referenced in the SQL command. The hint doesn't appear in the view but in the SQL command that references the view. When specifying a hint that applies to a table in a view, give the view and table names in dot notation in the hint argument list. @@ -35,7 +35,7 @@ When specifying a hint that applies to a table in a view, give the view and tabl The table on which to apply the hint. -## Examples +## Example: Applying hints to a stored view A view named `tx` is created from the three-table join of `pgbench_history`, `pgbench_branches`, and `pgbench_accounts`, shown in the last example of [Joining relations hints](04_joining_relations_hints/#joining_relations_hints). @@ -89,7 +89,9 @@ __OUTPUT__ (13 rows) ``` -In addition to applying hints to tables in stored views, you can apply hints to tables in subqueries as shown by this example. In this query on the sample application `emp` table, employees and their managers are listed by joining the `emp` table with a subquery of the `emp` table identified by the alias `b`: +## Applying hints to tables in subqueries + +In addition to applying hints to tables in stored views, you can apply hints to tables in subqueries. In this query on the sample application `emp` table, employees and their managers are listed by joining the `emp` table with a subquery of the `emp` table identified by the alias `b`: ```sql SELECT a.empno, a.ename, b.empno "mgr empno", b.ename "mgr ename" FROM emp a, diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/06_using_the_append_optimizer_hint.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/06_using_the_append_optimizer_hint.mdx index 7c514fb2bae..535b32fbbf0 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/06_using_the_append_optimizer_hint.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/06_using_the_append_optimizer_hint.mdx @@ -1,5 +1,5 @@ --- -title: "Using the APPEND optimizer hint" +title: "APPEND optimizer hint" legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.043.html" diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/07_parallelism_hints.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/07_parallelism_hints.mdx index bb8345a7e20..992ebea9cd3 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/07_parallelism_hints.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/07_parallelism_hints.mdx @@ -9,9 +9,10 @@ redirects: -The `PARALLEL` optimizer hint forces parallel scanning. +*Parallel scanning* is the use of multiple background workers to simultaneously perform a scan of a table, that is, in parallel, for a given query. This process provides performance improvement over other methods such as the sequential scan. -The `NO_PARALLEL` optimizer hint prevents use of a parallel scan. +- The `PARALLEL` optimizer hint forces parallel scanning. +- The `NO_PARALLEL` optimizer hint prevents use of a parallel scan. ## Synopsis @@ -21,10 +22,6 @@ PARALLEL ( [ | DEFAULT ]) NO_PARALLEL (
) ``` -## Description - -*Parallel scanning* is the use of multiple background workers to simultaneously perform a scan of a table, that is, in parallel, for a given query. This process provides performance improvement over other methods such as the sequential scan. - ## Parameters `table` @@ -35,13 +32,12 @@ NO_PARALLEL (
) `parallel_degree` is a positive integer that specifies the desired number of workers to use for a parallel scan. If specified, the lesser of `parallel_degree` and configuration parameter `max_parallel_workers_per_gather` is used as the planned number of workers. For information on the `max_parallel_workers_per_gather` parameter, see *Asynchronous Behavior* under *Resource Consumption* in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/runtime-config-resource.html). - If you specify `DEFAULT`, then the maximum possible parallel degree is used. - - If you omit both `parallel_degree` and `DEFAULT`, then the query optimizer determines the parallel degree. In this case, if `table` was set with the `parallel_workers` storage parameter, then this value is used as the parallel degree. Otherwise, the optimizer uses the maximum possible parallel degree as if `DEFAULT` were specified. For information on the `parallel_workers` storage parameter, see `Storage Parameters` under `CREATE TABLE` in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-createtable.html). + - If you specify `DEFAULT`, then the maximum possible parallel degree is used. + - If you omit both `parallel_degree` and `DEFAULT`, then the query optimizer determines the parallel degree. In this case, if `table` was set with the `parallel_workers` storage parameter, then this value is used as the parallel degree. Otherwise, the optimizer uses the maximum possible parallel degree as if `DEFAULT` were specified. For information on the `parallel_workers` storage parameter, see `Storage Parameters` under `CREATE TABLE` in the [PostgreSQL core documentation](https://www.postgresql.org/docs/current/static/sql-createtable.html). Regardless of the circumstance, the parallel degree never exceeds the setting of configuration parameter `max_parallel_workers_per_gather`. -## Examples +## About the examples For these examples, the following configuration parameter settings are in effect: @@ -62,6 +58,8 @@ __OUTPUT__ (1 row) ``` +## Example: Default scan + This example shows the default scan on table `pgbench_accounts`. A sequential scan is shown in the query plan. ```sql @@ -75,6 +73,8 @@ __OUTPUT__ (1 row) ``` +## Example: PARALLEL hint + This example uses the `PARALLEL` hint. In the query plan, the Gather node, which launches the background workers, indicates the plan to use two workers: !!! Note @@ -176,6 +176,8 @@ Indexes: Options: fillfactor=100, parallel_workers=3 ``` +## Example: PARALLEL hint with no parallel degree + When the `PARALLEL` hint is given with no parallel degree, the resulting number of planned workers is the value from the `parallel_workers` parameter: ```sql @@ -192,6 +194,8 @@ __OUTPUT__ (1 row) ``` Specifying a parallel degree value or `DEFAULT` in the `PARALLEL` hint overrides the `parallel_workers` setting. +## Example: NO_PARALLEL hint + This example shows the `NO_PARALLEL` hint. With `trace_hints` set to `on`, the `INFO: [HINTS]` message states that the parallel scan was rejected due to the `NO_PARALLEL` hint.
```sql diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx new file mode 100644 index 00000000000..9e16c46c6d0 --- /dev/null +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/about_optimizer_hints.mdx @@ -0,0 +1,52 @@ +--- +title: "About optimizer hints" +--- + +An *optimizer hint* is one or more directives embedded in a comment-like syntax that immediately follows a `DELETE`, `INSERT`, `SELECT` or `UPDATE` command. Keywords in the comment instruct the server to use or avoid a specific plan when producing the result set. + +## Synopsis + +```sql +{ DELETE | INSERT | SELECT | UPDATE } /*+ { [ ] } [...] */ + + +{ DELETE | INSERT | SELECT | UPDATE } --+ { [ ] } [...] + +``` + + In both forms, a plus sign (+) must immediately follow the `/*` or `--` opening comment symbols, with no intervening space. Otherwise the server doesn't interpret the tokens that follow as hints. + +If you're using the first form, the hint and optional comment might span multiple lines. In the second form, all hints and comments must occupy a single line. The rest of the statement must start on a new line. + +## Description + +Note: + +- The database server always tries to use the specified hints if at all possible. +- If a planner method parameter is set so as to disable a certain plan type, then this plan isn't used even if it is specified in a hint, unless there are no other possible options for the planner. Examples of planner method parameters are `enable_indexscan`, `enable_seqscan`, `enable_hashjoin`, `enable_mergejoin`, and `enable_nestloop`. These are all Boolean parameters. +- The hint is embedded in a comment. As a consequence, if the hint is misspelled or if any parameter to a hint such as view, table, or column name is misspelled or nonexistent in the SQL command, there's no indication that an error occurred. No syntax error is given. The entire hint is silently ignored. +- If an alias is used for a table name in the SQL command, then you must use the alias name in the hint, not the original table name. For example, in the command `SELECT /*+ FULL(acct) */ * FROM accounts acct ..., acct`, you must specify the alias for `accounts` in the `FULL` hint, not in the table name `accounts`. + +Use the `EXPLAIN` command to ensure that the hint is correctly formed and the planner is using the hint. + +In general, don't use optimizer hints in a production application, where table data changes throughout the life of the application. By ensuring that dynamic columns are analyzed frequently via the `ANALYZE` command, the column statistics are updated to reflect value changes. The planner uses such information to produce the lowest-cost plan for any given command execution. Use of optimizer hints defeats the purpose of this process and results in the same plan regardless of how the table data changes. + +## Parameters + +`hint` + + An optimizer hint directive. + +`comment` + + A string with additional information. Comments have restrictions as to what characters you can include. Generally, `comment` can consist only of alphabetic, numeric, the underscore, dollar sign, number sign, and space characters. These must also conform to the syntax of an identifier. Any subsequent hint is ignored if the comment isn't in this form. 
+ +`statement_body` + + The remainder of the `DELETE`, `INSERT`, `SELECT`, or `UPDATE` command. + +
+ +default_optimization_modes access_method_hints specifying_a_join_order joining_relations_hints global_hints using_the_append_optimizer_hint parallelism_hints conflicting_hints + +
diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/index.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/index.mdx index e2ba0dec23e..2c67b8748da 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/index.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/05_optimizer_hints/index.mdx @@ -1,6 +1,17 @@ --- -title: "Optimizer hints" +title: "Using optimizer hints" description: "Describes how to generate and use optimizer hints" +indexCards: simple +navigation: +- about_optimizer_hints +- 01_default_optimization_modes +- 02_access_method_hints +- 03_specifying_a_join_order +- 04_joining_relations_hints +- 05_global_hints +- 06_using_the_append_optimizer_hint +- 07_parallelism_hints +- 08_conflicting_hints legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.037.html" @@ -17,51 +28,5 @@ When you invoke a `DELETE`, `INSERT`, `SELECT`, or `UPDATE` command, the server - Parameter values assigned to parameters in the `Query Tuning` section of the `postgresql.conf` file - Column statistics that were gathered by the ANALYZE command -As a rule, the query planner selects the least expensive plan. You can use an *optimizer hint* to influence the server as it selects a query plan. An optimizer hint is one or more directives embedded in a comment-like syntax that immediately follows a `DELETE`, `INSERT`, `SELECT` or `UPDATE` command. Keywords in the comment instruct the server to use or avoid a specific plan when producing the result set. +As a rule, the query planner selects the least expensive plan. You can use an *optimizer hint* to influence the server as it selects a query plan. -## Synopsis - -```sql -{ DELETE | INSERT | SELECT | UPDATE } /*+ { [ ] } [...] */ - - -{ DELETE | INSERT | SELECT | UPDATE } --+ { [ ] } [...] - -``` - - In both forms, a plus sign (+) must immediately follow the `/*` or `--` opening comment symbols, with no intervening space. Otherwise the server doesn't interpret the tokens that follow as hints. - -If you're using the first form, the hint and optional comment might span multiple lines. In the second form, all hints and comments must occupy a single line. The rest of the statement must start on a new line. - -## Description - -Note: - -- The database server always tries to use the specified hints if at all possible. -- If a planner method parameter is set so as to disable a certain plan type, then this plan isn't used even if it is specified in a hint, unless there are no other possible options for the planner. Examples of planner method parameters are `enable_indexscan`, `enable_seqscan`, `enable_hashjoin`, `enable_mergejoin`, and `enable_nestloop`. These are all Boolean parameters. -- The hint is embedded in a comment. As a consequence, if the hint is misspelled or if any parameter to a hint such as view, table, or column name is misspelled or nonexistent in the SQL command, there's no indication that an error occurred. No syntax error is given. The entire hint is silently ignored. -- If an alias is used for a table name in the SQL command, then you must use the alias name in the hint, not the original table name. 
For example, in the command `SELECT /*+ FULL(acct) */ * FROM accounts acct ..., acct`, you must specify the alias for `accounts` in the `FULL` hint, not in the table name `accounts`. - -Use the `EXPLAIN` command to ensure that the hint is correctly formed and the planner is using the hint. - -In general, don't use optimizer hints in a production application, where table data changes throughout the life of the application. By ensuring that dynamic columns are analyzed frequently via the `ANALYZE` command, the column statistics are updated to reflect value changes. The planner uses such information to produce the lowest-cost plan for any given command execution. Use of optimizer hints defeats the purpose of this process and results in the same plan regardless of how the table data changes. - -## Parameters - -`hint` - - An optimizer hint directive. - -`comment` - - A string with additional information. Comments have restrictions as to what characters you can include. Generally, `comment` can consist only of alphabetic, numeric, the underscore, dollar sign, number sign, and space characters. These must also conform to the syntax of an identifier. Any subsequent hint is ignored if the comment isn't in this form. - -`statement_body` - - The remainder of the `DELETE`, `INSERT`, `SELECT`, or `UPDATE` command. - -
- -default_optimization_modes access_method_hints specifying_a_join_order joining_relations_hints global_hints using_the_append_optimizer_hint parallelism_hints conflicting_hints - -
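Because a malformed hint is silently ignored, a short, hedged example may be useful here. This is a minimal sketch, not taken from the guide: the `accounts` table, its `balance` column, and the `acct` alias are hypothetical, and `EXPLAIN` is used only to confirm that the planner honored the `FULL` hint.

```sql
-- Hypothetical table used only for this illustration
CREATE TABLE accounts (
    acct_id NUMBER(6) PRIMARY KEY,
    balance NUMBER(10,2)
);

-- The plus sign must touch the comment opener, and the hint must name the
-- alias (acct), not the underlying table name (accounts).
EXPLAIN SELECT /*+ FULL(acct) */ * FROM accounts acct WHERE acct.balance > 1000;

-- Single-line form: the hint occupies one line, and the rest of the statement
-- starts on the next line.
EXPLAIN SELECT --+ FULL(acct)
    * FROM accounts acct WHERE acct.balance > 1000;
```

If the resulting plan doesn't show a sequential scan of `accounts`, the hint was probably mistyped or overridden by a planner method parameter.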
diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/index.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/index.mdx index e1ffcca53f9..da1a0cf02de 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/index.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/index.mdx @@ -1,6 +1,6 @@ --- title: "Optimizing code" -description: "How to use EDB Postgres Advanced Server features to optimize database performance" +description: "Describes using EDB Postgres Advanced Server features to optimize database performance" indexCards: simple --- diff --git a/product_docs/docs/epas/15/application_programming/optimizing_code/optimizing_code.mdx b/product_docs/docs/epas/15/application_programming/optimizing_code/optimizing_code.mdx index bee28474d38..de6fcf6d57a 100644 --- a/product_docs/docs/epas/15/application_programming/optimizing_code/optimizing_code.mdx +++ b/product_docs/docs/epas/15/application_programming/optimizing_code/optimizing_code.mdx @@ -1,6 +1,6 @@ --- title: "Optimizing inefficient SQL code" -description: "Benefits of using the SQL Profiler utility to optimize code" +description: "Describes the benefits of using the SQL Profiler utility to optimize code" --- Inefficient SQL code is a leading cause of database performance problems. The challenge for database administrators and developers is locating and then optimizing this code in large, complex systems. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/01_all_part_tables.mdx b/product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/01_all_part_tables.mdx similarity index 100% rename from product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/01_all_part_tables.mdx rename to product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/01_all_part_tables.mdx diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/02_all_tab_partitions.mdx b/product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/02_all_tab_partitions.mdx similarity index 100% rename from product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/02_all_tab_partitions.mdx rename to product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/02_all_tab_partitions.mdx diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/03_all_tab_subpartitions.mdx b/product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/03_all_tab_subpartitions.mdx similarity index 100% rename from product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/03_all_tab_subpartitions.mdx rename to 
product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/03_all_tab_subpartitions.mdx diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/04_all_part_key_columns.mdx b/product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/04_all_part_key_columns.mdx similarity index 100% rename from product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/04_all_part_key_columns.mdx rename to product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/04_all_part_key_columns.mdx diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/05_all_subpart_key_columns.mdx b/product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/05_all_subpart_key_columns.mdx similarity index 100% rename from product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/05_all_subpart_key_columns.mdx rename to product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/05_all_subpart_key_columns.mdx diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/index.mdx b/product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/index.mdx similarity index 88% rename from product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/index.mdx rename to product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/index.mdx index 2897a1ee1a3..75bde14b7b3 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/index.mdx +++ b/product_docs/docs/epas/15/reference/application_programmer_reference/01_table_partitioning_views_reference/index.mdx @@ -1,5 +1,6 @@ --- title: "Table partitioning views reference" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.121.html" @@ -7,6 +8,8 @@ legacyRedirectsGenerated: - "/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/9.5/EDB_Postgres_Enterprise_Guide.1.070.html" redirects: - ../../../../epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference #generated for docs/epas/reorg-role-use-case-mode + - ../../../../epas_compat_table_partitioning/07_retrieving_information_about_a_partitioned_table/01_table_partitioning_views_reference/ + --- From 86cb138b71c80044dc73107cbd1939df17bbbf91 Mon Sep 17 00:00:00 2001 From: francoughlin Date: Fri, 4 Aug 2023 17:24:26 -0400 Subject: [PATCH 02/38] Edits for Application programming SPL sub-branch Edits apply mostly to the SPL sub-branch --- .../index.mdx | 4 +- .../01_creating_a_subprocedure.mdx | 10 ++-- .../02_creating_a_subfunction.mdx | 8 +-- .../03_block_relationships.mdx | 6 ++- .../04_invoking_subprograms.mdx | 5 ++ .../06_overloading_subprograms.mdx | 16 ++++-- .../07_accessing_subprogram_variables.mdx | 12 +++++ .../index.mdx | 1 + .../09_program_security/index.mdx | 1 + .../01_declaring_a_variable.mdx | 4 ++ ...2_using__type_in_variable_declarations.mdx | 4 ++ ...using__row_type_in_record_declarations.mdx | 4 ++ ...ined_record_types_and_record_variables.mdx | 4 ++ .../03_pragma_autonomous_transaction.mdx | 6 ++- .../about_transactions.mdx | 12 +++-- .../epas_compat_spl/07_dynamic_sql.mdx | 4 ++ .../08_static_cursors/02_opening_a_cursor.mdx | 2 +- .../03_fetching_rows_from_a_cursor.mdx | 4 ++ .../02_declaring_a_cursor_variable/index.mdx | 1 + .../07_examples/index.mdx | 1 + .../10_collections/about_collections.mdx | 18 ++++--- .../01_basic_object_concepts/02_methods.mdx | 6 +-- .../01_basic_object_concepts/index.mdx | 5 +- .../01_object_type_specification_syntax.mdx | 8 +++ .../02_object_type_components/index.mdx | 1 + .../03_constructor_methods.mdx | 6 ++- .../03_creating_object_types/index.mdx | 1 + .../04_creating_object_instances.mdx | 6 +++ .../05_referencing_an_object.mdx | 4 ++ .../06_dropping_an_object_type.mdx | 5 ++ .../taking_a_snapshot.mdx | 52 +++++++++---------- 31 files changed, 162 insertions(+), 59 deletions(-) diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx index 406fca1626d..8f1e7955208 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx @@ -1,10 +1,10 @@ --- title: "Procedure and function parameters" -indexCards: simple navigation: - declaring_parameters - 01_positional_vs_named_parameter_notation -- 02_parameter_modes +- 02_parameter_modesindexCards: simple + - 03_using_default_values_in_parameters legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure.mdx index be5df570dcf..a678366ff8b 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/01_creating_a_subprocedure.mdx @@ -8,9 +8,11 @@ redirects: The `PROCEDURE` clause specified in the declaration section defines and names a subprocedure local to that block. -The term *block* refers to the SPL block structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks are the structures for standalone procedures and functions, anonymous blocks, subprograms, triggers, packages, and object type methods. +- The term *block* refers to the SPL block structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks are the structures for standalone procedures and functions, anonymous blocks, subprograms, triggers, packages, and object type methods. -The phrase *the identifier is local to the block* means that the identifier (that is, a variable, cursor, type, or subprogram) is declared in the declaration section of that block. Therefore, the SPL code can access it in the executable section and optional exception section of that block. +- The phrase *the identifier is local to the block* means that the identifier (that is, a variable, cursor, type, or subprogram) is declared in the declaration section of that block. Therefore, the SPL code can access it in the executable section and optional exception section of that block. + +## Declaring subprocedures You can declare subprocedures only after all the other variable, cursor, and type declarations included in the declaration section. Subprograms must be the last set of declarations. @@ -36,7 +38,7 @@ Where: - `statements` are SPL program statements. The `BEGIN - END` block can contain an `EXCEPTION` section. -## Examples +## Example: Subprocedure in an anonymous block This example is a subprocedure in an anonymous block: @@ -88,6 +90,8 @@ EMPNO ENAME 7934 MILLER ``` +## Example: Subprocedure in a trigger + This example is a subprocedure in a trigger: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction.mdx index 93eeb09ab80..8034c3f9a79 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/02_creating_a_subfunction.mdx @@ -8,9 +8,11 @@ redirects: The `FUNCTION` clause specified in the declaration section defines and names a subfunction local to that block. 
-The term *block* refers to the SPL block structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks are the structures for standalone procedures and functions, anonymous blocks, subprograms, triggers, packages, and object type methods. +- The term *block* refers to the SPL block structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks are the structures for standalone procedures and functions, anonymous blocks, subprograms, triggers, packages, and object type methods. -The phrase *the identifier is local to the block* means that the identifier (that is, a variable, cursor, type, or subprogram) is declared in the declaration section of that block and is therefore accessible by the SPL code in the executable section and optional exception section of that block. +- The phrase *the identifier is local to the block* means that the identifier (that is, a variable, cursor, type, or subprogram) is declared in the declaration section of that block and is therefore accessible by the SPL code in the executable section and optional exception section of that block. + +## Declaring a subfunction ```sql FUNCTION [ () ] @@ -37,7 +39,7 @@ Where: - `statements` are SPL program statements. The `BEGIN - END` block can contain an `EXCEPTION` section. -## Examples +## Example: Recursive subfunction This example shows the use of a recursive subfunction: diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships.mdx index 4e91beb62dc..5159773d207 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships.mdx @@ -1,5 +1,5 @@ --- -title: "Block relationships" +title: "Declaring block relationships" redirects: - ../../../../../epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/03_block_relationships #generated for docs/epas/reorg-role-use-case-mode --- @@ -8,6 +8,8 @@ redirects: You can declare the relationship between blocks in an SPL program. The ability to invoke subprograms and access identifiers declared in a block depends on this relationship. +## About block relationships + The following are the basic terms: - A *block* is the basic SPL structure consisting of an optional declaration section, a mandatory executable section, and an optional exception section. Blocks implement standalone procedure and function programs, anonymous blocks, triggers, packages, and subprocedures and subfunctions. @@ -19,6 +21,8 @@ The following are the basic terms: - The *level* is an ordinal number of a given block from the highest ancestor block. For example, given a standalone procedure, the subprograms declared in the declaration section of this procedure are all at the same level, such as level 1. Additional subprograms in the declaration section of the subprograms declared in the standalone procedure are at the next level, that is, level 2. 
- The *sibling blocks* are the set of blocks that have the same parent block, that is, they are all locally declared in the same block. Sibling blocks are at the same level relative to each other. +## Example + The following schematic of a set of procedure declaration sections provides an example of a set of blocks and their relationships to their surrounding blocks. The two vertical lines on the left-hand side of the blocks indicate there are two pairs of sibling blocks. `block_1a` and `block_1b` is one pair, and `block_2a` and `block_2b` is the second pair. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms.mdx index ff0164e8867..784db57fc4e 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/04_invoking_subprograms.mdx @@ -10,11 +10,14 @@ Invoke a subprogram in the same manner as a standalone procedure or function by You can invoke the subprogram with zero, one, or more qualifiers. Qualifiers are the names of the parent subprograms or labeled anonymous blocks forming the ancestor hierarchy from which the subprogram was declared. +## Overview of subprograms + Invoke the subprogram using a dot-separated list of qualifiers ending with the subprogram name and any of its arguments: ```text [[.][...].] [()] ``` +### Specifying qualifiers If specified, `qualifier_n` is the subprogram in which `subprog` was declared in its declaration section. The preceding list of qualifiers must reside in a continuous path up the hierarchy from `qualifier_n` to `qualifier_1`. `qualifier_1` can be any ancestor subprogram in the path as well as any of the following: @@ -29,6 +32,8 @@ If specified, `qualifier_n` is the subprogram in which `subprog` was declared in `arguments` is the list of actual parameters to pass to the subprocedure or subfunction. +### Searching for subprograms + When you invoke the subprogram, the search for the subprogram occurs as follows: - The invoked subprogram name of its type (that is, subprocedure or subfunction) along with any qualifiers in the specified order (referred to as the invocation list) is used to find a matching set of blocks residing in the same hierarchical order. The search begins in the block hierarchy where the lowest level is the block from where the subprogram is invoked. The declaration of the subprogram must be in the SPL code prior to the code line where it's invoked when the code is observed from top to bottom. (You can achieve an exception to this requirement using a forward declaration. See [Using forward declarations](05_using_forward_declarations/#using_forward_declarations).) 
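As a hedged illustration of the qualifier syntax (the procedure `list_dept` and its subprocedure `show_header` are hypothetical names, not examples from the guide), a subprogram declared in a standalone procedure can be invoked unqualified or qualified by that procedure's name:

```sql
CREATE OR REPLACE PROCEDURE list_dept
IS
    -- Subprocedure declared locally in the declaration section of list_dept
    PROCEDURE show_header (p_title VARCHAR2)
    IS
    BEGIN
        DBMS_OUTPUT.PUT_LINE('== ' || p_title || ' ==');
    END show_header;
BEGIN
    show_header('Unqualified invocation');          -- zero qualifiers
    list_dept.show_header('Qualified invocation');  -- qualified by the standalone program name
END list_dept;
```

Both invocations reach the same subprocedure; the qualified form matters only when the unqualified name would be ambiguous.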
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms.mdx index 3362af51a0a..58eb52c0d8c 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/06_overloading_subprograms.mdx @@ -8,13 +8,15 @@ redirects: Generally, subprograms of the same type (subprocedure or subfunction) with the same name and same formal parameter specification can appear multiple times in the same standalone program as long as they aren't sibling blocks (that is, the subprograms aren't declared in the same local block). -You can invoke aach subprogram individually depending on the use of qualifiers and the location where the subprogram invocation is made. +You can invoke each subprogram individually depending on the use of qualifiers and the location where the subprogram invocation is made. However, it's possible to declare subprograms, even as siblings, that are of the same subprogram type and name as long as certain aspects of the formal parameters differ. These characteristics (subprogram type, name, and formal parameter specification) are generally known as a program’s *signature*. The declaration of multiple subprograms where the signatures are identical except for certain aspects of the formal parameter specification is referred to as subprogram *overloading*. -Thus, the particular overloaded subprogram to invoke is determined by a match of the actual parameters specified by the subprogram invocation and the formal parameter lists of the overloaded subprograms. +## Requirements + +The particular overloaded subprogram to invoke is determined by a match of the actual parameters specified by the subprogram invocation and the formal parameter lists of the overloaded subprograms. Any of the following differences permit overloaded subprograms: @@ -29,7 +31,9 @@ The following differences alone don't permit overloaded subprograms: One of the differences allowing overloaded subprograms is different data types. -However, certain data types have alternative names referred to as *aliases*, which can be used for the table definition. +## Using aliases + +Certain data types have alternative names referred to as *aliases*, which can be used for the table definition. For example, you can specify fixed-length character data types as `CHAR` or `CHARACTER`. You can specify variable-length character data types as `CHAR VARYING`, `CHARACTER VARYING`, `VARCHAR`, or `VARCHAR2`. For integers, there are `BINARY_INTEGER`, `PLS_INTEGER`, and `INTEGER` data types. For numbers, there are `NUMBER`, `NUMERIC`, `DEC`, and `DECIMAL` data types. @@ -39,7 +43,9 @@ Thus, when attempting to create overloaded subprograms, the formal parameter dat You can determine if certain data types are aliases of other types by displaying the table definition containing the data types. 
-For example, the following table definition contains some data types and their aliases: +## Example: Data types and aliases + +The following table definition contains some data types and their aliases: ```sql CREATE TABLE data_type_aliases ( @@ -99,6 +105,8 @@ When attempting to declare overloaded subprograms, a pair of formal parameter da For certain pairs of data types used for overloading, you might need to cast the arguments specified by the subprogram invocation to avoid an error encountered during runtime of the subprogram. Invoking a subprogram must include the actual parameter list that can specifically identify the data types. Certain pairs of overloaded data types might require the `CAST` function to explicitly identify data types. For example, pairs of overloaded data types that might require casting during the invocation are `CHAR` and `VARCHAR2`, or `NUMBER` and `REAL`. +## Example: Overloaded subfunctions + This example shows a group of overloaded subfunctions invoked from an anonymous block. The executable section of the anonymous block contains the use of the `CAST` function to invoke overloaded functions with certain data types. ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx index e9f12635f06..c12de40b1df 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx @@ -17,6 +17,8 @@ Accessing variables includes not only those defined as a data type but also incl At most one qualifier can access the variable. The qualifier is the name of the subprogram or labeled anonymous block in which the variable was locally declared. +## Syntax + The syntax to reference a variable is: ```text @@ -34,6 +36,8 @@ schema_name.package_name.public_variable_name For more information about supported package syntax, see [Built-in packages](/epas_compat_bip_guide/03_built-in_packages/). +## Requirements + You can access variables in the following ways: - Variables can be accessed as long as the block in which the variable was locally declared is in the ancestor hierarchical path starting from the block containing the reference to the variable. Such variables declared in ancestor blocks are referred to as *global variables*. @@ -48,6 +52,8 @@ You can't access the following location of variables relative to the block from !!! Note The EDB Postgres Advanced Server process for accessing variables isn't compatible with Oracle databases. For Oracle, you can specify any number of qualifiers, and the search is based on the first match of the first qualifier in a similar manner to the Oracle matching algorithm for invoking subprograms. +## Example: Accessing variables in blocks + This example shows how variables in various blocks are accessed, with and without qualifiers. The lines that are commented out show attempts to access variables that result in an error. 
```sql @@ -125,6 +131,8 @@ BLOCK level_0 END BLOCK level_0 ``` +## Accessing variables with the same name + This example shows similar access attempts when all variables in all blocks have the same name: ```sql @@ -196,6 +204,8 @@ BLOCK level_0 END BLOCK level_0 ``` +## Using labels to qualify access to variables + You can also use the labels on anonymous blocks to qualify access to variables. This example shows variable access in a set of nested anonymous blocks: ```sql @@ -253,6 +263,7 @@ BLOCK level_0 .. END BLOCK level_1b END BLOCK level_0 ``` +## Example Accessing record types in parent blocks This example is an object type whose object type method, `display_emp`, contains the record type `emp_typ` and the subprocedure `emp_sal_query`. The record variable `r_emp` declared locally to `emp_sal_query` can access the record type `emp_typ` declared in the parent block `display_emp`. @@ -324,6 +335,7 @@ Salary : 950.00 Dept # : 30 Employee's salary does not exceed the department average of 1566.67 ``` +## Example: Accessing an upper-level procedure This example is a package with three levels of subprocedures. A record type, collection type, and cursor type declared in the upper-level procedure can be accessed by the descendent subprocedure. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/index.mdx index d24d5b02ff9..1a9615e56c8 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/index.mdx @@ -1,5 +1,6 @@ --- title: "Subprograms: subprocedures and subfunctions" +indexCards: simple redirects: - ../../../../epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions #generated for docs/epas/reorg-role-use-case-mode --- diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/index.mdx index 85822bf94cf..e17b3f721b9 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/index.mdx @@ -1,5 +1,6 @@ --- title: "Program security" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. 
- "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.056.html" diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/01_declaring_a_variable.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/01_declaring_a_variable.mdx index 6aa193e77ca..a59fa585815 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/01_declaring_a_variable.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/01_declaring_a_variable.mdx @@ -8,6 +8,8 @@ redirects: Generally, you must declare all variables used in a block in the declaration section of the block. A variable declaration consists of a name that's assigned to the variable and its data type. Optionally, you can initialize the variable to a default value in the variable declaration. +## Syntax + The general syntax of a variable declaration is: ```sql @@ -22,6 +24,8 @@ The general syntax of a variable declaration is: The default value is evaluated every time the block is entered. So, for example, assigning `SYSDATE` to a variable of type `DATE` causes the variable to have the time of the current invocation, not the time when the procedure or function was precompiled. +## Example: Variable declarations that use defaults + This procedure shows some variable declarations that use defaults consisting of string and numeric expressions: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations.mdx index 28a88d1aa87..e0d92f19c8e 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/02_using__type_in_variable_declarations.mdx @@ -19,6 +19,8 @@ Instead of coding the specific column data type into the variable declaration, y !!! Note You can use the `%TYPE` attribute with formal parameter declarations as well. +## Syntax + ```sql { {
| }. | }%TYPE; ``` @@ -30,6 +32,8 @@ Instead of coding the specific column data type into the variable declaration, y !!! Note The variable doesn't inherit any of the column’s other attributes that you specify on the column with the `NOT NULL` clause or the `DEFAULT` clause. +## Example: Defining parameters using %TYPE + In this example, a procedure: - Queries the `emp` table using an employee number diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx index f0fce6140c3..77d6e812c82 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx @@ -14,6 +14,8 @@ The `%TYPE` attribute provides an easy way to create a variable that depends on A *record* is a named, ordered collection of fields. A *field* is similar to a variable. It has an identifier and data type but has the additional property of belonging to a record. You must refereence it using dot notation with the record name as its qualifier. +## Sytax + You can use the `%ROWTYPE` attribute to declare a record. The `%ROWTYPE` attribute is prefixed by a table name. Each column in the named table defines an identically named field in the record with the same data type as the column. ```text @@ -23,6 +25,8 @@ You can use the `%ROWTYPE` attribute to declare a record. The `%ROWTYPE` attribu - `record` is an identifier assigned to the record. - `table` is the name of a table or view whose columns define the fields in the record. +## Example + This example shows how you can modify the `emp_sal_query` procedure from [Using %TYPE in variable declarations](02_using__type_in_variable_declarations) to use `emp%ROWTYPE` to create a record named `r_emp` instead of declaring individual variables for the columns in `emp`: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables.mdx index b8ef610e80e..4176a8ad9df 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/04_user_defined_record_types_and_record_variables.mdx @@ -14,6 +14,8 @@ You can declare records based on a table definition using the `%ROWTYPE` attribu You use the `TYPE IS RECORD` statement to create the definition of a record type. A *record type* is a definition of a record made up of one or more identifiers and their corresponding data types. You can't use a record type by itself to manipulate data. +## Syntax + The syntax for a `TYPE IS RECORD` statement is: ```sql @@ -51,6 +53,8 @@ Use dot notation to reference the fields in the record: `record` is a previously declared record variable and `field` is the identifier of a field belonging to the record type from which `record` is defined. 
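Before the full `emp_sal_query` example that follows, a minimal sketch may help; the record type `name_rec_typ` and its fields are hypothetical and exist only to show the declaration and dot notation.

```sql
DECLARE
    -- User-defined record type with two fields
    TYPE name_rec_typ IS RECORD (
        first_name VARCHAR2(20),
        last_name  VARCHAR2(20)
    );
    r_name name_rec_typ;   -- record variable declared from the record type
BEGIN
    r_name.first_name := 'Ada';        -- dot notation: record.field
    r_name.last_name  := 'Lovelace';
    DBMS_OUTPUT.PUT_LINE(r_name.first_name || ' ' || r_name.last_name);
END;
```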
+## Example + This `emp_sal_query` procedure uses a user-defined record type and record variable: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction.mdx index f30972c5b31..6a30586eb01 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/03_pragma_autonomous_transaction.mdx @@ -6,7 +6,7 @@ redirects: -An SPL program can be declared as an *autonomous transaction* by specifying the following directive in the declaration section of the SPL block. An autonomous transaction is an independent transaction started by a calling program. +A stored procedural language (SPL) program can be declared as an *autonomous transaction* by specifying the following directive in the declaration section of the SPL block. An autonomous transaction is an independent transaction started by a calling program. ```sql PRAGMA AUTONOMOUS_TRANSACTION; @@ -14,6 +14,8 @@ PRAGMA AUTONOMOUS_TRANSACTION; A commit or rollback of SQL commands in the autonomous transaction has no effect on the commit or rollback in any transaction of the calling program. A commit or rollback in the calling program has no effect on the commit or rollback of SQL commands in the autonomous transaction. +## Requirements and restrictions + The following SPL programs can include `PRAGMA AUTONOMOUS_TRANSACTION`: - Standalone procedures and functions @@ -30,6 +32,8 @@ The following are issues and restrictions related to autonomous transactions: - Parallel query isn't supported in autonomous transactions. - The EDB Postgres Advanced Server implementation of autonomous transactions isn't entirely compatible with Oracle databases. The EDB Postgres Advanced Server autonomous transaction doesn't produce an error if there's an uncommitted transaction at the end of an SPL block. +## About the examples + The following set of examples use autonomous transactions. This first set of scenarios shows the default behavior when there are no autonomous transactions. Before each scenario, the `dept` table is reset to the following initial values: diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx index 6036a93692a..42cc6364f32 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/06_transaction_control/about_transactions.mdx @@ -2,12 +2,14 @@ title: "About transactions" --- -A transaction begins when the first SQL command is encountered in the SPL program. All subsequent SQL commands are included as part of that transaction. The transaction ends when one of the following occurs: +A transaction begins when the first SQL command is encountered in the SPL program. All subsequent SQL commands are included as part of that transaction. -- An unhandled exception occurs. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. -- A `COMMIT` command is encountered. 
In this case, the effect of all database updates made during the transaction become permanent. -- A `ROLLBACK` command is encountered. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. If a new SQL command is encountered, a new transaction begins. -- Control returns to the calling application (such as Java or PSQL). In this case, the action of the application determines whether the transaction is committed or rolled back. The exception is when the transaction is in a block in which `PRAGMA AUTONOMOUS_TRANSACTION` was declared. In this case, the commitment or rollback of the transaction occurs independently of the calling program. +The transaction ends when one of the following occurs: + +- **An unhandled exception occurs**. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. +- **A `COMMIT` command is encountered**. In this case, the effect of all database updates made during the transaction become permanent. +- **A `ROLLBACK` command is encountered**. In this case, the effects of all database updates made during the transaction are rolled back, and the transaction is aborted. If a new SQL command is encountered, a new transaction begins. +- **Control returns to the calling application** (such as Java or PSQL). In this case, the action of the application determines whether the transaction is committed or rolled back. The exception is when the transaction is in a block in which `PRAGMA AUTONOMOUS_TRANSACTION` was declared. In this case, the commitment or rollback of the transaction occurs independently of the calling program. !!! Note Unlike Oracle, DDL commands such as `CREATE TABLE` don't implicitly occur in their own transaction. Therefore, DDL commands don't cause an immediate database commit as in Oracle, and you can roll back DDL commands just like DML commands. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/07_dynamic_sql.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/07_dynamic_sql.mdx index 3eb7319cf74..e4ca03e2561 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/07_dynamic_sql.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/07_dynamic_sql.mdx @@ -16,6 +16,8 @@ In addition, dynamic SQL is the only method by which data definition commands, s However, the runtime performance of dynamic SQL is slower than static SQL. +## Syntax + The `EXECUTE IMMEDIATE` command is used to run SQL commands dynamically: ```sql @@ -41,6 +43,8 @@ Placeholders aren't declared anywhere in the SPL program. They appear only in `s Currently all options for `bind_type` are ignored and `bind_argument` is treated as `IN OUT`. 
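A minimal, hedged sketch of placeholders and bind arguments (the `job` table and the `:p1`/`:p2` placeholder names are hypothetical): the `USING` clause supplies the values positionally at runtime.

```sql
-- Hypothetical table used only for this illustration
CREATE TABLE job (
    jobno NUMBER(3),
    jname VARCHAR2(20)
);

DECLARE
    v_sql   VARCHAR2(200);
    v_jobno NUMBER(3)    := 100;
    v_jname VARCHAR2(20) := 'ANALYST';
BEGIN
    -- Placeholders :p1 and :p2 are matched positionally by the USING clause
    v_sql := 'INSERT INTO job (jobno, jname) VALUES (:p1, :p2)';
    EXECUTE IMMEDIATE v_sql USING v_jobno, v_jname;
END;
```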
+## Example: SQL commands as string literals + This example shows basic dynamic SQL commands as string literals: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/02_opening_a_cursor.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/02_opening_a_cursor.mdx index 834053ea903..61802f46a82 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/02_opening_a_cursor.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/02_opening_a_cursor.mdx @@ -14,7 +14,7 @@ OPEN ; `name` is the identifier of a cursor that was previously declared in the declaration section of the SPL program. Don't execute the `OPEN` statement on a cursor that is already open. -This examples shows an `OPEN` statement with its corresponding cursor declaration: +This example shows an `OPEN` statement with its corresponding cursor declaration: ```sql CREATE OR REPLACE PROCEDURE cursor_example diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor.mdx index 76e91479a37..dbaff87039b 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/08_static_cursors/03_fetching_rows_from_a_cursor.mdx @@ -8,6 +8,8 @@ redirects: Once a cursor is open, you can retrieve rows from the cursor’s result set by using the `FETCH` statement. +## Syntax + ```sql FETCH INTO { | [, ]... }; ``` @@ -22,6 +24,8 @@ Where: !!! Note A variation of `FETCH INTO` using the `BULK COLLECT` clause can return multiple rows at a time into a collection. See [Using the BULK COLLECT clause](../12_working_with_collections/04_using_the_bulk_collect_clause/#using_the_bulk_collect_clause) for more information on using the `BULK COLLECT` clause with the `FETCH INTO` statement. 
+## Example + The following shows the `FETCH` statement: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/index.mdx index 572097892fd..3b4b859a1c2 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable/index.mdx @@ -1,5 +1,6 @@ --- title: "Declaring a cursor variable" +indexCards: simple redirects: - ../../../../epas_compat_spl/09_ref_cursors_and_cursor_variables/02_declaring_a_cursor_variable #generated for docs/epas/reorg-role-use-case-mode --- diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/index.mdx index 022771fa0ec..aeea7c9a1ff 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples/index.mdx @@ -1,5 +1,6 @@ --- title: "Cursor variable examples" +indexCards: simple redirects: - ../../../../epas_compat_spl/09_ref_cursors_and_cursor_variables/07_examples #generated for docs/epas/reorg-role-use-case-mode --- diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx index d5a36eceae6..312f99a6310 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/about_collections.mdx @@ -1,5 +1,5 @@ --- -title: "Using collections" +title: "About collection types" --- The most commonly known type of collection is an array. In EDB Postgres Advanced Server, the supported collection types are: @@ -7,15 +7,21 @@ The most commonly known type of collection is an array. In EDB Postgres Advanced - [Nested tables](02_nested_tables) - [Varrays](03_varrays) -## Steps for using a collection +## Defining the collection type -The general steps for using a collection are the following: +To set up a collection: + +1. Define a collection of the desired type. You can do this in the declaration section of an SPL program, which results in a *local type* that you can access only in that program. For nested table and varray types, you can also do this using the `CREATE TYPE` command, which creates a persistent, *standalone type* that any SPL program in the database can reference. +2. Declare variables of the collection type. The collection associated with the declared variable is *uninitialized* at this point if no value assignment is made as part of the variable declaration. + +## Initializing a null collection -- Define a collection of the desired type. You can do this in the declaration section of an SPL program, which results in a *local type* that you can access only in that program. 
For nested table and varray types, you can also do this using the `CREATE TYPE` command, which creates a persistent, *standalone type* that any SPL program in the database can reference. -- Declare variables of the collection type. The collection associated with the declared variable is *uninitialized* at this point if no value assignment is made as part of the variable declaration. - Uninitialized collections of nested tables and varrays are null. A *null collection* doesn't yet exist. Generally, a `COLLECTION_IS_NULL` exception is thrown if a collection method is invoked on a null collection. -- Uninitialized collections of associative arrays exist but have no elements. An existing collection with no elements is called an *empty collection*. - To initialize a null collection, you must either make it an empty collection or assign a non-null value to it. Generally, a null collection is initialized by using its *constructor*. + +## Adding elements to an associative array + +- Uninitialized collections of associative arrays exist but have no elements. An existing collection with no elements is called an *empty collection*. - To add elements to an empty associative array, you can assign values to its keys. For nested tables and varrays, generally its constructor is used to assign initial values to the nested table or varray. For nested tables and varrays, you then use the `EXTEND` method to grow the collection beyond its initial size set by the constructor. ## Limitations diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx index 2ec0e8d7705..a2d3b8790c9 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/02_methods.mdx @@ -8,6 +8,6 @@ redirects: Methods are SPL procedures or functions defined in an object type. Methods are categorized into three general types: -- Member methods — Procedures or functions that operate in the context of an object instance. Member methods have access to and can change the attributes of the object instance on which they're operating. -- Static methods — Procedures or functions that operate independently of any particular object instance. Static methods don't have access to and can't change the attributes of an object instance. -- Constructor methods — Functions used to create an instance of an object type. A default constructor method is always provided when an object type is defined. +- *Member methods* — Procedures or functions that operate in the context of an object instance. Member methods have access to and can change the attributes of the object instance on which they're operating. +- *Static methods* — Procedures or functions that operate independently of any particular object instance. Static methods don't have access to and can't change the attributes of an object instance. +- *Constructor methods* — Functions used to create an instance of an object type. A default constructor method is always provided when an object type is defined. 
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx index b90a91530df..b9fac2f8058 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/01_basic_object_concepts/index.mdx @@ -1,5 +1,6 @@ --- title: "Basic object concepts" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.091.html" @@ -12,8 +13,8 @@ redirects: An object type is a description or definition of some entity. This definition of an object type is characterized by two components: -- Attributes — Fields that describe particular characteristics of an object instance. For a person object, examples are name, address, gender, date of birth, height, weight, eye color, and occupation. -- Methods — Programs that perform some type of function or operation on or are related to an object. For a person object, examples are calculating the person’s age, displaying the person’s attributes, and changing the values assigned to the person’s attributes. +- *Attributes* — Fields that describe particular characteristics of an object instance. For a person object, examples are name, address, gender, date of birth, height, weight, eye color, and occupation. +- *Methods* — Programs that perform some type of function or operation on or are related to an object. For a person object, examples are calculating the person’s age, displaying the person’s attributes, and changing the values assigned to the person’s attributes.
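To make the two components concrete, here is a minimal sketch (not an example from the guide) of a hypothetical `person_typ` object type: its attributes describe the person, and a member method computes the person's age from one of those attributes.

```sql
CREATE OR REPLACE TYPE person_typ AS OBJECT (
    -- Attributes: fields that describe an instance
    name          VARCHAR2(30),
    date_of_birth DATE,
    -- Method: a program that operates on the instance's attributes
    MEMBER FUNCTION get_age RETURN NUMBER
);

CREATE OR REPLACE TYPE BODY person_typ AS
    MEMBER FUNCTION get_age RETURN NUMBER
    IS
    BEGIN
        RETURN TRUNC(MONTHS_BETWEEN(SYSDATE, SELF.date_of_birth) / 12);
    END;
END;

-- Usage: the default constructor builds an instance, and the member method
-- operates on that instance's attributes.
DECLARE
    p person_typ := person_typ('Sam', TO_DATE('15-JUN-1990','DD-MON-YYYY'));
BEGIN
    DBMS_OUTPUT.PUT_LINE(p.name || ' is ' || p.get_age() || ' years old');
END;
```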
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax.mdx index a954e2d2a1c..1355b8d0963 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/01_object_type_specification_syntax.mdx @@ -74,6 +74,8 @@ RETURN ; If you omit the `AUTHID` clause or specify `DEFINER`, the rights of the object type owner are used to determine access privileges to database objects. If you specify `CURRENT_USER`, the rights of the current user executing a method in the object determine access privileges. +## Syntax + `attribute` is an identifier assigned to an attribute of the object type. `datatype` is a base data type. @@ -92,6 +94,8 @@ Following the closing parenthesis of the `CREATE TYPE` definition, `[ NOT ] INST !!! Note Even though specifying `NOT INSTANTIABLE` is accepted in the `CREATE TYPE` command, SPL doesn't currently support creating subtypes. +## method_spec + `method_spec` denotes the specification of a member method or static method. Before defining a method, use `[ NOT ] FINAL` to specify whether the method can be overridden in a subtype. `NOT FINAL` is the default, meaning the method can be overridden in a subtype. @@ -100,10 +104,14 @@ Before defining a method, specify `OVERRIDING` if the method overrides an identi Before defining a method, use `[ NOT ] INSTANTIABLE` to specify whether the object type definition provides an implementation for the method. If you specify `INSTANTIABLE`, then the `CREATE TYPE BODY` command for the object type must specify the implementation of the method. If you specify `NOT INSTANTIABLE`, then the `CREATE TYPE BODY` command for the object type must not contain the implementation of the method. In this latter case, it is assumed a subtype contains the implementation of the method, overriding the method in this object type. If there are any `NOT INSTANTIABLE` methods in the object type, then the object type definition must specify `NOT INSTANTIABLE` and `NOT FINAL` following the closing parenthesis of the object type specification. The default is `INSTANTIABLE`. +## subprogram_spec + `subprogram_spec` denotes the specification of a procedure or function and begins with the specification of either `MEMBER` or `STATIC`. A member subprogram must be invoked with respect to a particular object instance while a static subprogram isn't invoked with respect to any object instance. `proc_name` is an identifier of a procedure. If you specify the `SELF` parameter, `name` is the object type name given in the `CREATE TYPE` command. If specified, `parm1, parm2, …` are the formal parameters of the procedure. `datatype1, datatype2, …` are the data types of `parm1, parm2, …` respectively. `IN`, `IN OUT`, and `OUT` are the possible parameter modes for each formal parameter. The default is `IN`. `value1, value2, …` are default values that you can specify for `IN` parameters. +## CONSTRUCTOR + Include the `CONSTRUCTOR FUNCTION` keyword and function definition to define a constructor function. `func_name` is an identifier of a function. 
If specified, `parm1, parm2, …` are the formal parameters of the function. `datatype1, datatype2, …` are the data types of `parm1, parm2, …` respectively. `IN`, `IN OUT`, and `OUT` are the possible parameter modes for each formal parameter. The default is `IN`. `value1, value2, …` are default values that you can specify for `IN` parameters. `return_type` is the data type of the value the function returns. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/index.mdx index 1a95dcea28f..1d5f0770835 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/02_object_type_components/index.mdx @@ -1,5 +1,6 @@ --- title: "Object type components" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.092.html" diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx index 6a268727036..fc3c6a532ee 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/03_constructor_methods.mdx @@ -80,6 +80,8 @@ BEGIN END; ``` -Custom constructor functions are typically used to compute member values when given incomplete information. The example computes the values for `city` and `state` when given a postal code. +Custom constructor functions are: -Custom constructor functions are also used to enforce business rules that restrict the state of an object. For example, if you define an object type to represent a `payment`, you can use a custom constructor to ensure that no object of type `payment` can be created with an `amount` that is `NULL`, negative, or zero. The default constructor sets `payment.amount` to `NULL`, so you must create a custom constructor whose signature matches the default constructor to prohibit `NULL` amounts. +- Typically used to compute member values when given incomplete information. The example computes the values for `city` and `state` when given a postal code. + +- Also used to enforce business rules that restrict the state of an object. For example, if you define an object type to represent a `payment`, you can use a custom constructor to ensure that no object of type `payment` can be created with an `amount` that is `NULL`, negative, or zero. The default constructor sets `payment.amount` to `NULL`, so you must create a custom constructor whose signature matches the default constructor to prohibit `NULL` amounts. 
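As a rough sketch of the business-rule case described above, a constructor for a hypothetical `payment` object type might validate the amount before the object is created. The type name and error code shown here are illustrative only:

```sql
-- Hypothetical payment type; the constructor rejects NULL, zero, and negative amounts
CREATE OR REPLACE TYPE payment AS OBJECT
(
    amount NUMBER,
    CONSTRUCTOR FUNCTION payment (SELF IN OUT payment, amount NUMBER)
        RETURN self AS RESULT
);

CREATE OR REPLACE TYPE BODY payment AS
    CONSTRUCTOR FUNCTION payment (SELF IN OUT payment, amount NUMBER)
        RETURN self AS RESULT
    IS
    BEGIN
        -- Enforce the business rule before the object instance is built
        IF amount IS NULL OR amount <= 0 THEN
            RAISE_APPLICATION_ERROR(-20001, 'payment amount must be positive');
        END IF;
        SELF.amount := amount;
        RETURN;
    END;
END;
```

Because this constructor's signature matches the default constructor, callers can no longer create a `payment` with a `NULL` or non-positive `amount`.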
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/index.mdx index f5a6ef5d192..aa29ca6a350 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/03_creating_object_types/index.mdx @@ -1,5 +1,6 @@ --- title: "Creating object types" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.093.html" diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances.mdx index b8638669b14..b4da55d2b35 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/04_creating_object_instances.mdx @@ -10,6 +10,8 @@ redirects: +## Creating an instance + To create an instance of an object type, you must first declare a variable of the object type and then initialize the declared object variable. The syntax for declaring an object variable is: ```text @@ -22,6 +24,8 @@ Where: `obj_type` is the identifier of a previously defined object type. +## Invoking a constructor method + After declaring the object variable, you must invoke a *constructor method* to initialize the object with values. Use the following syntax to invoke the constructor method: ```sql @@ -49,6 +53,8 @@ The variable `v_emp` is declared with a previously defined object type named `EM You can include the `NEW` keyword when creating a new instance of an object in the body of a block. The `NEW` keyword invokes the object constructor whose signature matches the arguments provided. +## Example + This example declares two variables named `mgr` and `emp`. The variables are both of `EMP_OBJ_TYPE`. The `mgr` object is initialized in the declaration, while the `emp` object is initialized to `NULL` in the declaration and assigned a value in the body. 
```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object.mdx index e4f9f2b6e7a..d18126cc7aa 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/05_referencing_an_object.mdx @@ -10,6 +10,8 @@ redirects: +## Syntax + After you create and initialize an object variable, you can reference individual attributes using dot notation of the form: ```text @@ -30,6 +32,8 @@ If `attribute` is of an object type, then the reference must take the form: Where `attribute_inner` is an identifier belonging to the object type to which `attribute` references in its definition of `object`. +## Examples + This example displays the values assigned to the `emp_obj_typ` object: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type.mdx index c92aabb2215..bc2b838c709 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/15_object_types_and_objects/06_dropping_an_object_type.mdx @@ -10,6 +10,9 @@ redirects: + +## Deleting an object type + The syntax for deleting an object type is as follows: ```sql @@ -27,6 +30,8 @@ DROP TYPE emp_obj_typ; DROP TYPE addr_obj_typ; ``` +## Dropping only the object type body + The syntax for deleting an object type body but not the object type specification is: ```sql diff --git a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx index 3a8a1f9bea5..34e714426d2 100644 --- a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx +++ b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx @@ -7,41 +7,41 @@ description: "Describes how to take a snapshot of system performance data" EDB Postgres Advanced Server's `postgresql.conf` file includes a configuration parameter named `timed_statistics` that controls collecting timing data. The valid parameter values are `TRUE` or `FALSE`. The default value is `FALSE`. -`timed_statistics` is a dynamic parameter that you can modify in the `postgresql.conf` file or while a session is in progress. To enable DRITA, you must either: +`timed_statistics` is a dynamic parameter that you can modify in the `postgresql.conf` file or while a session is in progress. -- Modify the `postgresql.conf` file, setting the `timed_statistics` parameter to `TRUE`. +To enable DRITA: -- Connect to the server with the EDB-PSQL client and invoke the command: +1. Do one of the following: - Connect to the server with the EDB-PSQL client, and invoke the command: + - Modify the `postgresql.conf` file, setting the `timed_statistics` parameter to `TRUE`. 
-```sql -SET timed_statistics = TRUE -``` + - Connect to the server with the EDB-PSQL client and invoke the command: -After modifying the `timed_statistics` parameter, take a starting snapshot. A snapshot captures the current state of each timer and event counter. The server compares the starting snapshot to a later snapshot to gauge system performance. + ```sql + SET timed_statistics = TRUE + ``` -Use the `edbsnap()` function to take the beginning snapshot: +2. After modifying the `timed_statistics` parameter, take a starting snapshot. A snapshot captures the current state of each timer and event counter. The server compares the starting snapshot to a later snapshot to gauge system performance. Use the `edbsnap()` function to take the beginning snapshot: -```sql -edb=# SELECT * FROM edbsnap(); -__OUTPUT__ - edbsnap ----------------------- - Statement processed. -(1 row) -``` + ```sql + edb=# SELECT * FROM edbsnap(); + __OUTPUT__ + edbsnap + ---------------------- + Statement processed. + (1 row) + ``` -Then, run the workload that you want to evaluate. When the workload is complete or at a strategic point during the workload, take another snapshot: +3. Run the workload that you want to evaluate. When the workload is complete or at a strategic point during the workload, take another snapshot: -```sql -edb=# SELECT * FROM edbsnap(); -__OUTPUT__ - edbsnap ----------------------- - Statement processed. -(1 row) -``` + ```sql + edb=# SELECT * FROM edbsnap(); + __OUTPUT__ + edbsnap + ---------------------- + Statement processed. + (1 row) + ``` You can capture multiple snapshots during a session. Then, use the DRITA functions and reports to manage and compare the snapshots to evaluate performance information. From bdbd08a06d25b767da82effc49fb4c8d04b8489b Mon Sep 17 00:00:00 2001 From: francoughlin Date: Mon, 7 Aug 2023 16:09:59 -0400 Subject: [PATCH 03/38] Remaining edits for the Application programming branch Also some additional restructuring --- .../02_packages/01_package_components.mdx | 4 +- .../02_packages/01a_display_packages.mdx | 6 +- .../02_packages/02_creating_packages.mdx | 2 +- ...using_packages_with_user_defined_types.mdx | 5 + .../ecpgplus_guide/02_overview.mdx | 143 +------------- .../ecpgplus_guide/03_using_embedded_sql.mdx | 6 + .../ecpgplus_guide/04_using_descriptors.mdx | 4 + .../ecpgplus_guide/index.mdx | 7 + .../ecpgplus_guide/installing_ecpgplus.mdx | 145 ++++++++++++++ .../index.mdx | 4 +- .../10_collections/01_associative_arrays.mdx | 10 + .../10_collections/02_nested_tables.mdx | 24 ++- .../10_collections/03_varrays.mdx | 8 + .../12_working_with_collections/01_table.mdx | 2 +- .../02_using_the_multiset_union_operator.mdx | 4 + .../03_using_the_forall_statement.mdx | 16 +- .../04_returning_bulk_collect.mdx | 4 + .../index.mdx | 1 + .../05_errors_and_messages.mdx | 2 + .../13_triggers/02_types_of_triggers.mdx | 12 +- .../13_triggers/03_creating_triggers.mdx | 10 +- .../13_triggers/04_trigger_variables.mdx | 10 +- .../13_triggers/06_compound_triggers.mdx | 10 +- .../06_compound_trigger.mdx | 180 ++++++++++-------- .../13_triggers/07_trigger_examples/index.mdx | 1 + .../taking_a_snapshot.mdx | 6 +- 26 files changed, 366 insertions(+), 260 deletions(-) create mode 100644 product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx diff --git a/product_docs/docs/epas/15/application_programming/02_packages/01_package_components.mdx b/product_docs/docs/epas/15/application_programming/02_packages/01_package_components.mdx index 
7efd35f1433..46ef2c4fc77 100644 --- a/product_docs/docs/epas/15/application_programming/02_packages/01_package_components.mdx +++ b/product_docs/docs/epas/15/application_programming/02_packages/01_package_components.mdx @@ -11,8 +11,8 @@ redirects: Packages consist of two main components: -- The package specification, which is the public interface. You can reference these elements outside the package. Declare all database objects that are a part of a package in the specification. -- The package body, which contains the actual implementation of all the database objects declared in the package specification. +- The *package specification*, which is the public interface. You can reference these elements outside the package. Declare all database objects that are a part of a package in the specification. +- The *package body*, which contains the actual implementation of all the database objects declared in the package specification. The package body implements the specifications in the package specification. It contains implementation details and private declarations that are invisible to the application. You can debug, enhance, or replace a package body without changing the specifications. Similarly, you can change the body without recompiling the calling programs because the implementation details are invisible to the application. diff --git a/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx b/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx index de877f51587..1ae99f419fd 100644 --- a/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx +++ b/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx @@ -14,7 +14,7 @@ You can view the package specification and package body definition using the psq \spb[+] []. ``` -## Examples +## Creating and viewing a package and a package body Create a package and a package body `test_pkg` in the `public` schema: @@ -99,9 +99,9 @@ END edb=# ``` -You can also view the definition of individual functions and procedures using the `\sf` command. +## Viewing function and procedure definitions -### Examples +You can also view the definition of individual functions and procedures using the `\sf` command. Create the function and procedure: diff --git a/product_docs/docs/epas/15/application_programming/02_packages/02_creating_packages.mdx b/product_docs/docs/epas/15/application_programming/02_packages/02_creating_packages.mdx index 5408df08115..3a620debc8a 100644 --- a/product_docs/docs/epas/15/application_programming/02_packages/02_creating_packages.mdx +++ b/product_docs/docs/epas/15/application_programming/02_packages/02_creating_packages.mdx @@ -13,7 +13,7 @@ A package isn't an executable piece of code but a repository of code. When you u ## Creating the package specification -The package specification contains the definition of all the elements in the package that you can reference from outside of the package. These are called the public elements of the package, and they act as the package interface. The following code sample is a package specification: +The package specification contains the definition of all the elements in the package that you can reference from outside of the package. These are called the *public elements* of the package, and they act as the package interface.
The following code sample is a package specification: ```sql -- diff --git a/product_docs/docs/epas/15/application_programming/02_packages/04_using_packages_with_user_defined_types.mdx b/product_docs/docs/epas/15/application_programming/02_packages/04_using_packages_with_user_defined_types.mdx index 5add0cd7fb2..b3793badaa6 100644 --- a/product_docs/docs/epas/15/application_programming/02_packages/04_using_packages_with_user_defined_types.mdx +++ b/product_docs/docs/epas/15/application_programming/02_packages/04_using_packages_with_user_defined_types.mdx @@ -11,6 +11,8 @@ redirects: This example incorporates various user-defined types in the context of a package. +## Package specification + The package specification of `emp_rpt` shows the declaration of a record type `emprec_typ` and a weakly typed `REF CURSOR, emp_refcur` as publicly accessible. It also shows two functions and two procedures. The function, `open_emp_by_dept`, returns the `REF CURSOR` type `EMP_REFCUR`. Procedures `fetch_emp` and `close_refcur` both declare a weakly typed `REF CURSOR` as a formal parameter. ```sql @@ -37,6 +39,8 @@ IS END emp_rpt; ``` +## Package body + The package body shows the declaration of several private variables: a static cursor `dept_cur`, a table type `depttab_typ`, a table variable `t_dept`, an integer variable `t_dept_max`, and a record variable `r_emp`. ```sql @@ -110,6 +114,7 @@ This package contains an initialization section that loads the private table var The function `open_emp_by_dept` returns a `REF CURSOR` variable for a result set of employee numbers and names for a given department. This `REF CURSOR` variable can then be passed to the procedure `fetch_emp` to retrieve and list the individual rows of the result set. Finally, the procedure `close_refcur` can be used to close the `REF CURSOR` variable associated with this result set. +## Using anonymous blocks The following anonymous block runs the package function and procedures. In the anonymous block's declaration section, note the declaration of cursor variable `v_emp_cur` using the package’s public `REF CURSOR` type, `EMP_REFCUR. v_emp_cur` contains the pointer to the result set that's passed between the package function and procedures. ```sql diff --git a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/02_overview.mdx b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/02_overview.mdx index 7f49195908f..5311313ed9a 100644 --- a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/02_overview.mdx +++ b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/02_overview.mdx @@ -15,6 +15,8 @@ redirects: EDB enhanced ECPG (the PostgreSQL precompiler) to create ECPGPlus. ECPGPlus is a Pro\*C-compatible version of the PostgreSQL C precompiler. ECPGPlus translates a program that combines C code and embedded SQL statements into an equivalent C program. As it performs the translation, ECPGPlus verifies that the syntax of each SQL construct is correct. +## About ECPGPlus + The following diagram charts the path of a program containing embedded SQL statements as it's compiled into an executable: ![Compilation of a program containing embedded SQL statements](../../images/ecpg_path.png) @@ -121,143 +123,4 @@ printf("Updating employee salaries\n"); } printf(“Employee salaries updated\n”); -``` - -## Installation and configuration - -On Windows, ECPGPlus is installed by the EDB Postgres Advanced Server installation wizard as part of the Database Server component. 
On Linux, install with the `edb-asxx-server-devel` RPM package, where `xx` is the EDB Postgres Advanced Server version number. By default, the executable is located on Windows in: - -```text -C:\Program Files\edb\as14\bin -``` - -On Linux, it's located in: - -```text -/usr/edb/as14/bin -``` - -When invoking the ECPGPlus compiler, the executable must be in your search path (`%PATH%` on Windows, `$PATH` on Linux). For example, the following commands set the search path to include the directory that holds the ECPGPlus executable file `ecpg`. - -On Windows: - -```shell -set EDB_PATH=C:\Program Files\edb\as14\bin -set PATH=%EDB_PATH%;%PATH% -``` - -On Linux: - -```shell -export EDB_PATH==/usr/edb/as14/bin -export PATH=$EDB_PATH:$PATH -``` - -## Constructing a makefile - -A makefile contains a set of instructions that tell the make utility how to transform a program written in C that contains embedded SQL into a C program. To try the examples, you need: - -- A C compiler (and linker) -- The make utility -- ECPGPlus preprocessor and library -- A makefile that contains instructions for ECPGPlus - -The following code is an example of a makefile for the samples included in this documentation. To use the sample code, save it in a file named `makefile` in the directory that contains the source code file. - -```c -INCLUDES = -I$(shell pg_config --includedir) -LIBPATH = -L $(shell pg_config --libdir) -CFLAGS += $(INCLUDES) -g -LDFLAGS += -g -LDLIBS += $(LIBPATH) -lecpg -lpq - -.SUFFIXES: .pgc,.pc - -.pgc.c: - ecpg -c $(INCLUDES) $? - -.pc.c: - ecpg -C PROC -c $(INCLUDES) $? -``` - -The first two lines use the `pg_config` program to locate the necessary header files and library directories: - -```sql -INCLUDES = -I$(shell pg_config --includedir) -LIBPATH = -L $(shell pg_config --libdir) -``` - -The `pg_config` program is shipped with EDB Postgres Advanced Server. - -make knows to use the `CFLAGS` variable when running the C compiler and `LDFLAGS` and `LDLIBS` when invoking the linker. ECPG programs must be linked against the ECPG runtime library (`-lecpg`) and the libpq library (`-lpq`). - -```sql -CFLAGS += $(INCLUDES) -g -LDFLAGS += -g -LDLIBS += $(LIBPATH) -lecpg -lpq -``` - -The sample makefile tells make how to translate a `.pgc` or a `.pc` file into a C program. Two lines in the makefile specify the mode in which the source file is compiled. The first compile option is: - -```c -.pgc.c: - ecpg -c $(INCLUDES) $? -``` - -The first option tells make how to transform a file that ends in `.pgc` (presumably, an ECPG source file) into a file that ends in `.c` (a C program), using community ECPG, without the ECPGPlus enhancements. It invokes the ECPG precompiler with the `-c` flag, which instructs the compiler to convert SQL code into C, using the value of the `INCLUDES` variable and the name of the `.pgc` file. - -```c -.pc.c: - ecpg -C PROC -c $(INCLUDES) $? -``` - -The second option tells make how to transform a file that ends in `.pg` (an ECPG source file) into a file that ends in `.c` (a C program) using the ECPGPlus extensions. It invokes the ECPG precompiler with the `-c` flag, which instructs the compiler to convert SQL code to C. It also uses the `-C PROC` flag, which instructs the compiler to use ECPGPlus in Pro\*C-compatibility mode, using the value of the `INCLUDES` variable and the name of the `.pgc` file. - -When you run make, pass the name of the ECPG source code file you want to compile. 
For example, to compile an ECPG source code file named `customer_list.pgc`, use the command: - -```shell -make customer_list -``` - -The make utility: - -1. Consults the makefile located in the current directory. -1. Discovers that the makefile contains a rule that compiles `customer_list.pgc` into a C program (`customer_list.c`). -1. Uses the rules built into `make` to compile `customer_list.c` into an executable program. - -## ECPGPlus command line options - -In the sample makefile, make includes the `-C` option when invoking ECPGPlus to invoke ECPGPlus in Pro\*C-compatible mode. - -If you include the `-C` `PROC` keywords on the command line, in addition to the ECPG syntax, you can use Pro\*C command line syntax. For example: - -```shell -$ ecpg -C PROC INCLUDE=/usr/edb/as14/include acct_update.c -``` - -To display a complete list of the other ECPGPlus options available, in the ECPGPlus installation directory, enter: - -```shell -./ecpg --help -``` - -The command line options are: - -| Option | Description | -| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| -c | Automatically generate C code from embedded SQL code. | -| -C *mode* | Use the `-C` option to specify a compatibility mode:

`INFORMIX`

`INFORMIX_SE`

`PROC` | -| -D *symbol* | Define a preprocessor *symbol*.

The *-D* keyword is not supported when compiling in *PROC mode.* Instead, use the Oracle-style *‘DEFINE=’* clause. | -| -h | Parse a header file, this option includes option `'-c'`. | -| -i | Parse system, include files as well. | -| -I directory | Search *directory* for `include` files. | -| -o *outfile* | Write the result to *outfile*. | -| -r *option* | Specify runtime behavior; *option* can be:

`no_indicator` - Don't use indicators, but instead use special values to represent NULL values.

`prepare` - Prepare all statements before using them.

`questionmarks` - Allow use of a question mark as a placeholder.

`usebulk` - Enable bulk processing for `INSERT`, `UPDATE`, and `DELETE` statements that operate on host variable arrays. | -| --regression | Run in regression testing mode. | -| -t | Turn on `autocommit` of transactions. | -| -l | Disable `#line` directives. | -| --help | Display the help options. | -| --version | Output version information. | - -!!! Note - If you don't specify an output file name when invoking ECPGPlus, the output file name is created by removing the `.pgc` extension from the file name and appending `.c`. +``` \ No newline at end of file diff --git a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx index 7bd55410dbf..c5f0200d15a 100644 --- a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx +++ b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/03_using_embedded_sql.mdx @@ -275,6 +275,7 @@ The code sample begins by including the prototypes and type definitions for the int main(int argc, char *argv[]) { ``` +### DECLARE Next, the application declares a set of host variables used to interact with the database server: @@ -320,6 +321,8 @@ EXEC SQL DECLARE employees CURSOR FOR `employees` contains the result set of a `SELECT` statement on the `emp` table. The query returns employee information from the following columns: `empno`, `ename`, `sal`, and `comm`. Notice that when you declare a cursor, you don't include an `INTO` clause. Instead, you specify the target variables (or descriptors) when you `FETCH` from the cursor. +### OPEN + Before fetching rows from the cursor, the client application must `OPEN` the cursor: ```sql @@ -331,6 +334,7 @@ In the subsequent `FETCH` section, the client application loops through the cont ```sql EXEC SQL WHENEVER NOT FOUND DO break; ``` +### FETCH The client application then uses a `FETCH` statement to retrieve each row from the cursor `INTO` the previously declared host variables: @@ -360,6 +364,8 @@ else If the null indicator is `0` (that is, `false`), `v_comm` contains a meaningful value, and the `printf` function displays the commission. If the null indicator contains a non-zero value, `comm` is `NULL`, and `printf` displays the string `'NULL'`. A host variable (other than a null indicator) contains no meaningful value if you fetch a `NULL` into that host variable. You must use null indicators for any value which may be `NULL`. +### CLOSE + The final statements in the code sample close the cursor `(employees)` and the connection to the server: ```sql diff --git a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/04_using_descriptors.mdx b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/04_using_descriptors.mdx index 6fe765fbd7b..473d1ea7b7b 100644 --- a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/04_using_descriptors.mdx +++ b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/04_using_descriptors.mdx @@ -61,6 +61,8 @@ The following simple application executes an SQL statement entered by an end use The application accepts an SQL statement from an end user, tests the statement to see if it includes the `SELECT` keyword, and executes the statement. +### Using a SQL descriptor to execute a `SELECT` statement + When invoking the application, an end user must provide the name of the database on which to perform the SQL statement and a string that contains the text of the query. 
For example, a user might invoke the sample with the following command: @@ -244,6 +246,8 @@ The sample provides minimal error handling. When the application encounters a SQ EXEC SQL WHENEVER SQLERROR SQLPRINT; ``` +### Finding the data and metadata returned by the statement + The application includes a forward-declaration for a function named `print_meta_data()` that prints the metadata found in a descriptor: ```c diff --git a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/index.mdx b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/index.mdx index d6a071e90c4..5f80ee87328 100644 --- a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/index.mdx +++ b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/index.mdx @@ -3,6 +3,13 @@ navTitle: Including embedded SQL commands title: "Including embedded SQL commands" indexCards: simple description: "How to use ECPGPlus to complie applications" +navigation: + - 02_overview + - installing_ecpgplus + - 03_using_embedded_sql + - 04_using_descriptors + - 05_building_executing_dynamic_sql_statements + - 06_error_handling legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/ecpgplus-guide/9.6/toc.html" diff --git a/product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx new file mode 100644 index 00000000000..2ea014df730 --- /dev/null +++ b/product_docs/docs/epas/15/application_programming/ecpgplus_guide/installing_ecpgplus.mdx @@ -0,0 +1,145 @@ +--- +title: "Installing and configuring ECPGPlus" +description: "Provides an overview of the ECPGPlus precompiler capabilities" +--- + +On Windows, ECPGPlus is installed by the EDB Postgres Advanced Server installation wizard as part of the Database Server component. On Linux, you install ECPGPlus by running an executable. + +## Installing ECPGPlus + +On Linux, install with the `edb-asxx-server-devel` RPM package, where `xx` is the EDB Postgres Advanced Server version number. On Linux, the executable is located in: + +```text +/usr/edb/as14/bin +``` + +On Windows, the executable is located in: + +```text +C:\Program Files\edb\as14\bin +``` + +When invoking the ECPGPlus compiler, the executable must be in your search path (`%PATH%` on Windows, `$PATH` on Linux). For example, the following commands set the search path to include the directory that holds the ECPGPlus executable file `ecpg`. + +On Windows: + +```shell +set EDB_PATH=C:\Program Files\edb\as14\bin +set PATH=%EDB_PATH%;%PATH% +``` + +On Linux: + +```shell +export EDB_PATH==/usr/edb/as14/bin +export PATH=$EDB_PATH:$PATH +``` + +## Constructing a makefile + +A makefile contains a set of instructions that tell the make utility how to transform a program written in C that contains embedded SQL into a C program. To try the examples, you need: + +- A C compiler (and linker) +- The make utility +- ECPGPlus preprocessor and library +- A makefile that contains instructions for ECPGPlus + +The following code is an example of a makefile for the samples included in this documentation. To use the sample code, save it in a file named `makefile` in the directory that contains the source code file. 
+ +```c +INCLUDES = -I$(shell pg_config --includedir) +LIBPATH = -L $(shell pg_config --libdir) +CFLAGS += $(INCLUDES) -g +LDFLAGS += -g +LDLIBS += $(LIBPATH) -lecpg -lpq + +.SUFFIXES: .pgc,.pc + +.pgc.c: + ecpg -c $(INCLUDES) $? + +.pc.c: + ecpg -C PROC -c $(INCLUDES) $? +``` + +The first two lines use the `pg_config` program to locate the necessary header files and library directories: + +```sql +INCLUDES = -I$(shell pg_config --includedir) +LIBPATH = -L $(shell pg_config --libdir) +``` + +The `pg_config` program is shipped with EDB Postgres Advanced Server. + +make knows to use the `CFLAGS` variable when running the C compiler and `LDFLAGS` and `LDLIBS` when invoking the linker. ECPG programs must be linked against the ECPG runtime library (`-lecpg`) and the libpq library (`-lpq`). + +```sql +CFLAGS += $(INCLUDES) -g +LDFLAGS += -g +LDLIBS += $(LIBPATH) -lecpg -lpq +``` + +The sample makefile tells make how to translate a `.pgc` or a `.pc` file into a C program. Two lines in the makefile specify the mode in which the source file is compiled. The first compile option is: + +```c +.pgc.c: + ecpg -c $(INCLUDES) $? +``` + +The first option tells make how to transform a file that ends in `.pgc` (presumably, an ECPG source file) into a file that ends in `.c` (a C program), using community ECPG, without the ECPGPlus enhancements. It invokes the ECPG precompiler with the `-c` flag, which instructs the compiler to convert SQL code into C, using the value of the `INCLUDES` variable and the name of the `.pgc` file. + +```c +.pc.c: + ecpg -C PROC -c $(INCLUDES) $? +``` + +The second option tells make how to transform a file that ends in `.pg` (an ECPG source file) into a file that ends in `.c` (a C program) using the ECPGPlus extensions. It invokes the ECPG precompiler with the `-c` flag, which instructs the compiler to convert SQL code to C. It also uses the `-C PROC` flag, which instructs the compiler to use ECPGPlus in Pro\*C-compatibility mode, using the value of the `INCLUDES` variable and the name of the `.pgc` file. + +When you run make, pass the name of the ECPG source code file you want to compile. For example, to compile an ECPG source code file named `customer_list.pgc`, use the command: + +```shell +make customer_list +``` + +The make utility: + +1. Consults the makefile located in the current directory. +1. Discovers that the makefile contains a rule that compiles `customer_list.pgc` into a C program (`customer_list.c`). +1. Uses the rules built into `make` to compile `customer_list.c` into an executable program. + +## ECPGPlus command line options + +In the sample makefile, make includes the `-C` option when invoking ECPGPlus to invoke ECPGPlus in Pro\*C-compatible mode. + +If you include the `-C` `PROC` keywords on the command line, in addition to the ECPG syntax, you can use Pro\*C command line syntax. 
For example: + +```shell +$ ecpg -C PROC INCLUDE=/usr/edb/as14/include acct_update.c +``` + +To display a complete list of the other ECPGPlus options available, in the ECPGPlus installation directory, enter: + +```shell +./ecpg --help +``` + +The command line options are: + +| Option | Description | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| -c | Automatically generate C code from embedded SQL code. | +| -C *mode* | Use the `-C` option to specify a compatibility mode:

`INFORMIX`

`INFORMIX_SE`

`PROC` | +| -D *symbol* | Define a preprocessor *symbol*.

The *-D* keyword is not supported when compiling in *PROC mode.* Instead, use the Oracle-style *‘DEFINE=’* clause. | +| -h | Parse a header file; this option includes option `'-c'`. | +| -i | Parse system include files as well. | +| -I directory | Search *directory* for `include` files. | +| -o *outfile* | Write the result to *outfile*. | +| -r *option* | Specify runtime behavior; *option* can be:

`no_indicator` - Don't use indicators, but instead use special values to represent NULL values.

`prepare` - Prepare all statements before using them.

`questionmarks` - Allow use of a question mark as a placeholder.

`usebulk` - Enable bulk processing for `INSERT`, `UPDATE`, and `DELETE` statements that operate on host variable arrays. | +| --regression | Run in regression testing mode. | +| -t | Turn on `autocommit` of transactions. | +| -l | Disable `#line` directives. | +| --help | Display the help options. | +| --version | Output version information. | + +!!! Note + If you don't specify an output file name when invoking ECPGPlus, the output file name is created by removing the `.pgc` extension from the file name and appending `.c`. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx index 8f1e7955208..406fca1626d 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/06_procedure_and_function_parameters/index.mdx @@ -1,10 +1,10 @@ --- title: "Procedure and function parameters" +indexCards: simple navigation: - declaring_parameters - 01_positional_vs_named_parameter_notation -- 02_parameter_modesindexCards: simple - +- 02_parameter_modes - 03_using_default_values_in_parameters legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/01_associative_arrays.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/01_associative_arrays.mdx index cd8ff88a2e6..e79928637d4 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/01_associative_arrays.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/01_associative_arrays.mdx @@ -8,6 +8,8 @@ redirects: An *associative array* is a type of collection that associates a unique key with a value. The key doesn't have to be numeric. It can be character data as well. +## Associative array overview + An associative array has the following characteristics: - You must define an *associative array type* after which you can declare *array variables* of that array type. Data manipulation occurs using the array variable. @@ -18,6 +20,8 @@ An associative array has the following characteristics: - The array can be sparse. There can be gaps in the assignment of values to keys. - An attempt to reference an array element that hasn't been assigned a value results in an exception. +## Defining an associative array + The `TYPE IS TABLE OF ... INDEX BY` statement is used to define an associative array type: ```sql @@ -37,6 +41,8 @@ Where: `n` is the maximum length of a character key. +## Declaring a variable + To make use of the array, you must declare a *variable* with that array type. The following is the syntax for declaring an array variable: ```text @@ -49,6 +55,8 @@ Where: `assoctype` is the identifier of a previously defined array type. 
+## Referencing an element of the array + Reference an element of the array using the following syntax: ```text @@ -61,6 +69,8 @@ Reference an element of the array using the following syntax: If the array type of `array` is defined from a record type or object type, then `[.field ]` must reference an individual field in the record type or attribute in the object type from which the array type is defined. Alternatively, you can reference the entire record by omitting `[.field ]`. +## Examples + This example reads the first 10 employee names from the `emp` table, stores them in an array, and then displays the results from the array: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/02_nested_tables.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/02_nested_tables.mdx index 23d733f805d..e52eafb0894 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/02_nested_tables.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/02_nested_tables.mdx @@ -6,15 +6,25 @@ redirects: -A *nested table* is a type of collection that associates a positive integer with a value. A nested table has the following characteristics: +A *nested table* is a type of collection that associates a positive integer with a value. + +## Nested tables overview + +A nested table has the following characteristics: - You must define a *nested table type*. After that, you can declare *nested table variables* of that nested table type. Data manipulation occurs using the nested table variable, also known simply as a table. -- When you declare a nested table variable, the nested table doesn't yet exist. It is a null collection. You must initialize the null table with a *constructor*. You can also initialize the table by using an assignment statement where the right-hand side of the assignment is an initialized table of the same type. **Note:** Initialization of a nested table is mandatory in Oracle but optional in SPL. +- When you declare a nested table variable, the nested table doesn't yet exist. It is a null collection. You must initialize the null table with a *constructor*. You can also initialize the table by using an assignment statement where the right-hand side of the assignment is an initialized table of the same type. +!!! Note + Initialization of a nested table is mandatory in Oracle but optional in SPL. - The key is a positive integer. -- The constructor establishes the number of elements in the table. The `EXTEND` method adds elements to the table. For details, see [Collection methods](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/11_collection_methods/#collection_methods). **Note:** Using the constructor to establish the number of elements in the table and using the `EXTEND` method to add elements to the table are mandatory in Oracle but optional in SPL. +- The constructor establishes the number of elements in the table. The `EXTEND` method adds elements to the table. For details, see [Collection methods](/epas/latest/reference/application_programmer_reference/stored_procedural_language_reference/11_collection_methods/#collection_methods). +!!! Note + Using the constructor to establish the number of elements in the table and using the `EXTEND` method to add elements to the table are mandatory in Oracle but optional in SPL. - The table can be sparse. There can be gaps in assigning values to keys. 
- An attempt to reference a table element beyond its initialized or extended size results in a `SUBSCRIPT_BEYOND_COUNT` exception. +## Defining a nested table + Use the `TYPE IS TABLE` statement to define a nested table type in the declaration section of an SPL program: ```sql @@ -34,6 +44,8 @@ Where: !!! Note You can use the `CREATE TYPE` command to define a nested table type that's available to all SPL programs in the database. See [SQL reference](/../../../reference/oracle_compatibility_reference/epas_compat_sql/39_create_type/) for more information about the `CREATE TYPE` command. +## Declaring a variable + To use the table, you must declare a *variable* of that nested table type. The following is the syntax for declaring a table variable: ```text @@ -46,6 +58,8 @@ Where: `tbltype` is the identifier of a previously defined nested table type. +## Initializing the nested table + Initialize a nested table using the nested table type’s constructor: ```sql @@ -68,6 +82,8 @@ DECLARE v_nested nested_typ := nested_typ('A','B'); ``` +## Referencing an element of the table + Reference an element of the table using the following syntax: ```text @@ -82,6 +98,8 @@ Where: If the table type of `table` is defined from a record type or object type, then `[.element ]` must reference an individual field in the record type or attribute in the object type from which the nested table type is defined. Alternatively, you can reference the entire record or object by omitting `[.element ]`. +## Examples + This example shows a nested table where it's known that there are four elements: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/03_varrays.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/03_varrays.mdx index 5ba039ef6c9..91af00d8436 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/03_varrays.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/10_collections/03_varrays.mdx @@ -8,6 +8,8 @@ redirects: A *varray* or *variable-size array* is a type of collection that associates a positive integer with a value. In many respects, it's similar to a nested table. +## Varray overview + A varray has the following characteristics: - You must define a *varray type* with a maximum size limit. After you define the varray type, you can declare *varray variables* of that varray type. Data manipulation occurs using the varray variable, also known simply as a varray. The number of elements in the varray can't exceed the maximum size limit set in the varray type definition. @@ -18,6 +20,8 @@ A varray has the following characteristics: - An attempt to reference a varray element beyond its initialized or extended size but within the maximum size limit results in a `SUBSCRIPT_BEYOND_COUNT` exception. - An attempt to reference a varray element beyond the maximum size limit or extend a varray beyond the maximum size limit results in a `SUBSCRIPT_OUTSIDE_LIMIT` exception. +## Defining a varray type + The `TYPE IS VARRAY` statement is used to define a varray type in the declaration section of an SPL program: ```sql @@ -47,6 +51,8 @@ Where: `varraytype` is the identifier of a previously defined varray type. 
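Putting the two syntax forms above together, a hypothetical anonymous block might define a varray type and declare a variable of that type as follows. The names are illustrative only:

```sql
DECLARE
    -- Hypothetical varray type that can hold at most four department names
    TYPE dept_names_vry IS VARRAY(4) OF VARCHAR2(14);
    -- Variable of the varray type; it is a null collection until initialized
    v_depts         dept_names_vry;
BEGIN
    -- Initialize with the type's constructor (covered in the next section)
    v_depts := dept_names_vry('ACCOUNTING', 'RESEARCH');
    DBMS_OUTPUT.PUT_LINE('Departments stored: ' || v_depts.COUNT);
END;
```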
+## Initializing a varray + Initialize a varray using the varray type’s constructor: ```text @@ -70,6 +76,8 @@ DECLARE v_varray varray_typ := varray_typ('A','B'); ``` +## Referencing an element of the varray + Reference an element of the varray using this syntax: ```text diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx index d33cbd04ace..7aa18549097 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx @@ -1,5 +1,5 @@ --- -title: "TABLE()" +title: "TABLE" redirects: - ../../../../epas_compat_spl/12_working_with_collections/01_table #generated for docs/epas/reorg-role-use-case-mode --- diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator.mdx index c4b0896398e..1bf2775f68e 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/02_using_the_multiset_union_operator.mdx @@ -18,6 +18,8 @@ Include the `ALL` keyword to specify to represent duplicate elements (elements t Include the `DISTINCT` or `UNIQUE` keyword to include duplicate elements in the result only once. There is no difference between the `DISTINCT` and `UNIQUE` keywords. +## Combining collections + This example uses the `MULTISET UNION` operator to combine `collection_1` and `collection_2` into a third collection, `collection_3`: ```sql @@ -76,6 +78,8 @@ Results: 10 20 30 40 The resulting collection includes only those members with distinct values. +## Removing duplicate entries + In this example, the `MULTISET UNION DISTINCT` operator removes duplicate entries that are stored in the same collection: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx index 2bfac6d669d..23c32eb058c 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/03_using_the_forall_statement.mdx @@ -6,7 +6,9 @@ redirects: -You can use collections to process DML commands more efficiently by passing all the values to be used for repetitive execution of a `DELETE`, `INSERT`, or `UPDATE` command in one pass to the database server. The alternative is to reiteratively invoking the DML command with new values. Specify the DML command to process this way with the `FORALL` statement. In addition, provide one or more collections in the DML command where you want to subsitute different values are each time the command is executed. 
+You can use collections to process DML commands more efficiently by passing all the values to be used for repetitive execution of a `DELETE`, `INSERT`, or `UPDATE` command in one pass to the database server. The alternative is to reiteratively invoke the DML command with new values. Specify the DML command to process this way with the `FORALL` statement. In addition, provide one or more collections in the DML command where you want to substitute different values each time the command is executed. + +## Syntax ```sql FORALL IN .. ``` `index` is the position in the collection given in the `insert_stmt`, `update_stmt`, or `delete_stmt` DML command that iterates from the integer value given as `lower_bound` up to and including `upper_bound.` -If an exception occurs during any iteration of the `FORALL` statement, all updates that occurred since the start of the execution of the `FORALL` statement are rolled back. This behavior isn't compatible with Oracle databases. Oracle allows explicit use of the `COMMIT` or `ROLLBACK` commands to control whether to commit or roll back updates that occurred prior to the exception. +## How it works + +If an exception occurs during any iteration of the `FORALL` statement, all updates that occurred since the start of the execution of the `FORALL` statement are rolled back. This behavior **isn't compatible** with Oracle databases. Oracle allows explicit use of the `COMMIT` or `ROLLBACK` commands to control whether to commit or roll back updates that occurred prior to the exception. -The `FORALL` statement creates a loop. Each iteration of the loop increments the `index` variable. You typically use the `index` in the loop to select a member of a collection. Control the number of iterations with the `lower_bound .. upper_bound` clause. The loop is executes once for each integer between the `lower_bound` and `upper_bound` (inclusive), and the index increments by one for each iteration. +The `FORALL` statement creates a loop. Each iteration of the loop increments the `index` variable. You typically use the `index` in the loop to select a member of a collection. Control the number of iterations with the `lower_bound .. upper_bound` clause. The loop executes once for each integer between the `lower_bound` and `upper_bound` (inclusive), and the index increments by one for each iteration. For example: @@ -25,6 +29,8 @@ For example: This expression creates a loop that executes four times. In the first iteration, `index (i)` is set to the value `2`. In the second iteration, the index is set to the value `3`, and so on. The loop executes for the value `5` and then terminates. +## Using FORALL with CREATE + This example creates a table `emp_copy` that's an empty copy of the `emp` table. The example declares a type `emp_tbl` that's an array. Each element in the array is of composite type, composed of the column definitions used to create the table `emp`. The example also creates an index on the `emp_tbl` type. `t_emp` is an associative array of type `emp_tbl`. The `SELECT` statement uses the `BULK COLLECT INTO` command to populate the `t_emp` array. After the `t_emp` array is populated, the `FORALL` statement iterates through the values `(i)` in the `t_emp` array index and inserts a row for each record into `emp_copy`.
@@ -47,6 +53,8 @@ BEGIN END; ``` +## Using FORALL with UPDATE + This example uses a `FORALL` statement to update the salary of three employees: ```sql @@ -76,6 +84,8 @@ __OUTPUT__ (3 rows) ``` +## Using FORALL with DELETE + This example deletes three employees in a `FORALL` statement: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect.mdx index 9b9f73e348d..a837e6b5e77 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/04_returning_bulk_collect.mdx @@ -6,6 +6,8 @@ redirects: +## Syntax + You can add `BULK COLLECT` to the `RETURNING INTO` clause of a `DELETE`, `INSERT`, or `UPDATE` command: ```sql @@ -38,6 +40,8 @@ __OUTPUT__ (4 rows) ``` +## Examples + This example increases all employee salaries by 1.5, stores the employees’ numbers, names, and new salaries in three associative arrays, and displays the contents of these arrays: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/index.mdx index ed82c9f25c6..afb9be2ef57 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause/index.mdx @@ -1,5 +1,6 @@ --- title: "Using the BULK COLLECT clause" +indexCards: simple redirects: - ../../../../epas_compat_spl/12_working_with_collections/04_using_the_bulk_collect_clause #generated for docs/epas/reorg-role-use-case-mode --- diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/05_errors_and_messages.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/05_errors_and_messages.mdx index 45e1829df53..5bca27c4b7a 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/05_errors_and_messages.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/05_errors_and_messages.mdx @@ -10,6 +10,8 @@ redirects: +## Reporting messages + Use the `DBMS_OUTPUT.PUT_LINE` statement to report messages: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx index 4d655394ad1..478001e4ee6 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/02_types_of_triggers.mdx @@ -10,14 +10,16 @@ redirects: -EDB Postgres Advanced Server supports *row-level* and *statement-level* triggers. 
A row-level trigger fires once for each row that's affected by a triggering event. For example, suppose deletion is defined as a triggering event on a table, and a single `DELETE` command is issued that deletes five rows from the table. In this case, the trigger fires five times, once for each row.
+EDB Postgres Advanced Server supports *row-level* and *statement-level* triggers.
 
-A statement-level trigger fires once per triggering statement, regardless of the number of rows affected by the triggering event. In the previous example of a single `DELETE` command deleting five rows, a statement-level trigger fires only once.
+- A *row-level* trigger fires once for each row that's affected by a triggering event. For example, suppose deletion is defined as a triggering event on a table, and a single `DELETE` command is issued that deletes five rows from the table. In this case, the trigger fires five times, once for each row.
+
+- A *statement-level* trigger fires once per triggering statement, regardless of the number of rows affected by the triggering event. In the previous example of a single `DELETE` command deleting five rows, a statement-level trigger fires only once.
 
 You can define the sequence of actions regarding whether the trigger code block executes before or after the triggering statement for statement-level triggers. For row-level triggers, you can define whether the trigger code block executes before or after each row is affected by the triggering statement.
 
-In a *before* row-level trigger, the trigger code block executes before the triggering action is carried out on each affected row. In a *before* statement-level trigger, the trigger code block executes before the action of the triggering statement is carried out.
+- In a *before* row-level trigger, the trigger code block executes before the triggering action is carried out on each affected row. In a *before* statement-level trigger, the trigger code block executes before the action of the triggering statement is carried out.
 
-In an *after* row-level trigger, the trigger code block executes after the triggering action is carried out on each affected row. In an *after* statement-level trigger, the trigger code block executes after the action of the triggering statement is carried out.
+- In an *after* row-level trigger, the trigger code block executes after the triggering action is carried out on each affected row. In an *after* statement-level trigger, the trigger code block executes after the action of the triggering statement is carried out.
 
-In a compound trigger, you can define a statement-level and a row-level trigger in a single trigger and fire it at more than one timing point. For details, see [Compound triggers](06_compound_triggers/#compound_triggers).
+In a *compound trigger*, you can define a statement-level and a row-level trigger in a single trigger and fire it at more than one timing point. For details, see [Compound triggers](06_compound_triggers/#compound_triggers). 
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/03_creating_triggers.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/03_creating_triggers.mdx index c9c0b4eac00..b5276df8e74 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/03_creating_triggers.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/03_creating_triggers.mdx @@ -10,14 +10,12 @@ redirects: -The `CREATE TRIGGER` command defines and names a trigger that's stored in the database. +The `CREATE TRIGGER` command defines and names a trigger that's stored in the database. You can create a simple trigger or a compound trigger. -## Name +## Creating a simple trigger `CREATE TRIGGER` — Define a simple trigger. -## Synopsis - ```sql CREATE [ OR REPLACE ] TRIGGER { BEFORE | AFTER | INSTEAD OF } @@ -39,12 +37,10 @@ CREATE [ OR REPLACE ] TRIGGER END ``` -## Name +## Creating a compound trigger `CREATE TRIGGER` — Define a compound trigger. -## Synopsis - ```sql CREATE [ OR REPLACE ] TRIGGER FOR { INSERT | UPDATE | DELETE | TRUNCATE } diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/04_trigger_variables.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/04_trigger_variables.mdx index a034f72f533..e3d3cc7fa66 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/04_trigger_variables.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/04_trigger_variables.mdx @@ -12,7 +12,7 @@ redirects: In the trigger code block, several special variables are available for use. -`NEW` +## NEW `NEW` is a pseudo-record name that refers to the new table row for insert and update operations in row-level triggers. This variable doesn't apply to statement-level triggers and delete operations of row-level triggers. @@ -28,7 +28,7 @@ In the trigger code block, several special variables are available for use. In the trigger code block, you can use `:NEW.column` like any other variable. If a value is assigned to `:NEW.column` in the code block of a before row-level trigger, the assigned value is used in the new inserted or updated row. -`OLD` +## OLD `OLD` is a pseudo-record name that refers to the old table row for update and delete operations in row-level triggers. This variable doesn't apply in statement-level triggers and in insert operations of row-level triggers. @@ -38,14 +38,14 @@ In the trigger code block, several special variables are available for use. In the trigger code block, you can use `:OLD.column` like any other variable. Assigning a value to `:OLD.column` has no effect on the action of the trigger. -`INSERTING` +## INSERTING `INSERTING` is a conditional expression that returns `TRUE` if an insert operation fired the trigger. Otherwise it returns `FALSE`. -`UPDATING` +## UPDATING `UPDATING` is a conditional expression that returns `TRUE` if an update operation fired the trigger. Otherwise it returns `FALSE`. -`DELETING` +## DELETING `DELETING` is a conditional expression that returns `TRUE` if a delete operation fired the trigger. Otherwise it returns `FALSE`. 
diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/06_compound_triggers.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/06_compound_triggers.mdx index 0296d8f0e55..91be15cc9e8 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/06_compound_triggers.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/06_compound_triggers.mdx @@ -6,7 +6,11 @@ redirects: -EDB Postgres Advanced Server has compatible syntax to support compound triggers. A compound trigger combines all the triggering timings under one trigger body that you can invoke at one or more *timing points*. A timing point is a point in time related to a triggering statement, which is an `INSERT`, `UPDATE`, `DELETE`, or `TRUNCATE` statement that modifies data. The supported timing points are: +EDB Postgres Advanced Server has compatible syntax to support compound triggers. + +## Compound trigger overview + +A compound trigger combines all the triggering timings under one trigger body that you can invoke at one or more *timing points*. A timing point is a point in time related to a triggering statement, which is an `INSERT`, `UPDATE`, `DELETE`, or `TRUNCATE` statement that modifies data. The supported timing points are: - `BEFORE STATEMENT` — Before the triggering statement executes. - `BEFORE EACH ROW` — Before each row that the triggering statement affects. @@ -18,6 +22,8 @@ A compound trigger can include any combination of timing points defined in a sin The optional declaration section in a compound trigger allows you to declare trigger-level variables and subprograms. The content of the declaration is accessible to all timing points referenced by the trigger definition. The variables and subprograms created by the declaration persist only for the duration of the triggering statement. +## Syntax + A compound trigger contains a declaration followed by a PL block for each timing point: ```sql @@ -55,6 +61,8 @@ Trigger created. !!! Note You don't have to have all the four timing blocks. You can create a compound trigger for any of the required timing points. +## Restrictions + A compound trigger has the following restrictions: - A compound trigger body is made up of a compound trigger block. diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx index 7ecf1d30583..e14603cc862 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/06_compound_trigger.mdx @@ -6,92 +6,96 @@ redirects: -This example shows a compound trigger that records a change to the employee salary by defining a compound trigger (named `hr_trigger`) on the `emp` table. - -Create a table named `emp`: - -```sql -CREATE TABLE emp(EMPNO INT, ENAME TEXT, SAL INT, DEPTNO INT); -CREATE TABLE -``` - -Create a compound trigger named `hr_trigger`. The trigger uses each of the four timing points to modify the salary with an `INSERT`, `UPDATE`, or `DELETE` statement. In the global declaration section, the initial salary is declared as `10,000`. 
- -```sql -CREATE OR REPLACE TRIGGER hr_trigger - FOR INSERT OR UPDATE OR DELETE ON emp - COMPOUND TRIGGER - -- Global declaration. - var_sal NUMBER := 10000; - - BEFORE STATEMENT IS - BEGIN - var_sal := var_sal + 1000; - DBMS_OUTPUT.PUT_LINE('Before Statement: ' || var_sal); - END BEFORE STATEMENT; - - BEFORE EACH ROW IS - BEGIN - var_sal := var_sal + 1000; - DBMS_OUTPUT.PUT_LINE('Before Each Row: ' || var_sal); - END BEFORE EACH ROW; - - AFTER EACH ROW IS - BEGIN - var_sal := var_sal + 1000; - DBMS_OUTPUT.PUT_LINE('After Each Row: ' || var_sal); - END AFTER EACH ROW; - - AFTER STATEMENT IS - BEGIN - var_sal := var_sal + 1000; - DBMS_OUTPUT.PUT_LINE('After Statement: ' || var_sal); - END AFTER STATEMENT; - -END hr_trigger; - -Output: Trigger created. -``` - -Insert the record into table `emp`: - -```sql -INSERT INTO emp (EMPNO, ENAME, SAL, DEPTNO) VALUES(1111,'SMITH', 10000, 20); -``` - -The `INSERT` statement produces the following output: - -```sql -__OUTPUT__ -Before Statement: 11000 -Before each row: 12000 -After each row: 13000 -After statement: 14000 -INSERT 0 1 -``` - -The `UPDATE` statement updates the employee salary record, setting the salary to `15000` for a specific employee number: +## Defining a compound trigger on a table -```sql -UPDATE emp SET SAL = 15000 where EMPNO = 1111; -``` - -The `UPDATE` statement produces the following output: - -```sql -Before Statement: 11000 -Before each row: 12000 -After each row: 13000 -After statement: 14000 -UPDATE 1 +This example shows a compound trigger that records a change to the employee salary by defining a compound trigger (named `hr_trigger`) on the `emp` table. -SELECT * FROM emp; -__OUTPUT__ - EMPNO | ENAME | SAL | DEPTNO --------+-------+-------+-------- - 1111 | SMITH | 15000 | 20 -(1 row) -``` +1. Create a table named `emp`: + + ```sql + CREATE TABLE emp(EMPNO INT, ENAME TEXT, SAL INT, DEPTNO INT); + CREATE TABLE + ``` + +2. Create a compound trigger named `hr_trigger`. The trigger uses each of the four timing points to modify the salary with an `INSERT`, `UPDATE`, or `DELETE` statement. In the global declaration section, the initial salary is declared as `10,000`. + + ```sql + CREATE OR REPLACE TRIGGER hr_trigger + FOR INSERT OR UPDATE OR DELETE ON emp + COMPOUND TRIGGER + -- Global declaration. + var_sal NUMBER := 10000; + + BEFORE STATEMENT IS + BEGIN + var_sal := var_sal + 1000; + DBMS_OUTPUT.PUT_LINE('Before Statement: ' || var_sal); + END BEFORE STATEMENT; + + BEFORE EACH ROW IS + BEGIN + var_sal := var_sal + 1000; + DBMS_OUTPUT.PUT_LINE('Before Each Row: ' || var_sal); + END BEFORE EACH ROW; + + AFTER EACH ROW IS + BEGIN + var_sal := var_sal + 1000; + DBMS_OUTPUT.PUT_LINE('After Each Row: ' || var_sal); + END AFTER EACH ROW; + + AFTER STATEMENT IS + BEGIN + var_sal := var_sal + 1000; + DBMS_OUTPUT.PUT_LINE('After Statement: ' || var_sal); + END AFTER STATEMENT; + + END hr_trigger; + + Output: Trigger created. + ``` + +3. Insert the record into table `emp`: + + ```sql + INSERT INTO emp (EMPNO, ENAME, SAL, DEPTNO) VALUES(1111,'SMITH', 10000, 20); + ``` + + The `INSERT` statement produces the following output: + + ```sql + __OUTPUT__ + Before Statement: 11000 + Before each row: 12000 + After each row: 13000 + After statement: 14000 + INSERT 0 1 + ``` + +4. 
The `UPDATE` statement updates the employee salary record, setting the salary to `15000` for a specific employee number: + + ```sql + UPDATE emp SET SAL = 15000 where EMPNO = 1111; + ``` + + The `UPDATE` statement produces the following output: + + ```sql + Before Statement: 11000 + Before each row: 12000 + After each row: 13000 + After statement: 14000 + UPDATE 1 + + SELECT * FROM emp; + __OUTPUT__ + EMPNO | ENAME | SAL | DEPTNO + -------+-------+-------+-------- + 1111 | SMITH | 15000 | 20 + (1 row) + ``` + +### DELETE The `DELETE` statement deletes the employee salary record: @@ -114,6 +118,7 @@ __OUTPUT__ -------+-------+-----+-------- (0 rows) ``` +### TRUNCATE The `TRUNCATE` statement removes all the records from the `emp` table: @@ -153,6 +158,8 @@ TRUNCATE TABLE !!! Note You can use the `TRUNCATE` statement only at a `BEFORE STATEMENT` or `AFTER STATEMENT` timing point. +## Creating a compound trigger on a table with a WHEN condition + This example creates a compound trigger named `hr_trigger` on the `emp` table with a `WHEN` condition. The `WHEN` condition checks and prints the employee salary when an `INSERT`, `UPDATE`, or `DELETE` statement affects the `emp` table. The database evaluates the `WHEN` condition for a row-level trigger, and the trigger executes once per row if the `WHEN` condition evaluates to `TRUE`. The statement-level trigger executes regardless of the `WHEN` condition. ```sql @@ -184,6 +191,7 @@ CREATE OR REPLACE TRIGGER hr_trigger END hr_trigger; ``` +### INSERT Insert the record into table `emp`: @@ -202,6 +210,8 @@ After Statement INSERT 0 1 ``` +### UPDATE + The `UPDATE` statement updates the employee salary record, setting the salary to `7500`: ```sql @@ -225,6 +235,8 @@ __OUTPUT__ (1 row) ``` +### DELETE + The `DELETE` statement deletes the employee salary record: ```sql diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/index.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/index.mdx index e67ef491f68..9a2c48865da 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/index.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/13_triggers/07_trigger_examples/index.mdx @@ -1,5 +1,6 @@ --- title: "Trigger examples" +indexCards: simple legacyRedirectsGenerated: # This list is generated by a script. If you need add entries, use the `legacyRedirects` key. - "/edb-docs/d/edb-postgres-advanced-server/user-guides/database-compatibility-for-oracle-developers-guide/9.6/Database_Compatibility_for_Oracle_Developers_Guide_v9.6.1.088.html" diff --git a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx index 34e714426d2..dbbae5fd254 100644 --- a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx +++ b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx @@ -11,7 +11,7 @@ EDB Postgres Advanced Server's `postgresql.conf` file includes a configuration p To enable DRITA: -1. Do one of the following: +- Do one of the following: - Modify the `postgresql.conf` file, setting the `timed_statistics` parameter to `TRUE`. 
@@ -21,7 +21,7 @@ To enable DRITA: SET timed_statistics = TRUE ``` -2. After modifying the `timed_statistics` parameter, take a starting snapshot. A snapshot captures the current state of each timer and event counter. The server compares the starting snapshot to a later snapshot to gauge system performance. Use the `edbsnap()` function to take the beginning snapshot: +- After modifying the `timed_statistics` parameter, take a starting snapshot. A snapshot captures the current state of each timer and event counter. The server compares the starting snapshot to a later snapshot to gauge system performance. Use the `edbsnap()` function to take the beginning snapshot: ```sql edb=# SELECT * FROM edbsnap(); @@ -32,7 +32,7 @@ To enable DRITA: (1 row) ``` -3. Run the workload that you want to evaluate. When the workload is complete or at a strategic point during the workload, take another snapshot: +- Run the workload that you want to evaluate. When the workload is complete or at a strategic point during the workload, take another snapshot: ```sql edb=# SELECT * FROM edbsnap(); From a68e51f640eaee98be94239a40e35c7a9fb57c24 Mon Sep 17 00:00:00 2001 From: francoughlin Date: Mon, 7 Aug 2023 16:31:32 -0400 Subject: [PATCH 04/38] Formatting fix Fix for example formatting --- .../taking_a_snapshot.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx index dbbae5fd254..c631390d089 100644 --- a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx +++ b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx @@ -17,9 +17,9 @@ To enable DRITA: - Connect to the server with the EDB-PSQL client and invoke the command: - ```sql - SET timed_statistics = TRUE - ``` +```sql +SET timed_statistics = TRUE +``` - After modifying the `timed_statistics` parameter, take a starting snapshot. A snapshot captures the current state of each timer and event counter. The server compares the starting snapshot to a later snapshot to gauge system performance. Use the `edbsnap()` function to take the beginning snapshot: From f19f42cc9e4d0d0167c04fd8a50d65fba8782a26 Mon Sep 17 00:00:00 2001 From: francoughlin Date: Mon, 7 Aug 2023 16:37:37 -0400 Subject: [PATCH 05/38] Additional edits --- .../taking_a_snapshot.mdx | 44 +++++++++---------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx index c631390d089..1b4c78de659 100644 --- a/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx +++ b/product_docs/docs/epas/15/managing_performance/04_dynamic_runtime_instrumentation_tools_architecture_DRITA/taking_a_snapshot.mdx @@ -7,15 +7,11 @@ description: "Describes how to take a snapshot of system performance data" EDB Postgres Advanced Server's `postgresql.conf` file includes a configuration parameter named `timed_statistics` that controls collecting timing data. 
The valid parameter values are `TRUE` or `FALSE`. The default value is `FALSE`. -`timed_statistics` is a dynamic parameter that you can modify in the `postgresql.conf` file or while a session is in progress. +`timed_statistics` is a dynamic parameter that you can modify in the `postgresql.conf` file or while a session is in progress. To enable DRITA, you must either: -To enable DRITA: +- Modify the `postgresql.conf` file, setting the `timed_statistics` parameter to `TRUE`. -- Do one of the following: - - - Modify the `postgresql.conf` file, setting the `timed_statistics` parameter to `TRUE`. - - - Connect to the server with the EDB-PSQL client and invoke the command: +- Connect to the server with the EDB-PSQL client and invoke the command: ```sql SET timed_statistics = TRUE @@ -23,26 +19,26 @@ SET timed_statistics = TRUE - After modifying the `timed_statistics` parameter, take a starting snapshot. A snapshot captures the current state of each timer and event counter. The server compares the starting snapshot to a later snapshot to gauge system performance. Use the `edbsnap()` function to take the beginning snapshot: - ```sql - edb=# SELECT * FROM edbsnap(); - __OUTPUT__ - edbsnap - ---------------------- - Statement processed. - (1 row) - ``` +```sql +edb=# SELECT * FROM edbsnap(); +__OUTPUT__ + edbsnap +---------------------- + Statement processed. +(1 row) +``` - Run the workload that you want to evaluate. When the workload is complete or at a strategic point during the workload, take another snapshot: - ```sql - edb=# SELECT * FROM edbsnap(); - __OUTPUT__ - edbsnap - ---------------------- - Statement processed. - (1 row) - ``` +```sql +edb=# SELECT * FROM edbsnap(); +__OUTPUT__ + edbsnap +---------------------- + Statement processed. +(1 row) +``` -You can capture multiple snapshots during a session. Then, use the DRITA functions and reports to manage and compare the snapshots to evaluate performance information. +You can capture multiple snapshots during a session. Finally, you can use the DRITA functions and reports to manage and compare the snapshots to evaluate performance information. From 9052c5890c5cdb94e7784ad5046c28d9fa45a565 Mon Sep 17 00:00:00 2001 From: Betsy Gitelman <93718720+ebgitelman@users.noreply.github.com> Date: Tue, 8 Aug 2023 16:20:07 -0400 Subject: [PATCH 06/38] edits to PR4554 --- .../02_index_advisor/05_index_advisor_limitations.mdx | 4 ++-- .../docs/epas/15/managing_performance/03_sql_profiler.mdx | 2 +- .../docs/epas/15/managing_performance/using_dynatune.mdx | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/product_docs/docs/epas/15/managing_performance/02_index_advisor/05_index_advisor_limitations.mdx b/product_docs/docs/epas/15/managing_performance/02_index_advisor/05_index_advisor_limitations.mdx index 5fa95cec750..0174eae9fad 100644 --- a/product_docs/docs/epas/15/managing_performance/02_index_advisor/05_index_advisor_limitations.mdx +++ b/product_docs/docs/epas/15/managing_performance/02_index_advisor/05_index_advisor_limitations.mdx @@ -9,13 +9,13 @@ redirects: Prior to running the Index Advisor feature, review the following limitations: -- Index Advisor doesn't consider Index Only scans. It does consider Index scans when making recommendations. +- Index Advisor doesn't consider index-only scans. It does consider index scans when making recommendations. - Index Advisor ignores any computations found in the `WHERE` clause. Effectively, the index field in the recommendations isn't any kind of expression. 
The field is a simple column name. - Index Advisor doesn't consider inheritance when recommending hypothetical indexes. If a query references a parent table, Index Advisor doesn't make any index recommendations on child tables. -- Suppose you're restoring a `pg_dump` backup file that includes the `index_advisor_log` table or any tables for which indexing recommendations were made and stored in the `index_advisor_log` table. Changes in object identifiers (OIDs) can result in "broken links" between the `index_advisor_log` table and the restored tables referenced by rows in the `index_advisor_log` table. +- Suppose you're restoring a pg_dump backup file that includes the `index_advisor_log` table or any tables for which indexing recommendations were made and stored in the `index_advisor_log` table. Changes in object identifiers (OIDs) can result in broken links between the `index_advisor_log` table and the restored tables referenced by rows in the `index_advisor_log` table. - If you need to display the recommendations made prior to the backup, you can replace the old OIDs in the `reloid` column of the `index_advisor_log` table with the new OIDs of the referenced tables using the SQL `UPDATE` statement: diff --git a/product_docs/docs/epas/15/managing_performance/03_sql_profiler.mdx b/product_docs/docs/epas/15/managing_performance/03_sql_profiler.mdx index ab200aa14ff..8b177a46f1e 100644 --- a/product_docs/docs/epas/15/managing_performance/03_sql_profiler.mdx +++ b/product_docs/docs/epas/15/managing_performance/03_sql_profiler.mdx @@ -11,7 +11,7 @@ redirects: -Inefficient SQL code is a leading cause of database performance problems. The challenge for database administrators and developers is locating and then optimizing this code in large, complex systems. *SQL Profiler* helps you locate and optimize poorly running SQL code. +Inefficient SQL code is a leading cause of database performance problems. The challenge for database administrators and developers is locating and then optimizing this code in large, complex systems. SQL Profiler helps you locate and optimize poorly running SQL code. Specific features and benefits of SQL Profiler include: diff --git a/product_docs/docs/epas/15/managing_performance/using_dynatune.mdx b/product_docs/docs/epas/15/managing_performance/using_dynatune.mdx index 7dba2b1116c..af07517e15e 100644 --- a/product_docs/docs/epas/15/managing_performance/using_dynatune.mdx +++ b/product_docs/docs/epas/15/managing_performance/using_dynatune.mdx @@ -24,7 +24,7 @@ Here are some things to keep in mind when setting up dynamic tuning: - A low, non-zero value, that is, 1–33, dedicates the least amount of the host machine's resources to the database server. Use this setting for a development machine where many other applications are being used. -- A value in the range of 34—66 dedicates a moderate amount of resources to the database server. You might use this setting for a dedicated application server that has a fixed number of other applications running on the same machine as EDB Postgres Advanced Server. +- A value in the range of 34–66 dedicates a moderate amount of resources to the database server. You might use this setting for a dedicated application server that has a fixed number of other applications running on the same machine as EDB Postgres Advanced Server. - The highest values, that is, 67–100, dedicate most of the server's resources to the database server. Use this setting for a host machine that's totally dedicated to running EDB Postgres Advanced Server. 
From dd360a2bd4467ac681dcb2f5826697cdd2f38812 Mon Sep 17 00:00:00 2001 From: francoughlin Date: Thu, 10 Aug 2023 11:37:24 -0400 Subject: [PATCH 07/38] Final edits to Application programming branch --- .../07_accessing_subprogram_variables.mdx | 164 +++++++++--------- .../04_definers_vs_invokers_rights.mdx | 2 +- .../05_security_example.mdx | 2 +- .../12_working_with_collections/01_table.mdx | 2 +- 4 files changed, 86 insertions(+), 84 deletions(-) diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx index c12de40b1df..18e65458340 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/07_subprograms_subprocedures_and_subfunctions/07_accessing_subprogram_variables.mdx @@ -52,85 +52,6 @@ You can't access the following location of variables relative to the block from !!! Note The EDB Postgres Advanced Server process for accessing variables isn't compatible with Oracle databases. For Oracle, you can specify any number of qualifiers, and the search is based on the first match of the first qualifier in a similar manner to the Oracle matching algorithm for invoking subprograms. -## Example: Accessing variables in blocks - -This example shows how variables in various blocks are accessed, with and without qualifiers. The lines that are commented out show attempts to access variables that result in an error. - -```sql -CREATE OR REPLACE PROCEDURE level_0 -IS - v_level_0 VARCHAR2(20) := 'Value from level_0'; - PROCEDURE level_1a - IS - v_level_1a VARCHAR2(20) := 'Value from level_1a'; - PROCEDURE level_2a - IS - v_level_2a VARCHAR2(20) := 'Value from level_2a'; - BEGIN - DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); - DBMS_OUTPUT.PUT_LINE('........ v_level_2a: ' || v_level_2a); - DBMS_OUTPUT.PUT_LINE('........ v_level_1a: ' || v_level_1a); - DBMS_OUTPUT.PUT_LINE('........ level_1a.v_level_1a: ' || level_1a.v_level_1a); - DBMS_OUTPUT.PUT_LINE('........ v_level_0: ' || v_level_0); - DBMS_OUTPUT.PUT_LINE('........ level_0.v_level_0: ' || level_0.v_level_0); - DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); - END level_2a; - BEGIN - DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); - level_2a; --- DBMS_OUTPUT.PUT_LINE('.... v_level_2a: ' || v_level_2a); --- Error - Descendent block ----^ --- DBMS_OUTPUT.PUT_LINE('.... level_2a.v_level_2a: ' || level_2a.v_level_2a); --- Error - Descendent block ---------------^ - DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); - END level_1a; - PROCEDURE level_1b - IS - v_level_1b VARCHAR2(20) := 'Value from level_1b'; - BEGIN - DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1b'); - DBMS_OUTPUT.PUT_LINE('.... v_level_1b: ' || v_level_1b); - DBMS_OUTPUT.PUT_LINE('.... v_level_0 : ' || v_level_0); --- DBMS_OUTPUT.PUT_LINE('.... level_1a.v_level_1a: ' || level_1a.v_level_1a); --- Error - Sibling block -----------------^ --- DBMS_OUTPUT.PUT_LINE('.... level_2a.v_level_2a: ' || level_2a.v_level_2a); --- Error - Sibling block descendant ------^ - DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1b'); - END level_1b; -BEGIN - DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); - DBMS_OUTPUT.PUT_LINE('.. 
v_level_0: ' || v_level_0); - level_1a; - level_1b; - DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); -END level_0; -``` - -The following is the output showing the content of each variable when the procedure is invoked: - -```sql -BEGIN - level_0; -END; -__OUTPUT__ -BLOCK level_0 -.. v_level_0: Value from level_0 -.. BLOCK level_1a -...... BLOCK level_2a -........ v_level_2a: Value from level_2a -........ v_level_1a: Value from level_1a -........ level_1a.v_level_1a: Value from level_1a -........ v_level_0: Value from level_0 -........ level_0.v_level_0: Value from level_0 -...... END BLOCK level_2a -.. END BLOCK level_1a -.. BLOCK level_1b -.... v_level_1b: Value from level_1b -.... v_level_0 : Value from level_0 -.. END BLOCK level_1b -END BLOCK level_0 -``` - ## Accessing variables with the same name This example shows similar access attempts when all variables in all blocks have the same name: @@ -263,7 +184,9 @@ BLOCK level_0 .. END BLOCK level_1b END BLOCK level_0 ``` -## Example Accessing record types in parent blocks +## Examples + +### Example: Accessing record types in parent blocks This example is an object type whose object type method, `display_emp`, contains the record type `emp_typ` and the subprocedure `emp_sal_query`. The record variable `r_emp` declared locally to `emp_sal_query` can access the record type `emp_typ` declared in the parent block `display_emp`. @@ -335,7 +258,7 @@ Salary : 950.00 Dept # : 30 Employee's salary does not exceed the department average of 1566.67 ``` -## Example: Accessing an upper-level procedure +### Example: Accessing an upper-level procedure This example is a package with three levels of subprocedures. A record type, collection type, and cursor type declared in the upper-level procedure can be accessed by the descendent subprocedure. @@ -407,3 +330,82 @@ EMPNO ENAME 7876 ADAMS 7902 FORD ``` + +### Example: Accessing variables in blocks + +This example shows how variables in various blocks are accessed, with and without qualifiers. The lines that are commented out show attempts to access variables that result in an error. + +```sql +CREATE OR REPLACE PROCEDURE level_0 +IS + v_level_0 VARCHAR2(20) := 'Value from level_0'; + PROCEDURE level_1a + IS + v_level_1a VARCHAR2(20) := 'Value from level_1a'; + PROCEDURE level_2a + IS + v_level_2a VARCHAR2(20) := 'Value from level_2a'; + BEGIN + DBMS_OUTPUT.PUT_LINE('...... BLOCK level_2a'); + DBMS_OUTPUT.PUT_LINE('........ v_level_2a: ' || v_level_2a); + DBMS_OUTPUT.PUT_LINE('........ v_level_1a: ' || v_level_1a); + DBMS_OUTPUT.PUT_LINE('........ level_1a.v_level_1a: ' || level_1a.v_level_1a); + DBMS_OUTPUT.PUT_LINE('........ v_level_0: ' || v_level_0); + DBMS_OUTPUT.PUT_LINE('........ level_0.v_level_0: ' || level_0.v_level_0); + DBMS_OUTPUT.PUT_LINE('...... END BLOCK level_2a'); + END level_2a; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1a'); + level_2a; +-- DBMS_OUTPUT.PUT_LINE('.... v_level_2a: ' || v_level_2a); +-- Error - Descendent block ----^ +-- DBMS_OUTPUT.PUT_LINE('.... level_2a.v_level_2a: ' || level_2a.v_level_2a); +-- Error - Descendent block ---------------^ + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1a'); + END level_1a; + PROCEDURE level_1b + IS + v_level_1b VARCHAR2(20) := 'Value from level_1b'; + BEGIN + DBMS_OUTPUT.PUT_LINE('.. BLOCK level_1b'); + DBMS_OUTPUT.PUT_LINE('.... v_level_1b: ' || v_level_1b); + DBMS_OUTPUT.PUT_LINE('.... v_level_0 : ' || v_level_0); +-- DBMS_OUTPUT.PUT_LINE('.... 
level_1a.v_level_1a: ' || level_1a.v_level_1a); +-- Error - Sibling block -----------------^ +-- DBMS_OUTPUT.PUT_LINE('.... level_2a.v_level_2a: ' || level_2a.v_level_2a); +-- Error - Sibling block descendant ------^ + DBMS_OUTPUT.PUT_LINE('.. END BLOCK level_1b'); + END level_1b; +BEGIN + DBMS_OUTPUT.PUT_LINE('BLOCK level_0'); + DBMS_OUTPUT.PUT_LINE('.. v_level_0: ' || v_level_0); + level_1a; + level_1b; + DBMS_OUTPUT.PUT_LINE('END BLOCK level_0'); +END level_0; +``` + +The following is the output showing the content of each variable when the procedure is invoked: + +```sql +BEGIN + level_0; +END; +__OUTPUT__ +BLOCK level_0 +.. v_level_0: Value from level_0 +.. BLOCK level_1a +...... BLOCK level_2a +........ v_level_2a: Value from level_2a +........ v_level_1a: Value from level_1a +........ level_1a.v_level_1a: Value from level_1a +........ v_level_0: Value from level_0 +........ level_0.v_level_0: Value from level_0 +...... END BLOCK level_2a +.. END BLOCK level_1a +.. BLOCK level_1b +.... v_level_1b: Value from level_1b +.... v_level_0 : Value from level_0 +.. END BLOCK level_1b +END BLOCK level_0 +``` diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights.mdx index 919ac929483..69cba2da0ea 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights.mdx @@ -1,5 +1,5 @@ --- -title: "Definer's versus invoker's rights" +title: "About definer and invoker rights" redirects: - ../../../../../epas_compat_spl/02_spl_programs/09_program_security/04_definers_vs_invokers_rights #generated for docs/epas/reorg-role-use-case-mode --- diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/05_security_example.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/05_security_example.mdx index c899af4797b..c51c832e8f7 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/05_security_example.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/02_spl_programs/09_program_security/05_security_example.mdx @@ -170,7 +170,7 @@ GRANT EXECUTE ON FUNCTION new_empno() TO sales_mgr; GRANT EXECUTE ON PACKAGE emp_admin TO sales_mgr; ``` -## Step 8: Run programs list_emp and hire_clerk** +## Step 8: Run programs list_emp and hire_clerk Connect as user `sales_mgr`, and run the following anonymous block: diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx index 7aa18549097..04963a3d947 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/12_working_with_collections/01_table.mdx @@ -1,5 +1,5 @@ --- -title: "TABLE" +title: "Using the TABLE function" redirects: - ../../../../epas_compat_spl/12_working_with_collections/01_table #generated for 
docs/epas/reorg-role-use-case-mode --- From 14022bd405be786454090b0151d4c751d563ff47 Mon Sep 17 00:00:00 2001 From: Betsy Gitelman <93718720+ebgitelman@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:03:46 -0400 Subject: [PATCH 08/38] Edit to PR4592 --- product_docs/docs/pgd/4/harp/04_configuration.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/pgd/4/harp/04_configuration.mdx b/product_docs/docs/pgd/4/harp/04_configuration.mdx index e7e55e2878e..64757d187a8 100644 --- a/product_docs/docs/pgd/4/harp/04_configuration.mdx +++ b/product_docs/docs/pgd/4/harp/04_configuration.mdx @@ -110,7 +110,7 @@ configuration file: - **`ssl_key_file`**: Client SSL key file. !!! Tip - When `ssl` is set to `on`, the etcd endpoint URLs must contain the `https` scheme. For example, `https://host1:2379`. + When `ssl` is set to `on`, the etcd endpoint URLs must contain the `https` scheme, for example, `https://host1:2379`. #### Example From 6f44d2be2c37f53a0d698a9d743538c86fc489e3 Mon Sep 17 00:00:00 2001 From: Betsy Gitelman <93718720+ebgitelman@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:07:49 -0400 Subject: [PATCH 09/38] edits to pr4558 fixed contractions --- product_docs/docs/pem/9/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/pem/9/index.mdx b/product_docs/docs/pem/9/index.mdx index 5ffaceaf497..0bcc844d2d9 100644 --- a/product_docs/docs/pem/9/index.mdx +++ b/product_docs/docs/pem/9/index.mdx @@ -72,4 +72,4 @@ Supported versions of Postgres for PEM 9.x: |**PostgreSQL (PG)** |11, 12, 13, 14, 15 |11, 12, 13, 14, 15 | |**EDB Postgres Extended Server (PGE)** |11, 12, 13, 14, 15 |12, 13, 14, 15[^1] | -[^1]: sslutils is not available for RHEL 7 on IBM Power, so this distribution cannot use PGE as a backend. +[^1]: sslutils isn't available for RHEL 7 on IBM Power, so this distribution can't use PGE as a backend. From 2a0d49ac49d87432832056aa2b7f406b5ddf1de7 Mon Sep 17 00:00:00 2001 From: Betsy Gitelman <93718720+ebgitelman@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:34:57 -0400 Subject: [PATCH 10/38] Edits to PR4559 --- .../docs/pem/9/upgrading/upgrading_httpd.mdx | 33 +++++++++++-------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/product_docs/docs/pem/9/upgrading/upgrading_httpd.mdx b/product_docs/docs/pem/9/upgrading/upgrading_httpd.mdx index 00dcba9560e..42b16ecc627 100644 --- a/product_docs/docs/pem/9/upgrading/upgrading_httpd.mdx +++ b/product_docs/docs/pem/9/upgrading/upgrading_httpd.mdx @@ -1,56 +1,61 @@ --- -title: "Upgrading Apache HTTPD Server" +title: "Upgrading Apache HTTPD server" redirects: - /pem/latest/pem_upgrade/upgrade_httpd/ --- The Apache HTTPD web server is used to serve the PEM web application. -On Linux, PEM does not bundle HTTPD, but depends on the version provided by the system package manager. -On Windows the PEM installer bundles a version of HTTPD. +On Linux, PEM doesn't bundle HTTPD but depends on the version provided by the system package manager. +On Windows, the PEM installer bundles a version of HTTPD. ## Upgrading HTTPD on Linux !!! Important - If you are using Red Hat Enterprise Linux (RHEL), Rocky Linux, AlmaLinux, or Oracle Linux, and updating HTTPD in order to address a vulnerability, + If you're using Red Hat Enterprise Linux (RHEL), Rocky Linux, AlmaLinux, or Oracle Linux, and you're updating HTTPD to address a vulnerability, read [Note on HTTPD versioning in RHEL and RHEL derivatives](#httpd-versioning-in-rhel-and-rhel-derivatives). 
-If you are running Linux, HTTPD should be managed along with other system software on your servers. +If you're running Linux, manage HTTPD along with other system software on your servers. We recommend always using the latest version available from your package manager. -You do not need to stop PEM or back up any files prior to upgrade, simply initiate the upgrade from your package manager. +You don't need to stop PEM or back up any files prior to upgrade. Initiate the upgrade from your package manager. #### RHEL / Rocky Linux / AlmaLinux / Oracle Linux + ```bash sudo yum update httpd ``` #### Debian / Ubuntu + ```bash sudo apt update sudo apt upgrade apache2 ``` #### SLES + ```bash sudo zypper update apache2 ``` ### HTTPD versioning in RHEL and RHEL derivatives -You may notice that the latest version of HTTPD available from RHEL (or other RHEL derivatives) has a significantly older version number than the latest community release. -Specifically, RHEL 7 provides HTTPD 2.4.6 and RHEL 8 provides 2.4.37. It is very important to understand that Red Hat builds custom HTTPD packages for RHEL and backports security fixes. -For this reason, you should not assume that a vulnerability present in the community 2.4.37 release is present in the RHEL package of the same version. -If you are trying to install a newer version of HTTPD for this reason, it is almost certainly not necessary. See the [Apache HTTPD versions supported by Red Hat](https://access.redhat.com/solutions/445713) article in the Red Hat knowledgebase for more information. + +You might notice that the latest version of HTTPD available from RHEL or other RHEL derivatives has a significantly older version number than the latest community release. +Specifically, RHEL 7 provides HTTPD 2.4.6, and RHEL 8 provides 2.4.37. It's very important to understand that Red Hat builds custom HTTPD packages for RHEL and backports security fixes. + +For this reason, don't assume that a vulnerability present in the community 2.4.37 release is present in the RHEL package of the same version. +If you're trying to install a newer version of HTTPD for this reason, it's almost certainly not necessary. See the [Apache HTTPD versions supported by Red Hat](https://access.redhat.com/solutions/445713) article in the Red Hat knowledgebase for more information. !!! Warning - Neither community HTTPD nor HTTPD built from source is supported by Red Hat. - If you are considering using any other source of HTTPD on a RHEL system, read the [Apache HTTPD versions supported by Red Hat](https://access.redhat.com/solutions/445713) article and ensure you understand the implications. + Red Hat doesn't support community HTTPD or HTTPD built from source. + If you're considering using any other source of HTTPD on a RHEL system, read the [Apache HTTPD versions supported by Red Hat](https://access.redhat.com/solutions/445713) article and make sure you understand the implications. ## Upgrading HTTPD on Windows -To upgrade to the latest version of HTTPD packaged for PEM by EDB, you should download the latest PostgreSQL or EDB Advanced Server installer for Windows and use +To upgrade to the latest version of HTTPD packaged for PEM by EDB, download the latest PostgreSQL or EDB Advanced Server installer for Windows and use [Stack Builder](/supported-open-source/postgresql/installing/using_stackbuilder/) or [StackBuilder Plus](/epas/latest/installing/windows/installing_advanced_server_with_the_interactive_installer/using_stackbuilder_plus/) respectively to update PEM-HTTPD. 
-PEM-HTTPD is located under the 'Web Development' category. +PEM-HTTPD is located under the Web Development category. ![PEM-HTTPD in Stack Builder](../images/stackbuilder_pemhttpd.png) From 8a35cefdc3e94142e8b6a2bfd93a892c846f7d4c Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Wed, 9 Aug 2023 19:15:22 +0100 Subject: [PATCH 11/38] fix for crashing deeptoc Signed-off-by: Dj Walker-Morgan --- src/components/table-of-contents.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/components/table-of-contents.js b/src/components/table-of-contents.js index 48c51d5437c..241c6dd6e50 100644 --- a/src/components/table-of-contents.js +++ b/src/components/table-of-contents.js @@ -31,7 +31,7 @@ const TableOfContents = ({ toc, deepToC }) => { > {item.title} - {deepToC && ( + {deepToC && item.items != undefined && (
    {item.items .filter((subitem) => subitem.title) From ffa2899933cb93c9dbf97ef4876062d6a88ed9c7 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Wed, 9 Aug 2023 19:16:22 +0100 Subject: [PATCH 12/38] Restructure and rework Signed-off-by: Dj Walker-Morgan --- .../docs/pgd/5/cli/configuring_cli.mdx | 65 +++++++++++ .../docs/pgd/5/cli/discover_connections.mdx | 27 +++++ product_docs/docs/pgd/5/cli/index.mdx | 63 ++--------- .../docs/pgd/5/cli/installing_cli.mdx | 45 +++----- product_docs/docs/pgd/5/cli/using_cli.mdx | 102 ++++++++++++++++++ 5 files changed, 218 insertions(+), 84 deletions(-) create mode 100644 product_docs/docs/pgd/5/cli/configuring_cli.mdx create mode 100644 product_docs/docs/pgd/5/cli/discover_connections.mdx create mode 100644 product_docs/docs/pgd/5/cli/using_cli.mdx diff --git a/product_docs/docs/pgd/5/cli/configuring_cli.mdx b/product_docs/docs/pgd/5/cli/configuring_cli.mdx new file mode 100644 index 00000000000..28820a610d2 --- /dev/null +++ b/product_docs/docs/pgd/5/cli/configuring_cli.mdx @@ -0,0 +1,65 @@ +--- +title: "Configuring PGD CLI" +navTitle: "Configuring PGD CLI" +--- + +You can install PGD CLI on any system which is able to connect to, or is part of, the PGD cluster. You will require a user with Postgres superuser privileges (or equivalent, e.g., edb_admin on Big Animal PGD) to use PGD CLI. + +## PGD CLI and database connection strings + +You may not need a database connection string. For example, when Trusted Postgres Architect installs the PGD CLI on a system, it also configures the connection to the PGD cluster. This means that PGD CLI will automatically connect when run. + +If you are installing PGD CLI manually, you must give PGD CLI a database connection string so it knows which PGD cluster to connect to. + +!!! Important Setting passwords +PGD CLI does not interactively prompt for your user's password. You must pass your password using one of the following methods: + + 1. Adding an entry to your [`.pgpass` password file](https://www.postgresql.org/docs/current/libpq-pgpass.html) which includes the host, port, database name, user name, and password. + 1. Setting the password in the `PGPASSWORD` environment variable. + 1. Including the password in the connection string. + +We recommend the first option, as the other options don't scale well with multiple databases or compromise password confidentiality. +!!! + +If you don't know the database connection strings for your PGD-powered deployment, see [discovering connection strings](discover_connections). It is a guide to finding the right connection strings for your cluster. + +Once you have that information, you can continue. + +## Configuring the database to connect to + +PGD CLI takes its database connection information from either the PGD CLI configuration file or the command line. + + +### Using database connection strings in the command line + +You can pass the connection string directly to `pgd` using the `--dsn` option. For details, see the [sample use case](/pgd/latest/cli/#passing-a-database-connection-string). For example: + +```shell +pgd --dsn "host=bdr-a1 post=5432 user=enterprisedb" show-version +``` + +### Using a configuration file + +Use the `pgd-cli-config.yml` configuration file to specify the database connection string for your cluster. The configuration file must contain the database connection string for at least one PGD node in the cluster. The cluster name is optional and isn't validated. 
+ +For example: + +```yaml +cluster: + name: cluster-name + endpoints: + - "host=bdr-a1 port=5432 dbname=bdrdb user=enterprisedb" + - "host=bdr-b1 port=5432 dbname=bdrdb user=enterprisedb" + - "host=bdr-c1 port=5432 dbname=bdrdb user=enterprisedb" +``` + +By default, `pgd-cli-config.yml` is located in the `/etc/edb/pgd-cli` directory. The PGD CLI searches for `pgd-cli-config.yml` in the following locations. Precedence order is high to low. + + 1. `/etc/edb/pgd-cli` (default) + 2. `$HOME/.edb/pgd-cli` + +If your configuration file is not in either of these directories, you can use the optional `-f` or `--config-file` flag on a `pgd` command to set which file should be read as configuration. See the [sample use case](/pgd/latest/cli/#passing-a-database-connection-string). + + + + diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx new file mode 100644 index 00000000000..ecb901f2e68 --- /dev/null +++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx @@ -0,0 +1,27 @@ +--- +title: "Discovering Connection Strings" +navTitle: "Discovering Connection Strings" +indexdepth: 2 +deepToC: true +--- + +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with Postgres superuser privileges (or equivalent, e.g. edb_admin on Big Animal PGD) to use PGD CLI. + +## PGD CLI and database connection strings + +You may not need a database connection string. For example, when Trusted Postgres Architect installs the PGD CLI on a system, it also configures the connection to the PGD cluster. This means that PGD CLI will automatically connect when run. + +## Getting your database connection string + + +### For a TPA-deployed PGD cluster + + + +### For a BigAnimal distributed high-availability cluster + + + +### For an EDB PGD for Kubernetes deployed cluster + + diff --git a/product_docs/docs/pgd/5/cli/index.mdx b/product_docs/docs/pgd/5/cli/index.mdx index d000597c81d..7f46bfc1def 100644 --- a/product_docs/docs/pgd/5/cli/index.mdx +++ b/product_docs/docs/pgd/5/cli/index.mdx @@ -1,70 +1,27 @@ --- title: "EDB Postgres Distributed Command Line Interface" -navTitle: "Command line interface" +navTitle: "PGD CLI" indexCards: none navigation: - installing_cli +- using_cli +- configuring_cli +- discover_connections - command_ref directoryDefaults: description: "The PGD Command Line Interface (CLI) is a tool to manage your EDB Postgres Distributed cluster" --- -The EDB Postgres Distributed Command Line Interface (PGD CLI) is a tool for managing your EDB Postgres Distributed cluster. It allows you to run commands against EDB Postgres Distributed clusters. +The EDB Postgres Distributed Command Line Interface (PGD CLI) is a tool for managing your EDB Postgres Distributed cluster. It allows you to run commands against EDB Postgres Distributed clusters. It may be installed automatically on systems within a TPA-deployed PGD cluster or it can be installed manually on systems that can connect to any PGD cluster, including Big Animal Distributed High Availability PGD clusters or PGD clusters deployed using the EDB PGD for Kubernetes operator. -See the [Command reference](command_ref) for the available commands to inspect, manage, and get information about cluster resources. - -See [Installing PGD CLI](installing_cli) for information about how Trusted Postgres Architect deploys PGD CLI, how to install PGD CLI on a standalone server manually, and specifying connection strings. 
- -## Requirements - -The PGD CLI requires Postgres superuser privileges to run. - -## Using the PGD CLI -`pgd` is the command name for the PGD command line interface. See [pgd](command_ref) in the command reference for a description of the command options. - -## Specifying a configuration file - -If you rename the file or move it to another location, specify the new name and location using the optional `-f` or `--config-file` flag. For example: - -```sh - pgd show-nodes -f /opt/my-config.yml -``` - -## Passing a database connection string +See [Installing PGD CLI](installing_cli) for information about how to install PGD CLI, both automatically with Trusted Postgres Architect and manually. -Use the `--dsn` flag to pass a database connection string directly to a command. You don't need a configuration file if you pass the connection string with this flag. The flag takes precedence if a configuration file is present. For example: +See [Using PGD CLI](using_cli) for an introduction to using the PGD CLI and connecting to your PGD cluster. -```sh -pgd show-nodes --dsn "host=bdr-a1 port=5432 dbname=bdrdb user=postgres " -``` +See [Configuring PGD CLI](configuring_cli) for details on creating persistent configurations for quicker connections. -## Specifying the output format - -The PGD CLI supports the following output formats: - -| Format | Considerations | -| ------- | -------------- | -| tabular | Default format. Presents the data in tabular form.| -| json | Presents the raw data with no formatting. For some commands, the json output might show more data than in the tabular output, such as extra fields and more detailed messages. | -| yaml | Same as json except field order is alphabetical. Experimental and might not be fully supported in future versions. | - -Use the `-o` or `--output` flag to change the default output format to json or yaml. For example: - -```sh -pgd show-nodes -o json -``` - -## Accessing the command line help - -To list the supported commands, enter: - -```sh -pgd help -``` +See the [Command reference](command_ref) for the available commands to inspect, manage, and get information about cluster resources. -For help for a specific command and its parameters, enter `pgd help `. For example: +There is also a guide to [discovering connection strings](discover_connections) which shows how to obtain the correct connection strings for your PGD-powered deployment. -```sh -pgd help show-nodes -``` diff --git a/product_docs/docs/pgd/5/cli/installing_cli.mdx b/product_docs/docs/pgd/5/cli/installing_cli.mdx index 028070dbc62..8c5963cfaee 100644 --- a/product_docs/docs/pgd/5/cli/installing_cli.mdx +++ b/product_docs/docs/pgd/5/cli/installing_cli.mdx @@ -3,47 +3,30 @@ title: "Installing PGD CLI" navTitle: "Installing PGD CLI" --- +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with Postgres superuser privileges (or equivalent, e.g. edb_admin on Big Animal PGD) to use PGD CLI. +## Installing automatically with Trusted Postgres Architect (TPA) By default, Trusted Postgres Architect installs and configures PGD CLI on each PGD node. If you want to install PGD CLI on any non-PGD instance in the cluster, attach the pgdcli role to that instance in Trusted Postgres Architect's configuration file before deploying. See [Trusted Postgres Architect](/tpa/latest/) for more information. 
 
-## Installing manually
+## Installing manually on Linux
 
-You can manually install the PGD CLI on any Linux machine using `.deb` and `.rpm` packages available from the EDB repository. The package name is `edb-pgd5-cli`. For example:
+PGD CLI is installable from the EDB Repositories. These repositories require a token to enable downloads from them. You will need to log in to [EDB Repos 2.0](https://www.enterprisedb.com/repos-downloads) to obtain your token. Then execute the following command, substituting
+your token for `<your-token>`.
 
-```sh
-# for Debian
+### Add repository and install PGD CLI on Debian or Ubuntu
+
+```bash
+curl -1sLf 'https://downloads.enterprisedb.com/<your-token>/postgres_distributed/setup.deb.sh' | sudo -E bash
 sudo apt-get install edb-pgd5-cli
 ```
 
-When Trusted Postgres Architect configures the PGD CLI, it connects automatically. With a manual installation to a standalone EDB Postgres Distributed cluster, you need to provide a connection string.
-
-
-### Specifying database connection strings
-
-You can use a configuration file to specify the database connection strings for your cluster. Alternatively, you can pass the connection string directly to a command. For details, see the [sample use case](/pgd/latest/cli/#passing-a-database-connection-string).
+### Add repository and install PGD CLI on RHEL, Rocky, AlmaLinux or Oracle Linux
 
-#### Using a configuration file
-
-Use the `pgd-cli-config.yml` configuration file to specify the database connection string for your cluster. The configuration file must contain the database connection string for at least one PGD node in the cluster. The cluster name is optional and isn't validated.
-
-For example:
-
-```yaml
-cluster:
-  name: cluster-name
-  endpoints:
-  - "host=bdr-a1 port=5432 dbname=bdrdb user=postgres "
-  - "host=bdr-b1 port=5432 dbname=bdrdb user=postgres "
-  - "host=bdr-c1 port=5432 dbname=bdrdb user=postgres "
+```bash
+curl -1sLf 'https://downloads.enterprisedb.com/<your-token>/postgres_distributed/setup.rpm.sh' | sudo -E bash
+sudo yum install edb-pgd5-cli
 ```
 
-By default, `pgd-cli-config.yml` is located in the `/etc/edb/pgd-cli` directory. The PGD CLI searches for `pgd-cli-config.yml` in the following locations. Precedence order is high to low.
-
- 1. `/etc/edb/pgd-cli` (default)
- 2. `$HOME/.edb/pgd-cli`
+[Next: Using PGD CLI](using_cli)
 
-If you rename the file or move it to another location, specify the new name and location using the optional `-f` or `--config-file` flag when entering a command. See the [sample use case](/pgd/latest/cli/#passing-a-database-connection-string).
 
-!!! Note Avoiding stale data
-The PGD CLI can return stale data on the state of the cluster if it's still connecting to nodes that were previously parted from the cluster. Edit the `pgd-cli-config.yml` file, or change your `--dsn` settings to ensure only active nodes in the cluster are listed for connection.
-!!!
diff --git a/product_docs/docs/pgd/5/cli/using_cli.mdx b/product_docs/docs/pgd/5/cli/using_cli.mdx
new file mode 100644
index 00000000000..ad0c9b4bd21
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/using_cli.mdx
@@ -0,0 +1,102 @@
+---
+title: "Using PGD CLI"
+navTitle: "Using PGD CLI"
+---
+
+## What is the PGD CLI
+
+The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres user with Postgres superuser privileges to use it.
+
+!!! Important Setting passwords
+PGD CLI does not interactively prompt for your user's password. You must pass your password using one of the following methods:
+
+ 1. 
Adding an entry to your [`.pgpass` password file](https://www.postgresql.org/docs/current/libpq-pgpass.html) which includes the host, port, database name, user name, and password. + 1. Setting the password in the `PGPASSWORD` environment variable. + 1. Including the password in the connection string. + +We recommend the first option, as the other options don't scale well with multiple databases or compromise password confidentiality. +!!! + +## Running the PGD CLI + +Once you have [installed pgd-cli](installing_cli), run the `pgd` command to access the PGD command line interface. The `pgd` command will need details of which host, port, and database to connect to, along with your username and password. + +## Passing a database connection string + +Use the `--dsn` flag to pass a database connection string to the `pgd` command. You don't need a configuration file when you pass the connection string with the `--dsn` flag. The flag takes precedence even if a configuration file is present. For example: + +```sh +pgd show-nodes --dsn "host=bdr-a1 port=5432 dbname=bdrdb user=enterprisedb" +``` + +See [pgd](command_ref) in the command reference for a description of the command options. + +## Specifying a configuration file + +If a `pgd-cli-config.yml` file is in `/etc/edb/pgd-cli` or `$HOME/.edb/pgd-cli`, `pgd` will automatically use it. You can override +this behavior using the optional `-f` or `--config-file` flag. For example: + +```sh +pgd show-nodes -f /opt/my-config.yml +Node Node ID Group Type Current State Target State Status Seq ID +---- ------- ----- ---- ------------- ------------ ------ ------ +p-vjljj303dk-a-1 2573417965 p-vjljj303dk-a data ACTIVE ACTIVE Up 1 +p-vjljj303dk-a-2 126163807 p-vjljj303dk-a data ACTIVE ACTIVE Up 2 +p-vjljj303dk-a-3 3521351376 p-vjljj303dk-a witness ACTIVE ACTIVE Up 3 +``` + + +## Specifying the output format + +Use the `-o` or `--output` flag to change the default output format to JSON or YAML. For example: + +```sh +pgd show-nodes -o json +[ + { + "node_id": 2573417965, + "node_name": "p-vjljj303dk-a-1", + "node_group_id": 4169125197, + "node_group_name": "p-vjljj303dk-a", + "node_kind_name": "data", + "current_state": "ACTIVE", + "target_state": "ACTIVE", + "status": "Up", + "node_seq_id": 1, + "node_local_dbname": "bdrdb", + "interface_connstr": "host=p-vjljj303dk-a-1-node.vmk31wilqpjeopka.biganimal.io user=streaming_replica sslmode=verify-full port=5432 sslkey=/controller/certificates/streaming_replica.key sslcert=/controller/certificates/streaming_replica.crt sslrootcert=/controller/certificates/server-ca.crt application_name=p-vjljj303dk-a-1 dbname=bdrdb", + "route_priority": -1, + "route_fence": false, + "route_writes": true, + "route_reads": true, + "route_dsn": "host=p-vjljj303dk-a-1-node.vmk31wilqpjeopka.biganimal.io user=streaming_replica sslmode=verify-full port=5432 sslkey=/controller/certificates/streaming_replica.key sslcert=/controller/certificates/streaming_replica.crt sslrootcert=/controller/certificates/server-ca.crt application_name=p-vjljj303dk-a-1 dbname=bdrdb" + }, +... +] +``` + +The PGD CLI supports the following output formats: + +| Setting | Format | Considerations | +| ------- | ------ | --------- | +| none | Tabular | Default format. This setting presents the data in tabular form.| +| `json` | JSON | Presents the raw data with no formatting. For some commands, the JSON output might show more data than the tabular output, such as extra fields and more detailed messages. 
| +| `yaml` | YAML |Similar to the JSON output, but as YAML and with the fields ordered alphabetically. Experimental and may not be fully supported in future versions. | + +## Accessing the command line help + +To list the supported commands, enter: + +```sh +pgd help +``` + +For help with a specific command and its parameters, enter `pgd help `. For example: + +```sh +pgd help show-nodes +``` + +!!! Note Avoiding stale data +The PGD CLI can return stale data on the state of the cluster if it's still connecting to nodes previously parted from the cluster. Edit the `pgd-cli-config.yml` file, or change your `--dsn` settings to ensure you are connecting to active nodes in the cluster. +!!! From dad26dcbe46c26d0cdb0e74a51cc87012a7243e4 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Wed, 9 Aug 2023 19:16:44 +0100 Subject: [PATCH 13/38] Fixed phrasing Signed-off-by: Dj Walker-Morgan --- product_docs/docs/pgd/5/cli/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/pgd/5/cli/index.mdx b/product_docs/docs/pgd/5/cli/index.mdx index 7f46bfc1def..6f3c4bc10fd 100644 --- a/product_docs/docs/pgd/5/cli/index.mdx +++ b/product_docs/docs/pgd/5/cli/index.mdx @@ -23,5 +23,5 @@ See [Configuring PGD CLI](configuring_cli) for details on creating persistent co See the [Command reference](command_ref) for the available commands to inspect, manage, and get information about cluster resources. -There is also a guide to [discovering connection strings](discover_connections) which shows how to obtain the correct connection strings for your PGD-powered deployment. +There is also a guide to [discovering connection strings](discover_connections). It shows how to obtain the correct connection strings for your PGD-powered deployment. From bbb1c35690684705fbf13b31baf437baff89bd3b Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 10 Aug 2023 10:32:58 +0100 Subject: [PATCH 14/38] Corrected super user added connections Signed-off-by: Dj Walker-Morgan --- .../docs/pgd/5/cli/configuring_cli.mdx | 2 +- .../docs/pgd/5/cli/discover_connections.mdx | 59 ++++++++++++++++++- product_docs/docs/pgd/5/cli/index.mdx | 2 +- product_docs/docs/pgd/5/cli/using_cli.mdx | 2 +- 4 files changed, 60 insertions(+), 5 deletions(-) diff --git a/product_docs/docs/pgd/5/cli/configuring_cli.mdx b/product_docs/docs/pgd/5/cli/configuring_cli.mdx index 28820a610d2..95229b8b32c 100644 --- a/product_docs/docs/pgd/5/cli/configuring_cli.mdx +++ b/product_docs/docs/pgd/5/cli/configuring_cli.mdx @@ -3,7 +3,7 @@ title: "Configuring PGD CLI" navTitle: "Configuring PGD CLI" --- -You can install PGD CLI on any system which is able to connect to, or is part of, the PGD cluster. You will require a user with Postgres superuser privileges (or equivalent, e.g., edb_admin on Big Animal PGD) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use PGD CLI. ## PGD CLI and database connection strings diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx index ecb901f2e68..487e1570ba0 100644 --- a/product_docs/docs/pgd/5/cli/discover_connections.mdx +++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx @@ -5,7 +5,7 @@ indexdepth: 2 deepToC: true --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. 
You will require a user with Postgres superuser privileges (or equivalent, e.g. edb_admin on Big Animal PGD) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use PGD CLI. ## PGD CLI and database connection strings @@ -13,15 +13,70 @@ You may not need a database connection string. For example, when Trusted Postgre ## Getting your database connection string +Every deployment method has a different way of ### For a TPA-deployed PGD cluster +Because TPA is so flexible, you will have to derive your connection string from your cluster configuration file (config.yml). You will need the name or IP address of a host with the role pgd-proxy listed for it. This host will have a proxy you can connect to. Usually the proxy will be listening on port 6432 (check the setting for `default_pgd_proxy_options` and `listen_port` in the config to confirm). The default database name is `bdrdb` (check the setting `bdr_database` in the config to confirm) and the default PGD superuser will be `enterprisedb`. +You can then assemble a connection string based on that information: -### For a BigAnimal distributed high-availability cluster +``` +"host= port= dbname= user= sslmode=require" +``` + +To illustrate this, here's some excerpts of a config.yml file for a cluster: + +```yaml +... +cluster_vars: + ... + bdr_database: bdrdb + ... + default_pgd_proxy_options: + listen_port: 6432 + ... + +instances: +- Name: kaboom + backup: kapok + location: dc1 + node: 1 + role: + - bdr + - pgd-proxy + networks: + - ipv4_address: 192.168.100.2 + name: tpanet +... +``` + +The connection string for this cluster would be: + +``` +"host=192.168.100.2 port=6432 dbname=bdrdb user=enterprisedb sslmode=require" +``` +!!! Note Host name versus IP address +In our example, we use the IP address because the configuration is from a Docker TPA install with no name resolution available. Generally, you should be able to use the host name as configured. +!!! +### For a BigAnimal distributed high-availability cluster + +1. Log into the [BigAnimal Clusters](https://portal.biganimal.com/clusters) view. +1. Select your cluster. +1. In the view of your cluster, select the Connect tab. +1. Copy the Read/Write URI from the connection info. This is your connection string. ### For an EDB PGD for Kubernetes deployed cluster +As with TPA, EDB PGD for Kubernetes is very flexible and there is no one way to obtain a connection string. It depends, in large part, on how the [Services](https://www.enterprisedb.com/docs/postgres_distributed_for_kubernetes/latest/connectivity/#services) have been configured for the deployment. If the Node Service Template is used, there should be direct connectivity to each node and proxy service available. If the Group Service Template, there will be a gateway service to each group. Finally, if the Proxy Service Template has been used, there should be a single proxy providing an entry point to the cluster for all applications. Consult your configuration file to determine this information. You should be able to establish a host name or IP address, port, database name (default: bdrdb) and username (default: enterprisedb). + +You can then assemble a connection string based on that information: + +``` +"host= port= dbname= user=" + +You may need to add "sslmode=" if the deployments configuration requires it. 
+ diff --git a/product_docs/docs/pgd/5/cli/index.mdx b/product_docs/docs/pgd/5/cli/index.mdx index 6f3c4bc10fd..443ec13d74e 100644 --- a/product_docs/docs/pgd/5/cli/index.mdx +++ b/product_docs/docs/pgd/5/cli/index.mdx @@ -13,7 +13,7 @@ directoryDefaults: description: "The PGD Command Line Interface (CLI) is a tool to manage your EDB Postgres Distributed cluster" --- -The EDB Postgres Distributed Command Line Interface (PGD CLI) is a tool for managing your EDB Postgres Distributed cluster. It allows you to run commands against EDB Postgres Distributed clusters. It may be installed automatically on systems within a TPA-deployed PGD cluster or it can be installed manually on systems that can connect to any PGD cluster, including Big Animal Distributed High Availability PGD clusters or PGD clusters deployed using the EDB PGD for Kubernetes operator. +The EDB Postgres Distributed Command Line Interface (PGD CLI) is a tool for managing your EDB Postgres Distributed cluster. It allows you to run commands against EDB Postgres Distributed clusters. It may be installed automatically on systems within a TPA-deployed PGD cluster or it can be installed manually on systems that can connect to any PGD cluster, including BigAnimal Distributed High Availability PGD clusters or PGD clusters deployed using the EDB PGD for Kubernetes operator. See [Installing PGD CLI](installing_cli) for information about how to install PGD CLI, both automatically with Trusted Postgres Architect and manually. diff --git a/product_docs/docs/pgd/5/cli/using_cli.mdx b/product_docs/docs/pgd/5/cli/using_cli.mdx index ad0c9b4bd21..d91755360f8 100644 --- a/product_docs/docs/pgd/5/cli/using_cli.mdx +++ b/product_docs/docs/pgd/5/cli/using_cli.mdx @@ -5,7 +5,7 @@ navTitle: "Using PGD CLI" ## What is the PGD CLI -The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres users with Postgres superuser privileges to use it. +The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres users with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use it. !!! Important Setting passwords PGD CLI does not interactively prompt for your user's password. You must pass your password using one of the following methods: From b072c5418ba47cbe5c5a97fd650ca665ebab54a4 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 10 Aug 2023 10:34:22 +0100 Subject: [PATCH 15/38] typoooos fixed Signed-off-by: Dj Walker-Morgan --- product_docs/docs/pgd/5/cli/discover_connections.mdx | 3 ++- product_docs/docs/pgd/5/cli/installing_cli.mdx | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx index 487e1570ba0..356197a85b6 100644 --- a/product_docs/docs/pgd/5/cli/discover_connections.mdx +++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx @@ -76,7 +76,8 @@ You can then assemble a connection string based on that information: ``` "host= port= dbname= user=" +``` -You may need to add "sslmode=" if the deployments configuration requires it. +You may need to add "sslmode=" if the deployment's configuration requires it. 
diff --git a/product_docs/docs/pgd/5/cli/installing_cli.mdx b/product_docs/docs/pgd/5/cli/installing_cli.mdx index 8c5963cfaee..2a58d6a86d9 100644 --- a/product_docs/docs/pgd/5/cli/installing_cli.mdx +++ b/product_docs/docs/pgd/5/cli/installing_cli.mdx @@ -3,7 +3,7 @@ title: "Installing PGD CLI" navTitle: "Installing PGD CLI" --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with Postgres superuser privileges (or equivalent, e.g. edb_admin on Big Animal PGD) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use PGD CLI. ## Installing automatically with Trusted Postgres Architect (TPA) By default, Trusted Postgres Architect installs and configures PGD CLI on each PGD node. If you want to install PGD CLI on any non-PGD instance in the cluster, attach the pgdcli role to that instance in Trusted Postgres Architect's configuration file before deploying. See [Trusted Postgres Architect](/tpa/latest/) for more information. From 717f9fe6e6dcd9ee574065a0699fca2ea9b24ac0 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 10 Aug 2023 10:48:01 +0100 Subject: [PATCH 16/38] Naming fix for BA DHA Signed-off-by: Dj Walker-Morgan --- product_docs/docs/pgd/5/cli/configuring_cli.mdx | 2 +- product_docs/docs/pgd/5/cli/discover_connections.mdx | 6 +++--- product_docs/docs/pgd/5/cli/installing_cli.mdx | 2 +- product_docs/docs/pgd/5/cli/using_cli.mdx | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/product_docs/docs/pgd/5/cli/configuring_cli.mdx b/product_docs/docs/pgd/5/cli/configuring_cli.mdx index 95229b8b32c..4c65547db54 100644 --- a/product_docs/docs/pgd/5/cli/configuring_cli.mdx +++ b/product_docs/docs/pgd/5/cli/configuring_cli.mdx @@ -3,7 +3,7 @@ title: "Configuring PGD CLI" navTitle: "Configuring PGD CLI" --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. ## PGD CLI and database connection strings diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx index 356197a85b6..e0f89c54e2e 100644 --- a/product_docs/docs/pgd/5/cli/discover_connections.mdx +++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx @@ -5,7 +5,7 @@ indexdepth: 2 deepToC: true --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. 
## PGD CLI and database connection strings @@ -70,7 +70,7 @@ In our example, we use the IP address because the configuration is from a Docker ### For an EDB PGD for Kubernetes deployed cluster -As with TPA, EDB PGD for Kubernetes is very flexible and there is no one way to obtain a connection string. It depends, in large part, on how the [Services](https://www.enterprisedb.com/docs/postgres_distributed_for_kubernetes/latest/connectivity/#services) have been configured for the deployment. If the Node Service Template is used, there should be direct connectivity to each node and proxy service available. If the Group Service Template, there will be a gateway service to each group. Finally, if the Proxy Service Template has been used, there should be a single proxy providing an entry point to the cluster for all applications. Consult your configuration file to determine this information. You should be able to establish a host name or IP address, port, database name (default: bdrdb) and username (default: enterprisedb). +As with TPA, EDB PGD for Kubernetes is very flexible and there is no one way to obtain a connection string. It depends, in large part, on how the [Services](https://www.enterprisedb.com/docs/postgres_distributed_for_kubernetes/latest/connectivity/#services) have been configured for the deployment. If the Node Service Template is used, there should be direct connectivity to each node and proxy service available. If the Group Service Template, there will be a gateway service to each group. Finally, if the Proxy Service Template has been used, there should be a single proxy providing an entry point to the cluster for all applications. Consult your configuration file to determine this information. You should be able to establish a host name or IP address, port, database name (default: `bdrdb`) and username (default: `enterprisedb`). You can then assemble a connection string based on that information: @@ -78,6 +78,6 @@ You can then assemble a connection string based on that information: "host= port= dbname= user=" ``` -You may need to add "sslmode=" if the deployment's configuration requires it. +You may need to add `sslmode=` if the deployment's configuration requires it. diff --git a/product_docs/docs/pgd/5/cli/installing_cli.mdx b/product_docs/docs/pgd/5/cli/installing_cli.mdx index 2a58d6a86d9..0df2556e8e6 100644 --- a/product_docs/docs/pgd/5/cli/installing_cli.mdx +++ b/product_docs/docs/pgd/5/cli/installing_cli.mdx @@ -3,7 +3,7 @@ title: "Installing PGD CLI" navTitle: "Installing PGD CLI" --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. ## Installing automatically with Trusted Postgres Architect (TPA) By default, Trusted Postgres Architect installs and configures PGD CLI on each PGD node. If you want to install PGD CLI on any non-PGD instance in the cluster, attach the pgdcli role to that instance in Trusted Postgres Architect's configuration file before deploying. See [Trusted Postgres Architect](/tpa/latest/) for more information. 
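To make the TPA route concrete, here is a minimal sketch of attaching the `pgdcli` role in a TPA `config.yml`. The instance name, location, and node number are hypothetical placeholders rather than values from any cluster described here; only the `pgdcli` role entry itself comes from the documentation above.

```yaml
# Hypothetical excerpt from a TPA config.yml.
# A non-PGD instance that should only receive the PGD CLI:
instances:
- Name: cli-host          # placeholder name for a management host
  location: dc1
  node: 10
  role:
  - pgdcli                # attach this role before deploying
```

With this entry in place, deploying the cluster should install and configure PGD CLI on that instance as well.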
diff --git a/product_docs/docs/pgd/5/cli/using_cli.mdx b/product_docs/docs/pgd/5/cli/using_cli.mdx index d91755360f8..7c26a33afb2 100644 --- a/product_docs/docs/pgd/5/cli/using_cli.mdx +++ b/product_docs/docs/pgd/5/cli/using_cli.mdx @@ -5,7 +5,7 @@ navTitle: "Using PGD CLI" ## What is the PGD CLI -The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres users with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal PGD) to use it. +The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres users with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high availability) to use it. !!! Important Setting passwords PGD CLI does not interactively prompt for your user's password. You must pass your password using one of the following methods: From 05dd29097a1ec1bb41fae47dff6e913524e77974 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan <126472455+djw-m@users.noreply.github.com> Date: Thu, 10 Aug 2023 13:42:56 +0100 Subject: [PATCH 17/38] Update product_docs/docs/pgd/5/cli/configuring_cli.mdx Co-authored-by: Dee Dee Rothery <83650384+drothery-edb@users.noreply.github.com> --- product_docs/docs/pgd/5/cli/configuring_cli.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/pgd/5/cli/configuring_cli.mdx b/product_docs/docs/pgd/5/cli/configuring_cli.mdx index 4c65547db54..92ba5f7ba80 100644 --- a/product_docs/docs/pgd/5/cli/configuring_cli.mdx +++ b/product_docs/docs/pgd/5/cli/configuring_cli.mdx @@ -3,7 +3,7 @@ title: "Configuring PGD CLI" navTitle: "Configuring PGD CLI" --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (for example, edb_admin on BigAnimal distributed high-availability) to use PGD CLI. ## PGD CLI and database connection strings From 62c5fb8b21b75b774805c014d02173b09aa67116 Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 10 Aug 2023 13:51:45 +0100 Subject: [PATCH 18/38] Fix security links Signed-off-by: Dj Walker-Morgan --- product_docs/docs/pgd/5/cli/discover_connections.mdx | 2 +- product_docs/docs/pgd/5/cli/installing_cli.mdx | 2 +- product_docs/docs/pgd/5/cli/using_cli.mdx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx index e0f89c54e2e..73470c05c1b 100644 --- a/product_docs/docs/pgd/5/cli/discover_connections.mdx +++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx @@ -5,7 +5,7 @@ indexdepth: 2 deepToC: true --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. 
You will require a user with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. ## PGD CLI and database connection strings diff --git a/product_docs/docs/pgd/5/cli/installing_cli.mdx b/product_docs/docs/pgd/5/cli/installing_cli.mdx index 0df2556e8e6..2ec709cf331 100644 --- a/product_docs/docs/pgd/5/cli/installing_cli.mdx +++ b/product_docs/docs/pgd/5/cli/installing_cli.mdx @@ -3,7 +3,7 @@ title: "Installing PGD CLI" navTitle: "Installing PGD CLI" --- -PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. +PGD CLI can be installed on any system which is able to connect to the PGD cluster. You will require a user with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (e.g. edb_admin on BigAnimal distributed high-availability) to use PGD CLI. ## Installing automatically with Trusted Postgres Architect (TPA) By default, Trusted Postgres Architect installs and configures PGD CLI on each PGD node. If you want to install PGD CLI on any non-PGD instance in the cluster, attach the pgdcli role to that instance in Trusted Postgres Architect's configuration file before deploying. See [Trusted Postgres Architect](/tpa/latest/) for more information. diff --git a/product_docs/docs/pgd/5/cli/using_cli.mdx b/product_docs/docs/pgd/5/cli/using_cli.mdx index 7c26a33afb2..3ea54d77ce3 100644 --- a/product_docs/docs/pgd/5/cli/using_cli.mdx +++ b/product_docs/docs/pgd/5/cli/using_cli.mdx @@ -5,7 +5,7 @@ navTitle: "Using PGD CLI" ## What is the PGD CLI -The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres users with PGD superuser privileges - the [bdr_superuser role](../security - or equivalent (e.g. edb_admin on BigAnimal distributed high availability) to use it. +The PGD CLI is a convenient way to connect to and manage your PGD cluster. You will need the credentials of a Postgres users with PGD superuser privileges - the [bdr_superuser role](../security) - or equivalent (e.g. edb_admin on BigAnimal distributed high availability) to use it. !!! Important Setting passwords PGD CLI does not interactively prompt for your user's password. You must pass your password using one of the following methods: From c75278ad7423671d3274b45c2a3c8da3a6f32c6b Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 10 Aug 2023 17:14:07 +0100 Subject: [PATCH 19/38] Filled in missing para accidently command-z'd away. Signed-off-by: Dj Walker-Morgan --- product_docs/docs/pgd/5/cli/discover_connections.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx index 73470c05c1b..1ec59af2896 100644 --- a/product_docs/docs/pgd/5/cli/discover_connections.mdx +++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx @@ -13,7 +13,7 @@ You may not need a database connection string. For example, when Trusted Postgre ## Getting your database connection string -Every deployment method has a different way of +Every deployment method has a different way of deriving a connection string for it. This is because of the range of different configurations that PGD supports. 
Generally, you can obtain the required information from the configuration of your deployment; this section provides a guide of how to assemble that information into connection strings. ### For a TPA-deployed PGD cluster From 29e23aaefa88f49ce73be0427756e5a28ed05c1e Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Thu, 10 Aug 2023 18:31:11 +0100 Subject: [PATCH 20/38] Added more usernames for default PDG SUs Signed-off-by: Dj Walker-Morgan --- product_docs/docs/pgd/5/cli/discover_connections.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/product_docs/docs/pgd/5/cli/discover_connections.mdx b/product_docs/docs/pgd/5/cli/discover_connections.mdx index 1ec59af2896..61bde0b5ed4 100644 --- a/product_docs/docs/pgd/5/cli/discover_connections.mdx +++ b/product_docs/docs/pgd/5/cli/discover_connections.mdx @@ -17,7 +17,7 @@ Every deployment method has a different way of deriving a connection string for ### For a TPA-deployed PGD cluster -Because TPA is so flexible, you will have to derive your connection string from your cluster configuration file (config.yml). You will need the name or IP address of a host with the role pgd-proxy listed for it. This host will have a proxy you can connect to. Usually the proxy will be listening on port 6432 (check the setting for `default_pgd_proxy_options` and `listen_port` in the config to confirm). The default database name is `bdrdb` (check the setting `bdr_database` in the config to confirm) and the default PGD superuser will be `enterprisedb`. +Because TPA is so flexible, you will have to derive your connection string from your cluster configuration file (config.yml). You will need the name or IP address of a host with the role pgd-proxy listed for it. This host will have a proxy you can connect to. Usually the proxy will be listening on port 6432 (check the setting for `default_pgd_proxy_options` and `listen_port` in the config to confirm). The default database name is `bdrdb` (check the setting `bdr_database` in the config to confirm) and the default PGD superuser will be `enterprisedb` for EPAS and `postgres` for Postgres and Postgres Extended. You can then assemble a connection string based on that information: @@ -70,7 +70,7 @@ In our example, we use the IP address because the configuration is from a Docker ### For an EDB PGD for Kubernetes deployed cluster -As with TPA, EDB PGD for Kubernetes is very flexible and there is no one way to obtain a connection string. It depends, in large part, on how the [Services](https://www.enterprisedb.com/docs/postgres_distributed_for_kubernetes/latest/connectivity/#services) have been configured for the deployment. If the Node Service Template is used, there should be direct connectivity to each node and proxy service available. If the Group Service Template, there will be a gateway service to each group. Finally, if the Proxy Service Template has been used, there should be a single proxy providing an entry point to the cluster for all applications. Consult your configuration file to determine this information. You should be able to establish a host name or IP address, port, database name (default: `bdrdb`) and username (default: `enterprisedb`). +As with TPA, EDB PGD for Kubernetes is very flexible and there is no one way to obtain a connection string. It depends, in large part, on how the [Services](https://www.enterprisedb.com/docs/postgres_distributed_for_kubernetes/latest/connectivity/#services) have been configured for the deployment. 
If the Node Service Template is used, there should be direct connectivity to each node and proxy service available. If the Group Service Template, there will be a gateway service to each group. Finally, if the Proxy Service Template has been used, there should be a single proxy providing an entry point to the cluster for all applications. Consult your configuration file to determine this information. You should be able to establish a host name or IP address, port, database name (default: `bdrdb`) and username (`enterprisedb` for EPAS and `postgres` for Postgres and Postgres Extended.). You can then assemble a connection string based on that information: From c85ce2ebf6ecc2286da22a4d23100e77902b1da2 Mon Sep 17 00:00:00 2001 From: nidhibhammar <59045594+nidhibhammar@users.noreply.github.com> Date: Fri, 11 Aug 2023 13:28:23 +0530 Subject: [PATCH 21/38] Update product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx --- .../03_using__row_type_in_record_declarations.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx index 77d6e812c82..85b6c134677 100644 --- a/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx +++ b/product_docs/docs/epas/15/application_programming/epas_compat_spl/03_variable_declarations/03_using__row_type_in_record_declarations.mdx @@ -14,7 +14,7 @@ The `%TYPE` attribute provides an easy way to create a variable that depends on A *record* is a named, ordered collection of fields. A *field* is similar to a variable. It has an identifier and data type but has the additional property of belonging to a record. You must refereence it using dot notation with the record name as its qualifier. -## Sytax +## Syntax You can use the `%ROWTYPE` attribute to declare a record. The `%ROWTYPE` attribute is prefixed by a table name. Each column in the named table defines an identically named field in the record with the same data type as the column. 
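As a quick illustration of the paragraph above, here is a minimal SPL sketch of a `%ROWTYPE` declaration. It assumes a sample `emp` table with `empno` and `ename` columns, as used in the EDB sample schemas; substitute your own table and column names.

```sql
DECLARE
    v_emp    emp%ROWTYPE;   -- one field per column of emp, each with the column's data type
BEGIN
    SELECT * INTO v_emp FROM emp WHERE empno = 7369;
    -- Fields are referenced with dot notation: record_name.field_name
    DBMS_OUTPUT.PUT_LINE('Employee name: ' || v_emp.ename);
END;
```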
From 8c77d1b24af15e910a5cc98f1ca31ca9f316f892 Mon Sep 17 00:00:00 2001 From: Dee Dee Rothery <83650384+drothery-edb@users.noreply.github.com> Date: Fri, 11 Aug 2023 09:22:27 -0400 Subject: [PATCH 22/38] fixed missed cluster name changes --- .../biganimal/release/overview/05_database_version_policy.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx b/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx index 1bb3d0346b3..3b626e8258c 100644 --- a/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx +++ b/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx @@ -9,8 +9,8 @@ We support the major Postgres versions from the date they're made available unti | Postgres distribution | Versions | | ---------------------------- | --------------------------------------------------- | | PostgreSQL | 11–15 | -| EDB Postgres Advanced Server | 11–15, 14-15 for extreme-high-availability clusters | -| EDB Postgres Extended Server | 14-15 for extreme-high-availability clusters | +| EDB Postgres Advanced Server | 11–15, 14-15 for distributed high-availability clusters | +| EDB Postgres Extended Server | 14-15 for distibuted high-availability clusters | ## End-of-life policy From dbfb424a44d9dd0c94b2bab23fdb1ecf8e003b03 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Fri, 11 Aug 2023 09:54:53 -0400 Subject: [PATCH 23/38] more instances --- .../creating_a_cluster/creating_an_eha_cluster.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx b/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx index 87a3f8b4986..fc55060c4d4 100644 --- a/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx +++ b/product_docs/docs/biganimal/release/getting_started/creating_a_cluster/creating_an_eha_cluster.mdx @@ -1,10 +1,10 @@ --- -title: Creating an extreme-high-availability cluster +title: Creating a distributed high-availability cluster --- -When you create an extreme-high-availability cluster, you need to set up the data group. Extreme-high-availability clusters can contain one or two data groups. +When you create a distributed high-availability cluster, you need to set up the data group. Distributed high-availability clusters can contain one or two data groups. -1. After specifying **Extreme High Availability** as your cluster type on the **Cluster Info** tab and your cluster name and password on the **Cluster Settings** tab, select **Next: Data Groups**. +1. After specifying **Distributed High Availability** as your cluster type on the **Cluster Info** tab and your cluster name and password on the **Cluster Settings** tab, select **Next: Data Groups**. 1. On the **Nodes Settings** tab, in the **Nodes** section, select **Two Data Nodes** or **Three Data Nodes**. @@ -32,7 +32,7 @@ When you create an extreme-high-availability cluster, you need to set up the dat ## Creating a second data group -After creating the first data group, you can create a second data group for your extreme-high-availability cluster by selecting **Add a Data Group** before you create the cluster. 
+After creating the first data group, you can create a second data group for your distributed high-availability cluster by selecting **Add a Data Group** before you create the cluster. By default, the settings for your first data group populate the second data group's settings. However, if you want to change certain settings you can. Just know that your changes can change the settings for the entire cluster. That being said, the database type and cloud provider must be consistent across both data groups. The data groups and the witness group must all be in different regions. Otherwise, you can choose the second data group's settings as needed. From 4d5f6426389f18d26111e5902a122b2c8462f483 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Fri, 11 Aug 2023 09:56:11 -0400 Subject: [PATCH 24/38] fixed typo --- .../biganimal/release/overview/05_database_version_policy.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx b/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx index 3b626e8258c..fd770d45126 100644 --- a/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx +++ b/product_docs/docs/biganimal/release/overview/05_database_version_policy.mdx @@ -10,7 +10,7 @@ We support the major Postgres versions from the date they're made available unti | ---------------------------- | --------------------------------------------------- | | PostgreSQL | 11–15 | | EDB Postgres Advanced Server | 11–15, 14-15 for distributed high-availability clusters | -| EDB Postgres Extended Server | 14-15 for distibuted high-availability clusters | +| EDB Postgres Extended Server | 14-15 for distributed high-availability clusters | ## End-of-life policy From 8d75fe509d4c54a863ed4875192dd9a24e3e4d50 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Mon, 7 Aug 2023 16:18:46 -0400 Subject: [PATCH 25/38] BigAnimal: updates to PGD CLI commands --- .../reference/cli/managing_clusters.mdx | 247 ++++++++++++++++-- .../release/reference/cli/using_features.mdx | 14 +- 2 files changed, 235 insertions(+), 26 deletions(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index 889ad75812f..f3d6e8b5684 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -6,9 +6,9 @@ deepToC: true These examples show Azure as the cloud provider unless indicated otherwise. The functionality is the same when using AWS or Google Cloud. -## Using `cluster` commands +## Managing single-node and high-availability clusters -Use the `cluster` commands to create, retrieve information on, and manage single node and high-availability clusters. +Use the `cluster` commands to create, retrieve information on, and manage single-node and high-availability clusters. ### Create a cluster in interactive mode @@ -39,8 +39,7 @@ Networking: Public By default your cluster allows all inbound communications, add IP allowed list to restrict the access: Yes Add CIDR blocks "192.168.1.1/16=Sample Description" leave empty to stop adding: Add database config in the format "application_name=sample_app&array_nulls=true", Leave empty for default configuration: -Backup Retention Period, note backups will incur storage charges from the cloud provider directly. e.g. 
"7d", "2w" or "3m": 30d - +Backup Retention Period, note backups will incur storage charges from the cloud provider directly. e.g. "7d", "2w" or "3m": 30d ``` You're prompted to confirm that you want to create the cluster. After the cluster creation process is complete, it generates a cluster ID. @@ -68,8 +67,8 @@ __OUTPUT__ ├──────────────┼──────────────────────┼──────────┼──────────────┼──────────────────────────┼─────────────┼───────────────┼───────────────────────────────┼────────────────────┼────────────┤ │ p-gxhkfww1fe │ my-biganimal-cluster │ Azure │ ha │ Cluster in healthy state │ East US │ E2s v3 │ EDB Postgres Advanced Server │ Disabled │ N/A │ │ │ │ │ │ │ │ │ │ │ │ -└──────────────┴──────────────────────┴──────────┴──────────────┴──────────────────────────┴─────────────┴───────────────┴───────────────────────────────┴────────────────────┴────────────┘``` - +└──────────────┴──────────────────────┴──────────┴──────────────┴──────────────────────────┴─────────────┴───────────────┴───────────────────────────────┴────────────────────┴────────────┘ +``` ### Create a cluster using a configuration file @@ -250,7 +249,140 @@ You can list all deleted clusters using the `show-deleted-clusters` command and ### Restore a cluster -BigAnimal continuously backs up your PostgrSQL clusters. Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and with different configurations. You can specify new configurations in the `cluster restore` command. For example: +BigAnimal continuously backs up your PostgreSQL clusters. Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and with different configurations. You can specify new configurations in the `cluster restore` command. For example: + +```shell +biganimal cluster restore\ + --name "my-biganimal-cluster" \ + --provider "azure" \ + --region "eastus" \ + --password "mypassword@123" \ + --new-name "my-biganimal-cluster-restored" \ + --new-region="eastus2" \ + --cluster-architecture "single" \ + --instance-type "azure:Standard_E2s_v3" \ + --volume-type "azurepremiumstorage" \ + --volume-property "P1" \ + --networking "public" \ + --cidr-blocks="10.10.10.10/27=Traffic from App B" \ + --restore-point "2022-01-26T15:04:05+0800" \ + --backup-retention-period "2w" \ + --read-only-workloads: "true" +``` + +The password for the restored cluster is mandatory. The other parameters, if not specified, inherit the source database's settings. + +To restore a deleted cluster, use the `--from-deleted` flag in the command. + +!!! Note +You can restore a cluster in a single cluster to a high-availability cluster and vice versa. You can restore an extreme-high-availability cluster only to a cluster using the same architecture. +!!! + +### Get cluster connection information + +To use your BigAnimal cluster, you first need to get your cluster's connection information. 
To get your cluster's connection information, use the `cluster show-connection` command: + +```shell +biganimal cluster show-connection \ + --name "my-biganimal-cluster" \ + --provider "azure" \ + --region "eastus" +__OUTPUT__ +┌─────────────┬──────────────────────────────────────────────────────────────────────────────────────────┐ +│ Access Type │ Connection String │ +├─────────────┼──────────────────────────────────────────────────────────────────────────────────────────┤ +│ read-write │ postgresql://edb_admin@p-gxhkfww1fe.30glixgayvwhtmn3.enterprisedb.network:5432/edb_admin │ +│ read-only │ Disabled │ +└─────────────┴──────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +!!!tip +You can query the complete connection information with other output formats, like JSON or YAML. For example: + +```shell +biganimal cluster show-connection \ + --name "my-biganimal-cluster" \ + --provider "azure" \ + --region "eastus" \ + --output "json" +``` +!!! + +### Update cluster + +After the cluster is created, you can update attributes of the cluster, including both the cluster’s profile and its deployment architecture. You can update the following attributes: + +- Cluster name +- Password of administrator account +- Cluster architecture +- Number of standby replicas +- Instance type of cluster +- Instance volume properties +- Networking +- Allowed IP list +- Postgres database configuration +- Volume properties, size, IOPS +- Retention period +- Read-only workloads +- IAM authentication + +For example, to set the public allowed IP range list, use the `--cidr-blocks` flag: + +```shell +./biganimal cluster update --name "my-biganimal-cluster" --provider "azure" \ + --region "eastus" \ + --cidr-blocks "9.9.9.9/28=Traffic from App A" +``` + +To check whether the setting took effect, use the `cluster show` command, and view the detailed cluster information output in JSON format. For example: + +```shell +biganimal cluster show --name "my-biganimal-cluster" --provider "azure" \ + --region "eastus" \ + --output "json" \ +| jq '.[0].allowIpRangeMap' +__OUTPUT__ +[ + [ + "9.9.9.9/28", + "Traffic from App A" + ] +] +``` + +### Update the Postgres configuration of a cluster + +To update the Postgres configuration of a BigAnimal cluster directly from the CLI: + +```shell +biganimal cluster update --id "p-gxhkfww1fe" \ + --pg-config "application_name=ba_test_app,array_nulls=false" +__OUTPUT__ +Update Cluster operation is started +Cluster ID is "p-gxhkfww1fe" +``` +To specify multiple configurations, you can use multiple `--pg-config` flags or include multiple configuration settings as a key-value array string separated by commas in one `--pg-config` flag. If a Postgres setting contains a comma, you need to specify it with a separate `--pg-config` flag. + +!!! Note +You can update the cluster architecture with the `--cluster-architecture` flag. The only supported scenario is to update a single-node cluster to a high-availability cluster. +!!! + +### Delete a cluster + +To delete a cluster you no longer need, use the `cluster delete` command. For example: + +```shell +biganimal cluster delete \ + --name "my-biganimal-cluster" \ + --provider "azure" \ + --region "eastus" +``` + +You can list all deleted clusters using the `show-deleted-clusters` command and restore them from their history backups as needed. + + +### Restore a cluster +BigAnimal continuously backs up your PostgreSQL clusters. 
Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and with different configurations. You can specify new configurations in the `cluster restore` command. For example: ```shell biganimal cluster restore\ @@ -279,14 +411,27 @@ To restore a deleted cluster, use the `--from-deleted` flag in the command. You can restore a cluster in a single cluster to a high-availability cluster and vice versa. You can restore an extreme-high-availability cluster only to a cluster using the same architecture. !!! -## Using `pgd` commands -Use the `pgd` commands to create, retrieve information on, and manage extreme-high-availability clusters. +## Managing extreme-high-availability clusters + +Use the BigAnimal `pgd` commands to create, retrieve information on, and manage extreme-high-availability clusters. +In addition to these BigAnimal `pgd` CLI commands, you can use [EDB Postgres Distributed CLI commands](/pgd/latest/cli/) to manage your extreme-high-availability clusters. -### `pgd create` +!!!warning +Don't use the EDB Postgres Distributed `create-proxy` and `delete-proxy` CLI commands with your BigAnimal distributed high-availability clusters. -Create an extreme-high-availability cluster using a YAML configuration file. For example, +### Create an extreme-high-availability cluster + +Create an extreme-high-availability cluster using a YAML configuration file. + +The syntax of the command is: + +``` +biganimal pgd create --config-file +``` + +Where `` is a valid path to a YAML configuration file. For example: ``` clusterName: pgd-cli-name @@ -330,9 +475,17 @@ witnessGroups: - region: uksouth ``` -### `pgd add-group` +### Add a data group -Add a data group using a YAML configuration file. For example: +Add a data group using a YAML configuration file. + +The syntax of the command is: + +``` +biganimal pgd add-group --config-file +``` + +Where `` is a valid path to a YAML configuration file. For example: ``` clusterId: clusterID @@ -360,22 +513,70 @@ witnessGroups: - region: westus2 ``` -### `pgd update` +### Update an extreme-high-availability cluster -Update an extreme-high-availability cluster and its data groups using a `pgd cluster` configuration file. +Update an extreme-high-availability cluster and its data groups using a YAML configuration file. -### `pgd show` +The syntax of the command is: -Show active or deleted extreme-high-availability clusters. +``` +pgd update [--config-file] +``` + +Where `` is a valid path to a YAML configuration file with the same format as a configuration file for creating an extreme-high-availability cluster. See [Create an extreme-high-availability cluster](#create-an-extreme-high-availability-cluster). + +### Show extreme-high-availability clusters -### `pgd restore` +Show all active clusters or a specific cluster. You can also optionally show deleted clusters. -Restore an extreme-high-availability cluster or a deleted extreme-high-availability cluster to a new cluster on same cloud provider. You can restore an active cluster or a deleted cluster within its retention period You can restore only one data group. By default, the new cluster inherits all settings of the source cluster. You can change the cluster setting and database configurations by specifying new values in the restore command. 
+The syntax of the command is: + +``` +biganimal pgd show [--id] [--deleted] +``` -### `pgd show-group-connection` +### Restore an extreme-high-availability cluster -Get the connection string of the desired Postgres cluster. +Restore an extreme-high-availability cluster or a deleted extreme-high-availability cluster to a new cluster on the same cloud provider. You can restore an active cluster or a deleted cluster within its retention period. You can only restore one data group. By default, the new cluster inherits all settings of the source cluster. You can change the cluster setting and database configurations by specifying new values in the configuration file. -### `pgd show-group-monitoring-urls` +The syntax of the command is: -Show extreme-high-availability group monitoring URLs. \ No newline at end of file +``` +pgd restore [--config-file] +``` + +Where `` is a valid path to a YAML configuration file. For example: + +``` +clusterName: pgd-restore-name +password: Meredith Palmer Memorial +dataNodes: 2 +clusterId: p-9fdkl5ju29 +dataGroups: + - iamAuthentication: false + region: uksouth + instanceType: azure:Standard_E2s_v3 + volumeType: azurepremiumstorage + volumeProperties: P2 + allowIpRangeMap: + - cidr: 9.9.9.9/28 + description: Allow traffic from App A + - cidr: 10.10.10.10/27 + description: Allow traffic from App B + pgConfigMap: + application_name: test + array_nulls: true + backupRetentionPeriod: 30d + sourceGroupId: p-9fdkl5ju29-a +``` + +### Get distributed-high-availability cluster connection information + +You first need to get your cluster group's connection information in order to connect to and use your BigAnimal distributed-high-availability cluster. + +The syntax of the command is: + +``` +biganimal pgd show-group-connection {--id --group-id} [--read-only] \ + [--read-write] +``` diff --git a/product_docs/docs/biganimal/release/reference/cli/using_features.mdx b/product_docs/docs/biganimal/release/reference/cli/using_features.mdx index 5a52d800b73..a55c32443fc 100644 --- a/product_docs/docs/biganimal/release/reference/cli/using_features.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/using_features.mdx @@ -89,13 +89,21 @@ For more information on IAM authentication for AWS, see [IAM authentication for You can get the URLs to access Prometheus metrics and logs in your cloud provider's blob storage solution using the `cluster show-monitoring-urls` CLI command. -The syntax of the command is: +For single-node and high-availability clusters, the syntax of the command is: ``` -cluster show-monitoring-urls {--id | --provider --region --name} \ - [--metrics] [--logs] +biganimal cluster show-monitoring-urls {--id | --provider --region \ + --name} [--metrics] [--logs] ``` +For extreme-high-availability clusters, the syntax of the command is: + +``` +biganimal pgd show-group-monitoring-urls {--id --group-id} [--metrics] \ + [--logs] +``` + + If you don't use the optional flags to specify the output type, the output includes both the metrics URL and the logs URL. See [Other monitoring and logging solutions](/biganimal/latest/using_cluster/05_monitoring_and_logging/other_monitoring/) for more information about using the URLs to access metrics and logs. 
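As a hedged usage sketch of the syntax above, the following reuses the sample cluster name, provider, region, and IDs from the cluster-management examples earlier in this document; the flags come from the documented syntax, and `--metrics` or `--logs` limits the output to one URL.

```shell
# Single-node or high-availability cluster: request only the metrics URL
biganimal cluster show-monitoring-urls \
  --name "my-biganimal-cluster" \
  --provider "azure" \
  --region "eastus" \
  --metrics

# Extreme-high-availability cluster: request only the logs URL
# (the cluster and group IDs reuse the sample values from the restore example)
biganimal pgd show-group-monitoring-urls \
  --id "p-9fdkl5ju29" \
  --group-id "p-9fdkl5ju29-a" \
  --logs
```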
From b332d232ea53b6e03657a8126e19177f2cf1eb3d Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Wed, 9 Aug 2023 09:42:36 -0400 Subject: [PATCH 26/38] fixed stray distributed HA references --- .../biganimal/release/reference/cli/managing_clusters.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index f3d6e8b5684..2d385983ff9 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -419,7 +419,7 @@ Use the BigAnimal `pgd` commands to create, retrieve information on, and manage In addition to these BigAnimal `pgd` CLI commands, you can use [EDB Postgres Distributed CLI commands](/pgd/latest/cli/) to manage your extreme-high-availability clusters. !!!warning -Don't use the EDB Postgres Distributed `create-proxy` and `delete-proxy` CLI commands with your BigAnimal distributed high-availability clusters. +Don't use the EDB Postgres Distributed `create-proxy` and `delete-proxy` CLI commands with your BigAnimal extreme-high-availability clusters. ### Create an extreme-high-availability cluster @@ -570,9 +570,9 @@ dataGroups: sourceGroupId: p-9fdkl5ju29-a ``` -### Get distributed-high-availability cluster connection information +### Get extreme-high-availability cluster connection information -You first need to get your cluster group's connection information in order to connect to and use your BigAnimal distributed-high-availability cluster. +You first need to get your cluster group's connection information in order to connect to and use your BigAnimal extreme-high-availability cluster. The syntax of the command is: From 25fec66ab2e2446df78f3522fde294bf5d608f80 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Wed, 9 Aug 2023 12:54:45 -0400 Subject: [PATCH 27/38] removed deep ToC --- .../docs/biganimal/release/reference/cli/managing_clusters.mdx | 1 - 1 file changed, 1 deletion(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index 2d385983ff9..f5c3ea770ad 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -1,7 +1,6 @@ --- title: Managing clusters using the CLI navTitle: Managing clusters -deepToC: true --- These examples show Azure as the cloud provider unless indicated otherwise. The functionality is the same when using AWS or Google Cloud. From 7b4ee0e9845b2d595f8f8bdf36e4fe666bfd7f13 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Wed, 9 Aug 2023 14:48:59 -0400 Subject: [PATCH 28/38] testing deep ToC --- .../docs/biganimal/release/reference/cli/managing_clusters.mdx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index f5c3ea770ad..521f016abb8 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -1,6 +1,7 @@ --- title: Managing clusters using the CLI navTitle: Managing clusters +deepToC: true --- These examples show Azure as the cloud provider unless indicated otherwise. The functionality is the same when using AWS or Google Cloud. 
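As a sketch of that provider-independence, the `cluster show-connection` example from earlier in this document can be pointed at an AWS-hosted cluster by switching only the provider and region flags; the AWS region shown here is an assumption for illustration, not a value taken from these changes.

```shell
biganimal cluster show-connection \
  --name "my-biganimal-cluster" \
  --provider "aws" \
  --region "us-east-1"
```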
@@ -248,6 +249,7 @@ You can list all deleted clusters using the `show-deleted-clusters` command and ### Restore a cluster + BigAnimal continuously backs up your PostgreSQL clusters. Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and with different configurations. You can specify new configurations in the `cluster restore` command. For example: ```shell From dfa313f092c38aa57fc826e1c4d244e1d9a9e8d8 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Wed, 9 Aug 2023 15:37:57 -0400 Subject: [PATCH 29/38] Revert "testing deep ToC" This reverts commit e707725088e70d70ff1a5ea97087027555449328. --- .../docs/biganimal/release/reference/cli/managing_clusters.mdx | 2 -- 1 file changed, 2 deletions(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index 521f016abb8..f5c3ea770ad 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -1,7 +1,6 @@ --- title: Managing clusters using the CLI navTitle: Managing clusters -deepToC: true --- These examples show Azure as the cloud provider unless indicated otherwise. The functionality is the same when using AWS or Google Cloud. @@ -249,7 +248,6 @@ You can list all deleted clusters using the `show-deleted-clusters` command and ### Restore a cluster - BigAnimal continuously backs up your PostgreSQL clusters. Using the CLI, you can restore a cluster from its backup to any point in time as long as the backups are retained in the backup storage. The restored cluster can be in another region and with different configurations. You can specify new configurations in the `cluster restore` command. 
For example: ```shell From 9eb078cc3bcd7ab8c11c8b65c9a07e072f3b7297 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Thu, 10 Aug 2023 05:03:38 -0400 Subject: [PATCH 30/38] Fixed stray slash --- product_docs/docs/biganimal/release/reference/cli/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/index.mdx b/product_docs/docs/biganimal/release/reference/cli/index.mdx index 4566e98a9e9..88d2576049d 100644 --- a/product_docs/docs/biganimal/release/reference/cli/index.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/index.mdx @@ -54,7 +54,7 @@ Before using the CLI to manage BigAnimal, you need to authenticate as a valid Bi ```shell biganimal credential create\ - --name "ba-user1" \ + --name "ba-user1" __OUTPUT__ Querying Authentication Endpoint for 'portal.biganimal.com' First, copy your one-time code: From ae1db39dbafe1210787b4c97a79d07010eb11a17 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Thu, 10 Aug 2023 10:36:13 -0400 Subject: [PATCH 31/38] fixing tip formatting --- .../docs/biganimal/release/reference/cli/managing_clusters.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index f5c3ea770ad..433afd9f185 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -418,7 +418,7 @@ Use the BigAnimal `pgd` commands to create, retrieve information on, and manage In addition to these BigAnimal `pgd` CLI commands, you can use [EDB Postgres Distributed CLI commands](/pgd/latest/cli/) to manage your extreme-high-availability clusters. !!!warning -Don't use the EDB Postgres Distributed `create-proxy` and `delete-proxy` CLI commands with your BigAnimal extreme-high-availability clusters. + Don't use the EDB Postgres Distributed `create-proxy` and `delete-proxy` CLI commands with your BigAnimal extreme-high-availability clusters. ### Create an extreme-high-availability cluster From 76488817b45425c8a23c88b456d90ab1bec4435e Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Thu, 10 Aug 2023 11:02:36 -0400 Subject: [PATCH 32/38] added deep toc back in --- .../docs/biganimal/release/reference/cli/managing_clusters.mdx | 1 + 1 file changed, 1 insertion(+) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index 433afd9f185..1eb2a9b7735 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -1,6 +1,7 @@ --- title: Managing clusters using the CLI navTitle: Managing clusters +deepToC: true --- These examples show Azure as the cloud provider unless indicated otherwise. The functionality is the same when using AWS or Google Cloud. 
From 222e38d26072a4be08455b9f93ae5570f477d731 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Thu, 10 Aug 2023 14:10:08 -0400 Subject: [PATCH 33/38] used new cluster names in text, left old names in actual CLI output where they haven't changed yet --- .../reference/cli/managing_clusters.mdx | 47 ++++++++++--------- .../release/reference/cli/using_features.mdx | 6 +-- 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index 1eb2a9b7735..c769459ddb1 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -6,9 +6,9 @@ deepToC: true These examples show Azure as the cloud provider unless indicated otherwise. The functionality is the same when using AWS or Google Cloud. -## Managing single-node and high-availability clusters +## Managing single-node and primary/standby high-availability clusters -Use the `cluster` commands to create, retrieve information on, and manage single-node and high-availability clusters. +Use the `cluster` commands to create, retrieve information on, and manage single-node and primary/standby high-availability clusters. ### Create a cluster in interactive mode @@ -18,7 +18,7 @@ The default mode for the `cluster create` and `pgd create` commands is an intera You can turn off prompting using the `biganimal config set interactive_mode off` command. With prompting disabled, if any required flags are missing, the CLI exits with an error. !!! -For example, to create a high-availability cluster: +For example, to create a primary/standby high-availability cluster: ```shell biganimal cluster create @@ -138,7 +138,7 @@ __OUTPUT__ ``` !!!Note -Extreme-high-availability architecture isn't enabled by default. To get access, contact your sales representative or [Support](/biganimal/release/overview/support). +Distributed high-availability architecture isn't enabled by default. To get access, contact your sales representative or [Support](/biganimal/release/overview/support). !!! !!!Tip @@ -231,7 +231,7 @@ Cluster ID is "p-gxhkfww1fe" To specify multiple configurations, you can use multiple `--pg-config` flags or include multiple configuration settings as a key-value array string separated by commas in one `--pg-config` flag. If a Postgres setting contains a comma, you need to specify it with a separate `--pg-config` flag. !!! Note -You can update the cluster architecture with the `--cluster-architecture` flag. The only supported scenario is to update a single-node cluster to a high-availability cluster. +You can update the cluster architecture with the `--cluster-architecture` flag. The only supported scenario is to update a single-node cluster to a primary/standby high-availability cluster. !!! ### Delete a cluster @@ -275,7 +275,7 @@ The password for the restored cluster is mandatory. The other parameters, if not To restore a deleted cluster, use the `--from-deleted` flag in the command. !!! Note -You can restore a cluster in a single cluster to a high-availability cluster and vice versa. You can restore an extreme-high-availability cluster only to a cluster using the same architecture. +You can restore a cluster in a single cluster to a primary/standby high-availability cluster and vice versa. You can restore a distributed high-availability cluster only to a cluster using the same architecture. !!! 
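For example, a hypothetical sketch of restoring a deleted cluster, assuming the placeholder cluster ID `p-gxhkfww1fe` and assuming the `cluster restore` command accepts `--id`, `--name`, and `--password` flags for the new cluster:

```shell
biganimal cluster restore --id p-gxhkfww1fe --from-deleted \
  --name "restored-cluster" --password "illustrative_pw_1"
```

Here `restored-cluster` and the password are illustrative values only; the password for the restored cluster is mandatory, as noted above.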
### Get cluster connection information @@ -364,7 +364,7 @@ Cluster ID is "p-gxhkfww1fe" To specify multiple configurations, you can use multiple `--pg-config` flags or include multiple configuration settings as a key-value array string separated by commas in one `--pg-config` flag. If a Postgres setting contains a comma, you need to specify it with a separate `--pg-config` flag. !!! Note -You can update the cluster architecture with the `--cluster-architecture` flag. The only supported scenario is to update a single-node cluster to a high-availability cluster. +You can update the cluster architecture with the `--cluster-architecture` flag. The only supported scenario is to update a single-node cluster to a primary/standby high-availability cluster. !!! ### Delete a cluster @@ -408,22 +408,23 @@ The password for the restored cluster is mandatory. The other parameters, if not To restore a deleted cluster, use the `--from-deleted` flag in the command. !!! Note -You can restore a cluster in a single cluster to a high-availability cluster and vice versa. You can restore an extreme-high-availability cluster only to a cluster using the same architecture. +You can restore a cluster in a single cluster to a primary/standby high-availability cluster and vice versa. You can restore a distributed high-availability cluster only to a cluster using the same architecture. !!! -## Managing extreme-high-availability clusters +## Managing distributed high-availability clusters -Use the BigAnimal `pgd` commands to create, retrieve information on, and manage extreme-high-availability clusters. +Use the BigAnimal `pgd` commands to create, retrieve information on, and manage distributed high-availability clusters. -In addition to these BigAnimal `pgd` CLI commands, you can use [EDB Postgres Distributed CLI commands](/pgd/latest/cli/) to manage your extreme-high-availability clusters. +!!!note + In addition to these BigAnimal `pgd` CLI commands, you can use [EDB Postgres Distributed CLI commands](/pgd/latest/cli/) to manage your distributed high-availability clusters. + + Don't confuse the BigAnimal `pgd` commands (`biganimal pgd `) with the EDB Postgres Distributed CLI `pgd` command (`pgd `). While EDB Postgres Distributed CLI works with BigAnimal clusters, it is capable of performing operations which may inhibit that management. Specifically, don't use the EDB Postgres Distributed CLI `create-proxy` and `delete-proxy` commands. -!!!warning - Don't use the EDB Postgres Distributed `create-proxy` and `delete-proxy` CLI commands with your BigAnimal extreme-high-availability clusters. -### Create an extreme-high-availability cluster +### Create a distributed high-availability cluster -Create an extreme-high-availability cluster using a YAML configuration file. +Create a distributed high-availability cluster using a YAML configuration file. The syntax of the command is: @@ -513,9 +514,9 @@ witnessGroups: - region: westus2 ``` -### Update an extreme-high-availability cluster +### Update a distributed high-availability cluster -Update an extreme-high-availability cluster and its data groups using a YAML configuration file. +Update a distributed high-availability cluster and its data groups using a YAML configuration file. The syntax of the command is: @@ -523,9 +524,9 @@ The syntax of the command is: pgd update [--config-file] ``` -Where `` is a valid path to a YAML configuration file with the same format as a configuration file for creating an extreme-high-availability cluster. 
See [Create an extreme-high-availability cluster](#create-an-extreme-high-availability-cluster). +Where `` is a valid path to a YAML configuration file with the same format as a configuration file for creating a distributed high-availability cluster. See [Create a distributed high-availability cluster](#create-a-distributed-high-availability-cluster). -### Show extreme-high-availability clusters +### Show distributed high-availability clusters Show all active clusters or a specific cluster. You can also optionally show deleted clusters. @@ -535,9 +536,9 @@ The syntax of the command is: biganimal pgd show [--id] [--deleted] ``` -### Restore an extreme-high-availability cluster +### Restore a distributed high-availability cluster -Restore an extreme-high-availability cluster or a deleted extreme-high-availability cluster to a new cluster on the same cloud provider. You can restore an active cluster or a deleted cluster within its retention period. You can only restore one data group. By default, the new cluster inherits all settings of the source cluster. You can change the cluster setting and database configurations by specifying new values in the configuration file. +Restore a distributed high-availability cluster or a deleted distributed high-availability cluster to a new cluster on the same cloud provider. You can restore an active cluster or a deleted cluster within its retention period. You can only restore one data group. By default, the new cluster inherits all settings of the source cluster. You can change the cluster setting and database configurations by specifying new values in the configuration file. The syntax of the command is: @@ -570,9 +571,9 @@ dataGroups: sourceGroupId: p-9fdkl5ju29-a ``` -### Get extreme-high-availability cluster connection information +### Get distributed high-availability cluster connection information -You first need to get your cluster group's connection information in order to connect to and use your BigAnimal extreme-high-availability cluster. +You first need to get your cluster group's connection information in order to connect to and use your BigAnimal distributed high-availability cluster. The syntax of the command is: diff --git a/product_docs/docs/biganimal/release/reference/cli/using_features.mdx b/product_docs/docs/biganimal/release/reference/cli/using_features.mdx index a55c32443fc..9430e081012 100644 --- a/product_docs/docs/biganimal/release/reference/cli/using_features.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/using_features.mdx @@ -51,7 +51,7 @@ __OUTPUT__ ### Promote a faraway replica -You use the `faraway-replica promote` command to promote an existing replica to a standalone single-node or high-availability cluster. You can use either interactive mode or specify the settings with flags on the command line. This example shows interactive mode: +You use the `faraway-replica promote` command to promote an existing replica to a standalone single-node or primary/standby high-availability cluster. You can use either interactive mode or specify the settings with flags on the command line. This example shows interactive mode: ``` biganimal faraway-replica promote @@ -89,14 +89,14 @@ For more information on IAM authentication for AWS, see [IAM authentication for You can get the URLs to access Prometheus metrics and logs in your cloud provider's blob storage solution using the `cluster show-monitoring-urls` CLI command. 
-For single-node and high-availability clusters, the syntax of the command is: +For single-node and primary/standby high-availability clusters, the syntax of the command is: ``` biganimal cluster show-monitoring-urls {--id | --provider --region \ --name} [--metrics] [--logs] ``` -For extreme-high-availability clusters, the syntax of the command is: +For distributed high-availability clusters, the syntax of the command is: ``` biganimal pgd show-group-monitoring-urls {--id --group-id} [--metrics] \ From ef1d9a1c5290772946e4241abb63205eb59fbaf1 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Fri, 11 Aug 2023 08:14:13 -0400 Subject: [PATCH 34/38] updating the note again, softening it this time --- .../biganimal/release/reference/cli/managing_clusters.mdx | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index c769459ddb1..8f1ba776ff5 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -417,9 +417,7 @@ You can restore a cluster in a single cluster to a primary/standby high-availabi Use the BigAnimal `pgd` commands to create, retrieve information on, and manage distributed high-availability clusters. !!!note - In addition to these BigAnimal `pgd` CLI commands, you can use [EDB Postgres Distributed CLI commands](/pgd/latest/cli/) to manage your distributed high-availability clusters. - - Don't confuse the BigAnimal `pgd` commands (`biganimal pgd `) with the EDB Postgres Distributed CLI `pgd` command (`pgd `). While EDB Postgres Distributed CLI works with BigAnimal clusters, it is capable of performing operations which may inhibit that management. Specifically, don't use the EDB Postgres Distributed CLI `create-proxy` and `delete-proxy` commands. + In addition to the BigAnimal `pgd` commands, you can switch over and use additional commands available in the EDB Postgres Distributed CLI to perform PGD-specific operations. The only EDB Postgres Distributed CLI commands that aren't applicable with BigAnimal are `create-proxy` and `delete-proxy`. ### Create a distributed high-availability cluster From b23c7d5754065a9cec1e14ce5936b820c0578773 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Fri, 11 Aug 2023 13:18:24 -0400 Subject: [PATCH 35/38] added link that got dropped back in --- .../docs/biganimal/release/reference/cli/managing_clusters.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx index 8f1ba776ff5..b46c2d00abd 100644 --- a/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx +++ b/product_docs/docs/biganimal/release/reference/cli/managing_clusters.mdx @@ -417,7 +417,7 @@ You can restore a cluster in a single cluster to a primary/standby high-availabi Use the BigAnimal `pgd` commands to create, retrieve information on, and manage distributed high-availability clusters. !!!note - In addition to the BigAnimal `pgd` commands, you can switch over and use additional commands available in the EDB Postgres Distributed CLI to perform PGD-specific operations. The only EDB Postgres Distributed CLI commands that aren't applicable with BigAnimal are `create-proxy` and `delete-proxy`. 
+ In addition to the BigAnimal `pgd` commands, you can switch over and use additional commands available in the [EDB Postgres Distributed CLI](/pgd/latest/cli/) to perform PGD-specific operations. The only EDB Postgres Distributed CLI commands that aren't applicable with BigAnimal are `create-proxy` and `delete-proxy`. ### Create a distributed high-availability cluster From ce92dd0b264ab4261155ff9a1a1e3bf26227a35e Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Fri, 11 Aug 2023 13:27:06 -0400 Subject: [PATCH 36/38] fixed link --- .../docs/biganimal/release/overview/02_high_availability.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx index 5d5133c0c74..33c2cf1627a 100644 --- a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx +++ b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx @@ -80,4 +80,4 @@ A single-region configuration with three data nodes (one lead and two shadow nod For instructions on creating a distributed high-availability cluster using the BigAnimal portal, see [Creating a distributed high-availability cluster](../getting_started/creating_a_cluster/creating_an_eha_cluster/). -For instructions on creating, retrieving information from, and managing a distributed high-availability cluster using the BigAnimal CLI, see [Using `pgd` commands](/biganimal/latest/reference/cli/managing_clusters/#using-pgd-commands). +For instructions on creating, retrieving information from, and managing a distributed high-availability cluster using the BigAnimal CLI, see [Using `pgd` commands](/biganimal/latest/reference/cli/managing_clusters/#managing-distributed-high-availability-clusters). From db8c9aee04449229f687e25f35dece0ac3c7943b Mon Sep 17 00:00:00 2001 From: Fran Coughlin <132373434+francoughlin@users.noreply.github.com> Date: Fri, 11 Aug 2023 14:38:09 -0400 Subject: [PATCH 37/38] Update product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx Co-authored-by: Dee Dee Rothery <83650384+drothery-edb@users.noreply.github.com> --- .../02_packages/01a_display_packages.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx b/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx index 1ae99f419fd..d6090656549 100644 --- a/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx +++ b/product_docs/docs/epas/15/application_programming/02_packages/01a_display_packages.mdx @@ -14,7 +14,7 @@ You can view the package specification and package body definition using the psq \spb[+] []. 
``` -## Create and viewing a package and a package body +## Creating and viewing a package and a package body Create a package and a package body `test_pkg` in the `public` schema: From ce620ae0d111e9595297668e3f13e57489435e88 Mon Sep 17 00:00:00 2001 From: drothery-edb Date: Fri, 11 Aug 2023 16:57:09 -0400 Subject: [PATCH 38/38] BigAnimal: fixed CLI link label --- .../docs/biganimal/release/overview/02_high_availability.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx index 33c2cf1627a..188e2b8151f 100644 --- a/product_docs/docs/biganimal/release/overview/02_high_availability.mdx +++ b/product_docs/docs/biganimal/release/overview/02_high_availability.mdx @@ -80,4 +80,4 @@ A single-region configuration with three data nodes (one lead and two shadow nod For instructions on creating a distributed high-availability cluster using the BigAnimal portal, see [Creating a distributed high-availability cluster](../getting_started/creating_a_cluster/creating_an_eha_cluster/). -For instructions on creating, retrieving information from, and managing a distributed high-availability cluster using the BigAnimal CLI, see [Using `pgd` commands](/biganimal/latest/reference/cli/managing_clusters/#managing-distributed-high-availability-clusters). +For instructions on creating, retrieving information from, and managing a distributed high-availability cluster using the BigAnimal CLI, see [Using the BigAnimal CLI](/biganimal/latest/reference/cli/managing_clusters/#managing-distributed-high-availability-clusters).