users = client.listUsers();
+ users.forEach(user -> System.out.println("\t- " + user));
+
+ // Changing the user password.
+ client.changePassword(username, password, "newTestTest123!");
+ }
+
+}
+```
+
+:::
+
+
+::: tab Python
+```python
+from grpc import RpcError
+from immudb import ImmudbClient
+from immudb.constants import PERMISSION_ADMIN, PERMISSION_R, PERMISSION_RW
+from immudb.grpc.schema_pb2 import GRANT, REVOKE
+from enum import IntEnum
+
+URL = "localhost:3322" # immudb running on your machine
+LOGIN = "immudb" # Default username
+PASSWORD = "immudb" # Default password
+DB = b"defaultdb" # Default database name (must be in bytes)
+
+def main():
+ client = ImmudbClient(URL)
+ client.login(LOGIN, PASSWORD, database = DB)
+ passwordForNewUsers = "Te1st!@#Test"
+ try:
+ client.createUser("tester1", passwordForNewUsers, PERMISSION_R, DB)
+ client.createUser("tester2", passwordForNewUsers, PERMISSION_RW, DB)
+ client.createUser("tester3", passwordForNewUsers, PERMISSION_ADMIN, DB)
+ except RpcError as exception:
+ print(exception.details())
+
+ users = client.listUsers().userlist.users # immudb.handler.listUsers.listUsersResponse
+ for user in users:
+ print("User", user.user)
+ print("Created by", user.createdby)
+ print("Creation date", user.createdat)
+ print("Is active", user.active)
+ for permission in user.permissions:
+ print("Permission", permission.database, permission.permission)
+ print("---")
+
+ client.login("tester3", passwordForNewUsers, DB)
+ client.changePermission(GRANT, "tester2", DB, PERMISSION_ADMIN)
+ client.changePermission(REVOKE, "tester2", DB, PERMISSION_ADMIN)
+
+ client.login(LOGIN, PASSWORD, database = DB)
+ # Changing password
+ client.changePassword("tester1", "N1ewpassword!", passwordForNewUsers)
+
+ # User logs with new password
+ client.login("tester1", "N1ewpassword!")
+
+ client.login(LOGIN, PASSWORD, database = DB)
+ client.changePassword("tester1", passwordForNewUsers, "N1ewpassword!")
+
+
+ client.login("tester1", passwordForNewUsers, DB)
+
+ # No permissions to write
+ try:
+ client.set(b"test", b"test")
+ except RpcError as exception:
+ print(exception.details())
+
+ # But has permissions to read
+ result = client.get(b"test")
+
+ client.login("tester3", passwordForNewUsers, DB)
+
+ # Now will have permissions to write
+ client.changePermission(GRANT, "tester1", DB, PERMISSION_RW)
+ client.login("tester1", passwordForNewUsers, DB)
+ client.set(b"test", b"test")
+ result = client.get(b"test")
+
+ client.login("tester3", passwordForNewUsers, DB)
+
+ # Now will have permissions to nothing
+ client.changePermission(REVOKE, "tester1", DB, PERMISSION_RW)
+
+ try:
+ client.login("tester1", passwordForNewUsers, DB)
+ except RpcError as exception:
+ print(exception.details())
+
+ client.login("tester3", passwordForNewUsers, DB)
+ client.changePermission(GRANT, "tester1", DB, PERMISSION_RW)
+
+
+if __name__ == "__main__":
+ main()
+```
+:::
+
+::: tab Node.js
+```ts
+import ImmudbClient from 'immudb-node'
+import Parameters from 'immudb-node/types/parameters'
+import { USER_ACTION, USER_PERMISSION } from 'immudb-node/dist/types/user'
+
+const IMMUDB_HOST = '127.0.0.1'
+const IMMUDB_PORT = '3322'
+const IMMUDB_USER = 'immudb'
+const IMMUDB_PWD = 'immudb'
+
+const cl = new ImmudbClient({ host: IMMUDB_HOST, port: IMMUDB_PORT });
+
+(async () => {
+ await cl.login({ user: IMMUDB_USER, password: IMMUDB_PWD })
+
+ const createUserRequest: Parameters.CreateUser = {
+ user: 'myNewUser1',
+ password: 'myS3cretPassword!',
+ permission: USER_PERMISSION.READ_ONLY,
+ database: 'defaultdb',
+ };
+  const createUserRes = await cl.createUser(createUserRequest)
+ console.log('success: createUser', createUserRes)
+
+ const changePermissionReq: Parameters.ChangePermission = {
+ action: USER_ACTION.GRANT,
+ username: 'myNewUser1',
+    database: 'defaultdb',
+ permission: USER_PERMISSION.READ_WRITE
+ }
+ const changePermissionRes = await cl.changePermission(changePermissionReq)
+ console.log('success: changePermission', changePermissionRes)
+
+ const changePasswordReq: Parameters.ChangePassword = {
+ user: 'myNewUser1',
+ oldpassword: 'myS3cretPassword!',
+ newpassword: 'myNewS3cretPassword!'
+ }
+ const changePasswordRes = await cl.changePassword(changePasswordReq)
+  console.log('success: changePassword', changePasswordRes)
+})()
+```
+:::
+
+::: tab .NET
+
+```csharp
+
+var client = new ImmuClient();
+await client.Open("immudb", "immudb", "defaultdb");
+
+try
+{
+ await client.CreateUser("testUser", "testTest123!", Permission.PERMISSION_ADMIN, "defaultdb");
+}
+catch (Exception e)
+{
+ Console.WriteLine($"An exception in create user: {e}");
+ return;
+}
+
+await client.ChangePassword("testUser", "testTest123!", "newTestTest123!");
+await client.Close();
+
+```
+
+:::
+
+::: tab Others
+If you're using another development language, please refer to the [immugw](../connecting/immugw.md) option.
+:::
+
+::::
+
+
+
+
+
+## Changing user's password
+
+Changing user's password can be done by either the sysadmin user (which can be done for any user in the database)
+or by a user with Admin permission to at least one database that has created the user whose password is changed:
+
+```bash
+$ immuadmin user changepassword user
+Choose a password for user:
+Confirm password:
+user's password has been changed
+```
+
+The sysadmin user can change their own password the same way, but it requires confirming the existing password:
+
+```bash
+$ immuadmin user changepassword immudb
+Old password:
+Choose a password for immudb:
+Confirm password:
+immudb's password has been changed
+```
+
+
+
+
+
+## Emergency reset of sysadmin user password
+
+If the current password of the sysadmin user is not known, it can be reset by running the immudb server with password reset option:
+
+```bash
+immudb --force-admin-password --admin-password <new-password>
+```
+
+The `--force-admin-password` flag ensures that the sysadmin user will have its password reset to the one given through
+the `--admin-password` option.
+
+
diff --git a/src/1.9.5/playground.md b/src/1.9.5/playground.md
new file mode 100644
index 0000000000..6164ab63b2
--- /dev/null
+++ b/src/1.9.5/playground.md
@@ -0,0 +1,9 @@
+# Playground
+
+[**immudb Playground**](https://play.codenotary.com) is an interactive environment for learning about immudb:
+
+
+
+[![image](/playground.jpg)](https://play.codenotary.com)
+
+
\ No newline at end of file
diff --git a/src/1.9.5/production/auditor.md b/src/1.9.5/production/auditor.md
new file mode 100644
index 0000000000..db1c869e36
--- /dev/null
+++ b/src/1.9.5/production/auditor.md
@@ -0,0 +1,152 @@
+# Tampering detection
+
+The Auditor is a component for checking whether immudb has been tampered with; it's a good practice to run the Auditor as a separate and independent component.
+
+immuclient and [immugw](https://github.com/codenotary/immugw) are shipped with auditor capabilities.
+
+
+
+## Running an Auditor with immuclient
+
+immuclient can act as Auditor by running the following command:
+
+```bash
+$ ./immuclient audit-mode
+1m0s
+immuclientd 2022/05/22 12:34:11 INFO: starting auditor with a 1m0s interval ...
+immuclientd 2022/05/22 12:34:11 INFO: auditor monitoring HTTP server starting on 0.0.0.0:9477 ...
+immuclientd 2022/05/22 12:34:11 INFO: audit #1 started @ 2022-05-22 12:34:11.543823286 +0200 CEST m=+0.153679785
+immuclientd 2022/05/22 12:34:11 INFO: audit #1 - list of databases to audit has been (re)loaded - 2 database(s) found: [defaultdb mydatabase]
+immuclientd 2022/05/22 12:34:11 INFO: audit #1 - auditing database defaultdb
+immuclientd 2022/05/22 12:34:11 INFO: audit #1 finished in 55.295777ms @ 2022-05-22T12:34:11.599119184+02:00
+```
+
+immuclient is now running on the following address: 0.0.0.0:9477/metrics
+
+example output:
+
+```bash
+# HELP immuclient_audit_curr_root_per_server Current root index used for the latest audit.
+# TYPE immuclient_audit_curr_root_per_server gauge
+immuclient_audit_curr_root_per_server{server_address="127.0.0.1:3322",server_id="br8eugq036tfln0ct6o0"} 2
+# HELP immuclient_audit_prev_root_per_server Previous root index used for the latest audit.
+# TYPE immuclient_audit_prev_root_per_server gauge
+immuclient_audit_prev_root_per_server{server_address="127.0.0.1:3322",server_id="br8eugq036tfln0ct6o0"} -1
+# HELP immuclient_audit_result_per_server Latest audit result (1 = ok, 0 = tampered).
+# TYPE immuclient_audit_result_per_server gauge
+immuclient_audit_result_per_server{server_address="127.0.0.1:3322",server_id="br8eugq036tfln0ct6o0"} -1
+# HELP immuclient_audit_run_at_per_server Timestamp in unix seconds at which latest audit run.
+# TYPE immuclient_audit_run_at_per_server gauge
+immuclient_audit_run_at_per_server{server_address="127.0.0.1:3322",server_id="br8eugq036tfln0ct6o0"} 1.5907565337454605e+09
+```
+
+immuclient looks for immudb at 127.0.0.1:3322 by default with the default username and password. Nevertheless a number of parameters can be defined:
+
+```
+immuclient audit-mode - Run a foreground auditor
+immuclient audit-mode install - Install and runs daemon
+immuclient audit-mode stop - Stops the daemon
+immuclient audit-mode start - Starts initialized daemon
+immuclient audit-mode restart - Restarts daemon
+immuclient audit-mode uninstall - Removes daemon and its setup
+
+Flags:
+ -h, --help help for audit-mode
+
+Global Flags:
+ --audit-databases string Optional comma-separated list of databases (names) to be audited. Can be full name(s) or just name prefix(es).
+ --audit-monitoring-host string Host for the monitoring HTTP server when running in audit mode (serves endpoints like metrics, health and version). (default "0.0.0.0")
+ --audit-monitoring-port int Port for the monitoring HTTP server when running in audit mode (serves endpoints like metrics, health and version). (default 9477)
+ --audit-notification-password string Password used to authenticate when publishing audit result to 'audit-notification-url'.
+ --audit-notification-url string If set, auditor will send a POST request at this URL with audit result details.
+ --audit-notification-username string Username used to authenticate when publishing audit result to 'audit-notification-url'.
+ --audit-password string immudb password used to login during audit; can be plain-text or base64 encoded (must be prefixed with 'enc:' if it is encoded)
+ --audit-username string immudb username used to login during audit
+ --certificate string server certificate file path (default "./tools/mtls/4_client/certs/localhost.cert.pem")
+ --clientcas string clients certificates list. Aka certificate authority (default "./tools/mtls/2_intermediate/certs/ca-chain.cert.pem")
+ --config string config file (default path are configs or $HOME. Default filename is immuclient.toml)
+ --database string immudb database to be used
+ --dir string Main directory for audit process tool to initialize (default "/var/folders/0z/wk6v4sjd31qbvt7l75t_z_v00000gn/T/")
+ -a, --immudb-address string immudb host address (default "127.0.0.1")
+ -p, --immudb-port int immudb port number (default 3322)
+ --max-recv-msg-size int max message size in bytes the client can receive (default 4194304)
+ -m, --mtls enable mutual tls
+ --password string immudb password used to login; can be plain-text or base64 encoded (must be prefixed with 'enc:' if it is encoded)
+ --pkey string server private key path (default "./tools/mtls/4_client/private/localhost.key.pem")
+ --roots-filepath string Filepath for storing root hashes after every successful audit loop. Default is tempdir of every OS. (default "/tmp/")
+ --server-signing-pub-key string Path to the public key to verify signatures when presents
+ --servername string used to verify the hostname on the returned certificates (default "localhost")
+ --tokenfile string authentication token file (default path is $HOME or binary location; default filename is )
+ --username string immudb username used to login
+ --value-only returning only values for get operations
+```
+
+To get the (signed) state in combination with the immuclient with auditor capabilities:
+
+```bash
+immuclient audit-mode --audit-username {immudb-username} --audit-password {immudb-pw} --server-signing-pub-key {state-public-key}
+```
+
+
+
+
+
+## Running immuclient Auditor as a service
+
+immuclient as Auditor can be installed in the system with the following command:
+
+Install service:
+
+```bash
+immuclient audit-mode install
+```
+
+In this case, all parameters are written into the `immuclient` configuration file:
+
+- Linux: `/etc/immudb/immuclient.toml`
+- Windows: `C:\ProgramData\ImmuClient\config\immuclient.toml`
+
+
+
+
+
+## Auditor best practices
+
+### How can I be notified if my immudb instance was tampered?
+
+It's possible to provide an external URL that will be triggered in case tampering is detected.
+By configuring `IMMUCLIENT_AUDIT_NOTIFICATION_URL`, a POST request will be sent with the following body:
+
+```
+{
+ "current_state": {
+ "hash": "string",
+ "signature": {
+ "public_key": "string",
+ "signature": "string"
+ },
+ "tx": 0
+ },
+ "db": "string",
+ "password": "string",
+ "previous_state": {
+ "hash": "string",
+ "signature": {
+ "public_key": "string",
+ "signature": "string"
+ },
+ "tx": 0
+ },
+ "run_at": "2020-11-13T00:53:42+01:00",
+ "tampered": true,
+ "username": "string"
+}
+```
+
+NOTE: it's not possible to know at which transaction the database was tampered with. The Auditor checks every second whether the data was tampered with - so it's only possible to know in which time frame the tampering was detected.
+
+### How many Auditors should I run to secure my immudb instance?
+
+A proper setup of one immuclient instance can fit most cases, but there are ways to increase the security of tamper detection. A single instance can go offline for any reason: network problems, hardware failures or attacks. Therefore, a good practice is to have multiple Auditor instances running in different zones.
+
+
diff --git a/src/1.9.5/production/backup.md b/src/1.9.5/production/backup.md
new file mode 100644
index 0000000000..19b7777a96
--- /dev/null
+++ b/src/1.9.5/production/backup.md
@@ -0,0 +1,125 @@
+
+# Hot backup and restore
+
+
+
+The hot backup/restore feature allows backing up and restoring an immudb database without stopping the immudb engine. The database remains accessible during the backup process. It is possible to perform full or incremental/differential backup and restore.
+
+Both backup and restore functions can use streams or files as a source/destination.
+
+Backup file is not compressed, assuming user may use any suitable method (see examples for bzip2 compression).
+
+
+
+
+
+## Backup
+
+```
+immuadmin hot-backup <database> [-o <file> [--append]] [--start-tx <tx-id>]
+```
+
+### Full backup
+
+To run a full database backup, execute the `immuadmin hot-backup <database>` command, specifying the optional backup file name with the `-o` option. If the `-o` option is not specified, output is sent to `stdout`.
+
+If backup file is specified with `-o` option, the file is created. If file already exists, backup process fails.
+
+### Incremental backup
+
+When backing up a database to an existing file, the `immuadmin` tool finds the last backed-up database transaction in the file, verifies its checksum and appends only the database changes made after this transaction. `immuadmin` requires the user to specify the `--append` command line option to append to an existing file.
+
+When backing up to a stream, `immuadmin` doesn't have information about the last backed-up transaction; however, the user can specify the ID of the transaction to start from with the `--start-tx` command line option. This allows the user to implement an incremental/differential backup strategy using streams.
+
+
+
+
+
+## Restore
+```
+immuadmin hot-restore <database> [-i <file>] [--append] [--force] [--force-replica]
+```
+
+### Full restore
+
+To run a full restore, execute the `immuadmin hot-restore <database>` command, specifying the optional backup file name with the `-i` option. If the `-i` option is not specified, input data is read from `stdin`.
+
+If database already exists, restore process fails.
+
+::: tip
+To boost performance while restoring from a backup, immudb could be run without strong durability warranties i.e. `./immudb --synced=false`. If a reboot or crash occurs while restoring from a backup, you may have to restart the restore process.
+
+Synced mode prevents data loss from unexpected crashes and shutdowns, but affects performance (default true), which is why you may want to restart immudb server with default settings after restoring.
+:::
+
+### Incremental restore
+
+If the database already exists, it is possible to append new data from the backup file to the database. In this case the user has to specify the `--append` flag.
+
+#### Transaction overlap/gap handling
+
+`immuadmin` tries to verify that the backup file and the database to which data is being restored have the same origin. To do this, `immuadmin` finds the last transaction in the target database and the same transaction in the backup file, and checks the transaction signatures. If the transactions don't match, restore isn't possible.
+
+When there is no overlap between transactions in database and file, transaction verification is not possible. However, if there is no gap between transactions, `immuadmin` allows to bypass verification with `--force` command line option. If there is a gap between last transaction in database and first transaction in file, restore isn't possible.
+
+### Transaction verification
+
+During the restore process `immuadmin` checks whether the checksum reported by the database after restoring each transaction matches the one stored in the file during the backup process. This allows detecting accidental or malicious corruption of the backup file.
+
+### Replica flag handling
+
+It is possible to restore data only to a replica database. During a full restore the database is automatically created as a replica (the replica flag is switched off after the restore), but for an incremental restore `immuadmin` assumes the database is already in replica mode (the user can use the `immuadmin database update --replication-is-replica` command to switch on replica mode).
+
+However, it is possible to automatically switch on and off replica mode for incremental backup using `--force-replica` command line option.
+
+
+
+
+
+### Verifying backup file
+
+```
+immuadmin hot-restore --verify [-i <file>]
+```
+It is possible to verify a backup file/stream using the `immuadmin hot-restore --verify` command. It only checks the structural correctness of the backup file, e.g. the file format and the correct sequence of transactions in the file. The only way to detect data corruption is to restore the data.
+
+
+
+
+
+## Examples
+
+Full backup to file:
+```
+immuadmin hot-backup foo -o foo.backup
+immuadmin hot-backup foo > foo.backup
+```
+
+Incremental backup to file:
+```
+immuadmin hot-backup foo -o foo.backup --append
+```
+Incremental backup with `bzip2` compression:
+```
+immuadmin hot-backup foo --start-tx 123 | bzip2 > foo.bz2
+```
+Full restore
+```
+immuadmin hot-restore bar -i foo.backup
+immuadmin hot-restore bar < foo.backup
+```
+Full restore from `bzip2`-compressed file
+```
+bunzip2 foo.bz2 -c | immuadmin hot-restore bar
+```
+Incremental restore with automatic switching of replica mode
+```
+immuadmin hot-restore bar -i foo.backup --append --force-replica
+```
+
+Copy database:
+```
+immuadmin hot-backup foo | immuadmin hot-restore bar
+```
+
+
diff --git a/src/1.9.5/production/backwards-compatibility.md b/src/1.9.5/production/backwards-compatibility.md
new file mode 100644
index 0000000000..316d413310
--- /dev/null
+++ b/src/1.9.5/production/backwards-compatibility.md
@@ -0,0 +1,67 @@
+# Backwards compatibility
+
+
+
+### immudb 1.1 proof compatibility
+
+immudb 1.2 introduced KV metadata to support new features such as logical deletion or data expiration.
+This change required updates to the way a transaction hash is calculated.
+The downside of such change is that immudb clients using immudb 1.2+
+needed an updated method of proof calculation in order to verify newly added data.
+
+In some cases it is very hard or impossible to update the verification code on the client side.
+If this is the case, immudb offers a way to disable metadata to maintain compatibility with older clients.
+
+
+
+
+
+#### Enabling the 1.1 proof compatibility mode
+
+*Note: backwards compatibility mode is currently not available for the `defaultdb` database.*
+
+When creating new database, the mode can be specified with:
+
+```bash
+$ ./immuadmin database create --write-tx-header-version 0
+```
+
+Enabling compatibility mode for existing databases can be done by:
+
+```bash
+$ ./immuadmin database update --write-tx-header-version 0
+```
+
+*Note: immudb restart is needed to make this change effective.*
+
+In order to re-enable metadata-enhanced proofs,
+update database settings with `--write-tx-header-version 1` option.
+
+
+
+
+
+#### Limitations of 1.1 compatibility mode
+
+Switching to 1.1-compatible proof mode will disable metadata support and thus will make the following operations unavailable:
+
+* For KV interface:
+ * Logical deletion
+ * Data expiration
+ * Non-indexable entries
+* For SQL interface:
+ * Logical deletion
+ * Updates to indexed columns
+
+Make sure to test your application before enabling the 1.1 compatibility mode.
+
+#### Working with database that already contains metadata-enhanced entries
+
+Even though old clients cannot validate proofs for metadata-enhanced records,
+they can still read the data without proofs as long as the entries don't use metadata.
+Operations such as `Get`, `Scan` or `History` will not cause errors in such workloads.
+
+If proofs are needed, KV entries that were previously added with metadata should
+be re-added to the database after enabling immudb 1.1 compatibility mode.
+
+
diff --git a/src/1.9.5/production/fips.md b/src/1.9.5/production/fips.md
new file mode 100644
index 0000000000..a57145bd8e
--- /dev/null
+++ b/src/1.9.5/production/fips.md
@@ -0,0 +1,43 @@
+# FIPS 140-2
+
+
+
+The Federal Information Processing Standard (FIPS) 140-2 publication describes United States government-approved security requirements for cryptographic modules. [FIPS-140](https://csrc.nist.gov/publications/detail/fips/140/2/final) series is a collection of computer security standards set by the National Institute of Standards and Technology (NIST) for the United States government. FIPS 140–2 defines the critical security parameters vendors must use for encryption before selling their products to the U.S government.
+
+For a fully FIPS-compliant deployment of immudb a few things are required:
+
+- immudb must be compiled with a FIPS validated cryptographic module
+- immudb must be configured to use FIPS-approved cryptographic algorithms
+- immudb components (immuadmin and immuclient) must be compiled with a FIPS-validated cryptographic module
+
+For immudb, adherence to FIPS 140-2 is ensured by:
+
+- Using FIPS approved / NIST-recommended cryptographic algorithms through the use of `goboring/golang` container image. Since the native go crypto standard library is not FIPS compliant, we use the Google-provided Go implementation that has patches on top of standard Go to enable integrating BoringCrypto. immudb components are built with this image as a build base.
+- Enabling [`fipsonly`](https://go.googlesource.com/go/+/dev.boringcrypto/src/crypto/tls/fipsonly/fipsonly.go) mode to restrict all TLS configuration in immudb binaries to FIPS-approved settings.
+
+
+
+
+
+### Limitations
+
+- Currently the builds with FIPS-compliance are only available on `linux-amd64` architecture.
+- There is an overhead in calling into BoringCrypto via cgo for the crypto library functions, which incurs a performance penalty. The library performs slower than the built-in crypto library. Hence you could see a performance drop of ~15% when using a FIPS-compliant immudb server.
+
+
+
+
+
+### Using FIPS-compliant binaries
+
+You can download the immudb binary from the [latest releases](https://github.com/codenotary/immudb/releases) on Github. The FIPS-compliant binaries have a `-fips` suffix. (e.g. immudb-v1.4.x-Linux-amd64-fips)
+
+
+
+
+
+### Using FIPS-compliant docker images
+
+You can pull immudb FIPS-compliant docker images from [DockerHub](https://hub.docker.com/r/codenotary/immudb) and run it in a ready-to-use container. The FIPS-compliant docker images have a `-fips` suffix. (e.g. codenotary/immudb-fips:latest)
+
+
diff --git a/src/1.9.5/production/index-maintenance.md b/src/1.9.5/production/index-maintenance.md
new file mode 100644
index 0000000000..de90601e26
--- /dev/null
+++ b/src/1.9.5/production/index-maintenance.md
@@ -0,0 +1,140 @@
+# Index cleaning
+
+
+
+Maintaining healthy disk usage is crucial. immudb has two operations aimed at removing unreferenced data from the index.
+A full index clean-up is achieved by calling `CompactIndex`, which is a routine that creates a fresh index based on the current state, removing all intermediate data generated over time.
+The index is generated asynchronously, so new transactions may take place while it is created. As a result, if the server is constantly overloaded, there will likely be blocking times when the newly compacted index replaces the current one.
+
+In the case of continuous load on the server, the `FlushIndex` operation may be used instead. It will dump the current index into disk while partly removing unreferenced data. The `cleanupPercentage` attribute indicates how much space will be scanned for unreferenced data. Even though this operation blocks transaction processing, choosing a small percentage e.g. 0.1 may not significantly hinder normal operations while reducing used storage space.
+
+Partial compaction may be triggered automatically by immudb. Database settings can be modified to set the `cleanupPercentage` attribute to non-zero in order to accomplish this.
+
+
+
+:::: tabs
+
+::: tab Go
+<<< @/src/code-examples/go/maintenance-index/main.go
+:::
+
+::: tab Java
+
+```java
+package io.codenotary.immudb.helloworld;
+
+import io.codenotary.immudb4j.*;
+
+public class App {
+
+ public static void main(String[] args) {
+ FileImmuStateHolder stateHolder = FileImmuStateHolder.newBuilder()
+ .withStatesFolder("./immudb_states")
+ .build();
+
+ ImmuClient client = ImmuClient.newBuilder()
+ .withServerUrl("127.0.0.1")
+ .withServerPort(3322)
+ .withStateHolder(stateHolder)
+ .build();
+
+ client.login("immudb", "immudb");
+
+ // partial index cleanup
+ client.flushIndex(0.1, false);
+
+ // full async index cleanup
+ client.cleanIndex();
+ }
+
+}
+```
+:::
+
+::: tab .NET
+
+```csharp
+
+var client = new ImmuClient();
+await client.Open("immudb", "immudb", "defaultdb");
+
+await client.FlushIndex(0.1f, false);
+
+await client.Close();
+
+```
+
+:::
+
+::: tab Python
+This feature is not yet supported or not documented.
+Do you want to make a feature request or help out? Open an issue on [Python sdk github project](https://github.com/codenotary/immudb-py/issues/new)
+:::
+
+::: tab Node.js
+This feature is not yet supported or not documented.
+Do you want to make a feature request or help out? Open an issue on [Node.js sdk github project](https://github.com/codenotary/immudb-node/issues/new)
+:::
+
+::: tab Others
+If you're using another development language, please refer to the [immugw](../connecting/immugw.md) option.
+:::
+
+::::
+
+
+
+## How indexing works
+
+immudb uses a btree to index key-value entries. While the key is the same one submitted by the client, the value stored in the btree is an offset into the file where the actual value is stored, together with its size and hash value.
+
+The btree is kept in memory as new data is inserted. Getting a key, or even the historical values of a key, can be done directly by using a mutex lock on the btree, but scanning by prefix requires the tree to be stored on disk; this is referred to as a snapshot.
+The persistence is implemented in append-only mode, thus whenever a snapshot is created (btree flushed to disk), updated and new nodes are appended to the file, while new or updated nodes may be linked to unmodified nodes (already written into disk) and those unmodified nodes are not rewritten.
+
+The snapshot creation does not necessarily take place upon each scan by prefix; it's possible to reuse an already created one. The client can specify requirements on how fresh the snapshot should be by providing a transaction ID that must at least be indexed (sinceTx).
+
+After some time, several snapshots may be created (specified by flushAfter properties of the btree and the scan requests), the file backing the btree will hold several old snapshots. Thus the clean index process will dump to a different location only the latest snapshot but in this case also writing the unmodified nodes. Once that dump is done, the index folder is replaced by the new one.
+
+While the cleanup process runs, no data is indexed, and there is an extra disk space requirement due to the new dump. Once completed, disk usage is considerably reduced by removing the previously indexed data (older snapshots).
+The btree and its cleanup process are specific to indexing and will not block transaction processing, as indexing is generated asynchronously.
+
+
+
+
+
+## compactor tool
+
+To manage index compaction, you can use the [compactor](https://github.com/codenotary/immudb-tools/tree/main/compactor) tool,
+part of the [immudb-tools](https://github.com/codenotary/immudb-tools) repository.
+
+This tool can be used to perform periodic maintenance on your database indexes, or to configure online compaction.
+
+The maintenance can be performed in three different ways:
+- online compaction
+- percentage compaction
+- full flush
+
+In all three modes, new indexes are calculated and old ones are discarded. Indexes are organized in chunk files; whenever a file contains only discarded indexes, it is automatically deleted.
+
+### Online compaction
+
+This kind of compaction is performed by immudb during normal write operations: once the amount of new written data reaches the percentage threshold configured per one database, immudb cleans up specified percentage of the index data, discarding old unreferenced data.
+
+For every database, users can specify a percentage of total written data to be reindexed on every write.
+
+The compactor tool can be used to enable this mode, and to set the percentage threshold. Once this is done, there is no need to run compactor tool periodically: the compaction will happen automatically.
+
+
+### Flush compaction
+
+In this mode, the tool calls for immudb to immediately perform a partial compaction, reindexing the oldest data up to the specified percentage. It is similar to the previous mode, but it is performed immediately and must be periodically issued.
+The advantage is that you have control on the time when compaction is performed, so that you can leverage periods of less intense activity (e.g.: weekends or nights).
+
+
+### Full compaction
+
+All indexes are rebuilt. Very resource intensive, but it gives you the most compact representation of indexes.
+
+You can get more information in the [README](https://github.com/codenotary/immudb-tools/tree/main/compactor)
+
+
diff --git a/src/1.9.5/production/monitoring.md b/src/1.9.5/production/monitoring.md
new file mode 100644
index 0000000000..f6978988b4
--- /dev/null
+++ b/src/1.9.5/production/monitoring.md
@@ -0,0 +1,517 @@
+# Health Monitoring
+
+
+
+## Prometheus metrics
+
+immudb exposes a Prometheus end-point, by default on port 9497 on `/metrics`.
+
+```bash
+$ curl -s http://localhost:9497/metrics
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 1.3355e-05
+go_gc_duration_seconds{quantile="0.25"} 1.3615e-05
+go_gc_duration_seconds{quantile="0.5"} 1.9991e-05
+go_gc_duration_seconds{quantile="0.75"} 3.0348e-05
+go_gc_duration_seconds{quantile="1"} 3.3859e-05
+go_gc_duration_seconds_sum 0.000151623
+go_gc_duration_seconds_count 7
+# HELP go_goroutines Number of goroutines that currently exist.
+...
+```
+
+Querying metrics with a simple curl command is not a very practical solution. immudb has predefined Grafana dashboard visualizing some of the key metrics. This dashboard can be downloaded from [immudb github repository][grafana-dashboard].
+
+[grafana-dashboard]: https://github.com/codenotary/immudb/blob/master/tools/monitoring/grafana-dashboard.json
+
+
+
+![immudb grafana stats](/immudb/grafana-immudb.png)
+
+
+
+You can also use `immuadmin stats` to see these metrics without additional tools:
+
+```bash
+./immuadmin stats
+```
+
+
+
+![immuadmin stats](/immudb/immuadmin-stats.png)
+
+
+
+immudb exports the standard Go metrics, so dashboards like [Go metrics](https://grafana.com/grafana/dashboards/10826) work out of the box.
+
+
+
+![immuadmin stats](/immudb/grafana-go.jpg)
+
+
+
+For very simple cases, you can use `immuadmin status` from monitoring scripts to ping the server:
+
+```bash
+$ ./immuadmin status
+OK - server is reachable and responding to queries
+```
+
+
+
+
+
+## Database size
+
+The following metric contains information about the disk space usage in bytes for each individual database:
+
+```bash
+$ curl -s localhost:9497/metrics | grep immudb_db_size_bytes
+# HELP immudb_db_size_bytes Database size in bytes.
+# TYPE immudb_db_size_bytes gauge
+immudb_db_size_bytes{db="defaultdb"} 2929
+immudb_db_size_bytes{db="systemdb"} 3789
+```
+
+The official [grafana dashboard][grafana-dashboard] is using this metric
+to show the amount of disk space used by databases and the growth of disk usage over time.
+
+Those dashboards can be used to forecast disk usage and make sure it’s not getting out of control.
+
+
+
+![database size](/immudb/metrics-dbsize.jpg)
+
+
+
+
+
+
+
+## Database entries
+
+The following Prometheus metric reports the number of new KV entries added to the database since the start of the process:
+
+```bash
+$ curl -s http://localhost:9497/metrics | grep immudb_number_of_stored_entries
+# HELP immudb_number_of_stored_entries Number of key-value entries currently stored by the database.
+# TYPE immudb_number_of_stored_entries gauge
+immudb_number_of_stored_entries{db="defaultdb"} 1
+immudb_number_of_stored_entries{db="systemdb"} 2
+```
+
+The [Grafana dashboard][grafana-dashboard] shows the amount of new DB entries and the insertion rate.
+This chart does not display the total amount of KV entries and will be reset to zero upon immudb restart.
+
+
+
+![database entries](/immudb/metrics-entries.jpg)
+
+
+
+
+
+
+
+## Indexer metrics
+
+immudb exposes metrics related to the internal indexing process:
+
+```bash
+$ curl -s http://localhost:9497/metrics | grep 'immudb_last_.*_trx_id'
+# HELP immudb_last_committed_trx_id The highest id of committed transaction
+# TYPE immudb_last_committed_trx_id gauge
+immudb_last_committed_trx_id{db="defaultdb"} 1
+immudb_last_committed_trx_id{db="systemdb"} 2
+# HELP immudb_last_indexed_trx_id The highest id of indexed transaction
+# TYPE immudb_last_indexed_trx_id gauge
+immudb_last_indexed_trx_id{db="defaultdb"} 1
+immudb_last_indexed_trx_id{db="systemdb"} 2
+```
+
+Those metrics are used on various graphs on the [Grafana dashboard][grafana-dashboard].
+
+
+
+
+
+### Indexed %
+
+This graph shows the total percentage of all transactions in the database that have been indexed so far.
+In a healthy situation, this chart should remain at or close to 100%.
+If this value starts dropping down, that means that the data ingestion rate is higher than the indexing rate
+indicating that additional rate limiting should be added to db writers.
+
+
+
+![database entries](/immudb/metrics-indexed-percent.jpg)
+
+
+
+
+
+
+
+### Indexing / Commit rate
+
+This chart shows the rate of new transactions added to the database and the rate of indexing those transactions. If the indexing rate is smaller than the commit rate, this means that the database isn’t keeping up with the indexing. In applications where only synchronous writes are performed or where data can be immediately indexed, the indexing rate line (Idx) and commit rate line (Cmt) will overlap.
+
+
+
+![database entries](/immudb/metrics-indexing-rate.jpg)
+
+
+
+
+
+
+
+### TRXs Left to Index
+
+This chart shows the number of transactions waiting for indexing. This value should be close to zero and should have a decreasing tendency.
+
+
+
+![database entries](/immudb/metrics-indexing-left.jpg)
+
+
+
+
+
+
+
+### TRX Count
+
+This chart shows the total number of transactions in the database.
+
+
+
+![database entries](/immudb/metrics-trx-count.jpg)
+
+
+
+
+
+
+
+## Btree metrics
+
+immudb exposes various metrics per btree instance. The `id` label is a relative path for the location of the btree on disk.
+Currently there's a single btree instance per database.
+
+```bash
+$ curl -s http://localhost:9497/metrics | grep 'immudb_btree_'
+# HELP immudb_btree_depth Btree depth
+# TYPE immudb_btree_depth gauge
+immudb_btree_depth{id="data/defaultdb/index"} 1
+immudb_btree_depth{id="data/systemdb/index"} 1
+# HELP immudb_btree_flushed_entries_last_cycle Numbers of btree entries written to disk during the last flush process
+# TYPE immudb_btree_flushed_entries_last_cycle gauge
+immudb_btree_flushed_entries_last_cycle{id="data/defaultdb/index"} 1
+immudb_btree_flushed_entries_last_cycle{id="data/systemdb/index"} 2
+# HELP immudb_btree_flushed_entries_total Number of btree entries written to disk during flush since the immudb process was started
+# TYPE immudb_btree_flushed_entries_total counter
+immudb_btree_flushed_entries_total{id="data/defaultdb/index"} 1
+immudb_btree_flushed_entries_total{id="data/systemdb/index"} 3
+# HELP immudb_btree_flushed_nodes_last_cycle Numbers of btree nodes written to disk during the last flush process
+# TYPE immudb_btree_flushed_nodes_last_cycle gauge
+immudb_btree_flushed_nodes_last_cycle{id="data/defaultdb/index",kind="inner"} 0
+immudb_btree_flushed_nodes_last_cycle{id="data/defaultdb/index",kind="leaf"} 1
+immudb_btree_flushed_nodes_last_cycle{id="data/systemdb/index",kind="inner"} 0
+immudb_btree_flushed_nodes_last_cycle{id="data/systemdb/index",kind="leaf"} 1
+# HELP immudb_btree_flushed_nodes_total Number of btree nodes written to disk during flush since the immudb process was started
+# TYPE immudb_btree_flushed_nodes_total counter
+immudb_btree_flushed_nodes_total{id="data/defaultdb/index",kind="inner"} 0
+immudb_btree_flushed_nodes_total{id="data/defaultdb/index",kind="leaf"} 2
+immudb_btree_flushed_nodes_total{id="data/systemdb/index",kind="inner"} 0
+immudb_btree_flushed_nodes_total{id="data/systemdb/index",kind="leaf"} 3
+# HELP immudb_btree_leaf_node_entries Histogram of number of entries in as single leaf btree node, calculated when visiting btree nodes
+# TYPE immudb_btree_leaf_node_entries histogram
+immudb_btree_leaf_node_entries_bucket{id="data/defaultdb/index",le="1"} 7
+immudb_btree_leaf_node_entries_bucket{id="data/defaultdb/index",le="2"} 7
+....
+immudb_btree_leaf_node_entries_sum{id="data/defaultdb/index"} 4
+immudb_btree_leaf_node_entries_count{id="data/defaultdb/index"} 7
+immudb_btree_leaf_node_entries_bucket{id="data/systemdb/index",le="1"} 8
+immudb_btree_leaf_node_entries_bucket{id="data/systemdb/index",le="2"} 11
+....
+immudb_btree_leaf_node_entries_sum{id="data/systemdb/index"} 11
+immudb_btree_leaf_node_entries_count{id="data/systemdb/index"} 11
+# HELP immudb_btree_nodes_data_begin Beginning offset for btree nodes data file
+# TYPE immudb_btree_nodes_data_begin gauge
+immudb_btree_nodes_data_begin{id="data/defaultdb/index"} 0
+immudb_btree_nodes_data_begin{id="data/systemdb/index"} 0
+# HELP immudb_btree_nodes_data_end End offset for btree nodes data appendable
+# TYPE immudb_btree_nodes_data_end gauge
+immudb_btree_nodes_data_end{id="data/defaultdb/index"} 100
+immudb_btree_nodes_data_end{id="data/systemdb/index"} 281
+```
+
+[Grafana dashboard][grafana-dashboard] exposes both some basic and more advanced btree statistics.
+
+
+
+
+
+### Btree Cache Size / Btree Cache Hit %
+
+Those two charts show internal statistics about immudb btree cache.
+In order to avoid reading large amounts of data on every btree operation,
+immudb keeps an in-memory cache of recently used btree nodes.
+The number of nodes in the cache is shown in the first chart;
+it is capped at the maximum number of cache entries.
+
+The second chart shows how effective the cache is
+presenting the percentage of btree node lookups that were optimized with the cache.
+For small databases, it’s very likely that this hit ratio will be close to 100%,
+but it will drop down once the amount of data increases.
+There’s no single rule on what value we should expect here.
+In our internal tests even 40% cache hit ratios with workloads using keys with random distribution were still yielding very good performance results.
+To get higher cache utilization, applications should prefer working on keys that are close to each other -
+such as using sequentially increasing numbers, where newly inserted data will end up
+in the btree portion close to previously accessed entries.
+
+
+
+![database entries](/immudb/metrics-btree-hit.jpg)
+
+
+
+
+
+
+
+### Btree Depth
+
+This chart shows the depth of the tree.
+Since btrees are auto-balancing data structures,
+this depth will have a logarithmic tendency.
+The depth of the tree indicates what is the amount of nodes traversed by each btree operation.
+
+
+
+![database entries](/immudb/metrics-btree-depth.jpg)
+
+
+
+
+
+
+
+### Btree Child Node Count Distributions
+
+These graphs show the distribution of the amount of child nodes.
+In a healthy btree like the one below,
+the amount of child nodes should be focused around a single value (40 in the example).
+Also the amount of child nodes should be kept at sane levels -
+values below 10 or above a few hundred are a good indication that the btree isn’t performing well
+and the application should consider using keys of different,
+more uniform and shorter lengths for its data.
+
+_Note: These statistics are gathered when traversing the btree, if there’s no activity in the database the distribution will be flat._
+
+
+
+![database entries](/immudb/metrics-btree-distribution.jpg)
+
+
+
+
+
+
+
+### Flush Statistics
+
+immudb keeps recent btree changes in memory to reduce the amount of data to be written to disk.
+In order to persist those changes, there’s a btree flush process called once a threshold of new and modified entries is reached.
+
+
+
+![database entries](/immudb/metrics-btree-flush-entries.jpg)
+![database entries](/immudb/metrics-btree-flush-nodes.jpg)
+
+
+
+These metrics are calculated for nodes (both inner and leaf ones) and separately for KV entries in the leaf nodes.
+
+The flush rate shows the rate of written nodes / entries per second.
+It clearly shows where the flush process started and where it ended.
+
+Flush progress metrics for each flush cycle starts at zero and reach the total amount of entries processed during such single flush operation.
+The next flush will repeat the process by starting from zero reaching the maximum value.
+By looking at those maximum values, we can see how much data needs to be written to disk during flush operations.
+During normal DB operation, it should be steady over time.
+An unbound growth of those maximums could indicate that the flush operation is too aggressive and the threshold should be adjusted.
+
+
+
+
+
+### Compaction Statistics
+
+Similarly to flush statistics, immudb exposes the same set of values for full compaction.
+
+_Note: these values are gathered for overall compaction that fully rewrites the btree structure. immudb 1.2.3 introduced a new online compaction mode that gradually removes unused btree data during flush operation. This new compaction mode is not included in those charts._
+
+
+
+![database entries](/immudb/metrics-btree-compaction-entries.jpg)
+![database entries](/immudb/metrics-btree-compaction-nodes.jpg)
+
+
+
+
+
+
+
+### Data Size for Btree Nodes
+
+immudb internally uses append-only files to store data.
+That is also used for btree nodes.
+We don’t replace existing data on disk.
+Instead we append a new modified version at the end of the data stream.
+immudb 1.2.3 introduced new online compaction of btree data that deals with garbage
+that naturally accumulates at the beginning of such data stream over time.
+This chart can be used to ensure that the amount of data being used for btree nodes
+is being reduced whenever a cleanup operation is performed.
+
+
+
+![database entries](/immudb/metrics-btree-data-size.jpg)
+
+
+
+
+
+
+
+## S3 storage metrics
+
+Various metrics are exposed when working with remote storage such as S3.
+Those can be used to help analyze S3 performance and resource consumption.
+
+```bash
+$ curl -s http://localhost:9497/metrics | grep 'remote'
+# HELP immudb_remoteapp_chunk_bytes Total number of bytes stored in chunks
+# TYPE immudb_remoteapp_chunk_bytes gauge
+....
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="Active"} 133631
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="Cleaning"} 0
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="DownloadError"} 0
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="Downloading"} 0
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="Invalid"} 0
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="Local"} 0
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="Remote"} 1.048767e+06
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="UploadError"} 0
+immudb_remoteapp_chunk_bytes{path="defaultdb/aht/tree/",state="Uploading"} 0
+...
+# HELP immudb_remoteapp_chunk_count Number of chunks used for immudb remote storage
+# TYPE immudb_remoteapp_chunk_count gauge
+....
+immudb_remoteapp_chunk_count{path="defaultdb/aht/tree/",state="Active"} 1
+immudb_remoteapp_chunk_count{path="defaultdb/aht/tree/",state="Cleaning"} 0
+immudb_remoteapp_chunk_count{path="defaultdb/aht/tree/",state="DownloadError"} 0
+immudb_remoteapp_chunk_count{path="defaultdb/aht/tree/",state="Downloading"} 0
+immudb_remoteapp_chunk_count{path="defaultdb/aht/tree/",state="Invalid"} 0
+immudb_remoteapp_chunk_count{path="defaultdb/aht/tree/",state="Local"} 0
+immudb_remoteapp_chunk_count{path="defaultdb/aht/tree/",state="Remote"} 1
+....
+# HELP immudb_remoteapp_corrupted_metadata Number of corrupted metadata detections in an immudb remote storage
+# TYPE immudb_remoteapp_corrupted_metadata counter
+immudb_remoteapp_corrupted_metadata 0
+# HELP immudb_remoteapp_download_events Immudb remote storage download event counters
+# TYPE immudb_remoteapp_download_events counter
+immudb_remoteapp_download_events{event="cancelled"} 0
+immudb_remoteapp_download_events{event="failed"} 0
+immudb_remoteapp_download_events{event="finished"} 0
+immudb_remoteapp_download_events{event="retried"} 0
+immudb_remoteapp_download_events{event="started"} 0
+immudb_remoteapp_download_events{event="succeeded"} 0
+# HELP immudb_remoteapp_open_time Histogram of the total time required to open a chunk of data stored on an immudb remote storage
+# TYPE immudb_remoteapp_open_time histogram
+immudb_remoteapp_open_time_bucket{le="0.1"} 0
+immudb_remoteapp_open_time_bucket{le="0.25"} 0
+....
+immudb_remoteapp_open_time_sum 0.747774699
+immudb_remoteapp_open_time_count 2
+# HELP immudb_remoteapp_read_bytes Total number of bytes read from immudb remote storage (including cached reads)
+# TYPE immudb_remoteapp_read_bytes counter
+immudb_remoteapp_read_bytes 0
+# HELP immudb_remoteapp_read_events Read event counters for immudb remote storage
+# TYPE immudb_remoteapp_read_events counter
+immudb_remoteapp_read_events{event="errors"} 0
+immudb_remoteapp_read_events{event="total_reads"} 0
+# HELP immudb_remoteapp_s3_download_bytes Number data bytes (excluding headers) downloaded from s3
+# TYPE immudb_remoteapp_s3_download_bytes counter
+immudb_remoteapp_s3_download_bytes 2.097657e+06
+# HELP immudb_remoteapp_s3_upload_bytes Number data bytes (excluding headers) uploaded to s3
+# TYPE immudb_remoteapp_s3_upload_bytes counter
+immudb_remoteapp_s3_upload_bytes 2.097657e+06
+# HELP immudb_remoteapp_uncached_read_bytes Direct (uncached) read byte counters for immudb remote storage
+# TYPE immudb_remoteapp_uncached_read_bytes counter
+immudb_remoteapp_uncached_read_bytes 2.097645e+06
+# HELP immudb_remoteapp_uncached_read_events Direct (uncached) read event counters for immudb remote storage
+# TYPE immudb_remoteapp_uncached_read_events counter
+immudb_remoteapp_uncached_read_events{event="errors"} 0
+immudb_remoteapp_uncached_read_events{event="total_reads"} 2
+# HELP immudb_remoteapp_upload_events Immudb remote storage upload event counters
+# TYPE immudb_remoteapp_upload_events counter
+immudb_remoteapp_upload_events{event="cancelled"} 0
+immudb_remoteapp_upload_events{event="failed"} 0
+immudb_remoteapp_upload_events{event="finished"} 2
+immudb_remoteapp_upload_events{event="retried"} 0
+immudb_remoteapp_upload_events{event="started"} 2
+immudb_remoteapp_upload_events{event="succeeded"} 2
+# HELP immudb_remoteapp_upload_time Histogram of the total time required to upload a chunk to the remote storage
+# TYPE immudb_remoteapp_upload_time histogram
+immudb_remoteapp_upload_time_bucket{le="0.1"} 0
+immudb_remoteapp_upload_time_bucket{le="0.25"} 1
+....
+immudb_remoteapp_upload_time_sum 0.983413478
+immudb_remoteapp_upload_time_count 2
+```
+
+The section related to S3 in the [Grafana dashboard][grafana-dashboard] is in its early days
+but already shows some basic insights into what the data performance is using immudb
+with an AWS S3 backed data store.
+The first chart shows the histogram of a single s3 object read duration,
+the second one shows the inbound traffic needed for immudb operations.
+If those values are high,
+you should consider switching back to local disk and an alternative remote storage back-end such as EBS volumes.
+
+
+
+![database entries](/immudb/metrics-s3.jpg)
+
+
+
+
+
+
+
+## immuguard health check tool
+
+To help you check global immudb health, and reliably deploy immudb in production environment, you can use immuguard,
+the health check tool you can find in the [immudb-tools](https://github.com/codenotary/immudb-tools) repository.
+
+This simple tool periodically polls immudb, checking the state of every loaded database, and exposes a simple REST endpoint
+that can be used by docker or kubernetes probes. You simply have to configure it to talk to immudb, by providing its address
+and some credentials.
+
+Then you can poll endpoint `/immustatus` on port 8085. If immudb is operating normally and all databases are responsive, you will get a 200 HTTP return code, and the string "OK". You will get a 500 HTTP code instead, and the string "FAIL" if immudb is not responding.
+
+This configuration snippet shows how to use the `/immustatus` endpoint on kubernetes:
+
+```yaml
+livenessProbe:
+ httpGet:
+ path: /immustatus
+ port: 8085
+ failureThreshold: 5
+ periodSeconds: 60
+```
+
+You can find more information about it on its [README](https://github.com/codenotary/immudb-tools/tree/main/immuguard) page.
+
+
diff --git a/src/1.9.5/production/performance-guide.md b/src/1.9.5/production/performance-guide.md
new file mode 100644
index 0000000000..da873a8e28
--- /dev/null
+++ b/src/1.9.5/production/performance-guide.md
@@ -0,0 +1,198 @@
+# Performance guide
+
+
+
+## Dependency on the B-Tree index
+
+Immudb is built in a layered approach.
+The most low-level layer in immudb is an immutable log of changes.
+An atomic entry in this log level corresponds to a single transaction.
+Each transaction has an ID assigned - those IDs are increasing monotonically
+thus it is easy to reference a specific transaction ID.
+
+Each transaction contains a list of Key-Metadata-Value entries which correspond to
+changes to the database made within such transaction.
+
+Transactions ending up in immudb are protected from tampering attacks
+by the parallel Merkle tree structure built from the transaction data.
+
+By default immudb builds an additional index based on a B-tree structure for fast key lookup.
+This B-Tree is built in an asynchronous routine in parallel to the write operations.
+This asynchronous nature of indexing can be used to gain significant performance gains
+by avoiding strict dependency on the indexing.
+
+```text
++------------------+
+| SQL |
++---------+--------+
+ |
+ v
++------------------+
+| KV (Indexed) |
++---------+--------+
+ |
+ v
++------------------+
+| Log |
++---------+--------+
+```
+
+### Using immudb as an immutable ledger
+
+To achieve the best performance immudb can be used as an immutable ledger only.
+In such a case, the index can be completely ignored to avoid the performance and time penalty due to indexing.
+Below is the list of operations that do not rely on the index at all.
+
+* Data should be inserted into the database in asynchronous mode
+ that is enabled by setting the `noWait` GRPC argument to `true`.
+ To avoid dependency on the index one should also avoid using
+ conditional writes and [ExecAll][exec-all] operations.
+* To read the data from the database, use one of the following operations:
+ * [GetAt][get-at-since] to get value of a key set at given tx ID,
+ * [GetAtTxID][get-at-TxID] to get all entries from given transaction,
+ * [TxScan][tx-scan] to perform an enhanced scan of entries within one transaction.
+  Standard retrieval of the most recent value for a given key should not be used.
+ In most cases this means that the transaction ID must be stored in some external persistent storage.
+
+The SQL layer heavily depends on the B-tree index thus it can not be used when immudb is
+treated as an immutable ledger only.
+
+[get-at-since]: ../develop/reading.md#get-at-and-since-a-transaction
+[get-at-TxID]: ../develop/reading.md#get-at-txid
+[tx-scan]: ../develop/transactions.md#txscan
+[exec-all]: ../develop/transactions.md#execall
+[cond-write]: ../develop/reading.md#conditional-writes
+
+### Indexed KV layer - asynchronous
+
+Working with Key-Value entries is hard without the ability to quickly get the value behind some key,
+which requires the B-Tree index structure.
+
+The B-Tree index is built in an asynchronous mode - once data is inserted into the transaction log,
+a separate routine periodically updates it.
+By default all immudb operations wait for the index to be up to date
+to ensure that the most recent writes are already fully processed.
+In some use cases such waiting for the indexer is not necessary and can be skipped
+or reduced, which leads to much greater performance of the application.
+Below is the list of operations that adjust the index processing requirements.
+
+* Data can be inserted into the database in asynchronous mode
+ that is enabled by setting the `noWait` GRPC argument to `true`.
+ By doing so, the data is inserted as quickly as possible into the
+ transaction log and right after that, the response is sent to the caller
+ without waiting for the index at all.
+ To avoid dependency on the btree one must also avoid using
+ [conditional writes][cond-write] and [ExecAll][exec-all] operations that implicitly require
+ up-to-date index.
+* Reading the data from the database should be done by using one of the following operations:
+ * [GetAt][get-at-since] to get value of a key set at given tx ID,
+ * [GetAtTxID][get-at-TxID] to get all entries from given transaction,
+ * [GetSince][get-at-since] where the indexer is only required to process up to given transaction ID,
+ * [TxScan][tx-scan] to perform an enhanced scan of entries within one transaction.
+
+The SQL layer heavily depends on the B-tree index thus it can not be used when relaxed indexing requirements are used.
+
+### Indexed KV layer - synchronous
+
+This mode of operation is very similar to the asynchronous one but with the requirement
+that the B-tree index must be up-to-date. This is the default mode that immudb operates in.
+
+When using immudb in synchronous mode, all functionalities of immudb's KV interface can be used.
+Certain operations such as [ExecAll][exec-all] or [conditional writes][cond-write] require up-to-date index
+and should only be used when there's a guarantee that those will meet the performance
+requirements of the application.
+
+### SQL
+
+When immudb is used as an SQL database, all operations require an up to date index.
+This means that optimizations that relax B-tree indexing requirements can not be used.
+
+The SQL layer in immudb is built on top of the KV layer.
+For each row in SQL table one entry is generated for the primary key.
+That entry contains the whole row data.
+
+In addition to that, for every index on that table, one additional KV entry
+is added that keeps the reference to the original entry created for the primary key.
+
+
+
+
+
+## Data modelling
+
+Applications can structure their data in many different ways.
+The chosen data model will affect the performance of immudb.
+Below are some tips that can help optimizing and selecting the correct model for the data.
+
+### KV layer - key length
+
+Short keys should be preferred over long ones. Long keys naturally increase the disk space usage
+increasing the overall IO pressure, but also affect the shape of the B-Tree. The longer the keys used,
+the deeper the B-tree will become, and thus a longer B-tree traversal needs to be performed to find a specific
+key entry. A larger B-tree also means that the internal B-tree cache will perform much worse, reducing the
+hit/miss ratio.
+
+### KV layer - sequential keys
+
+Sequentially built keys (like those based on monotonically increasing numbers)
+should be preferred over randomly generated ones.
+This directly relates to the hit/miss ratio of the B-tree cache.
+Sequential keys tend to use similar parts of the B-tree thus there is a much higher
+probability that B-tree traversal will use cached nodes.
+
+This is especially important when immudb is used with an S3 remote storage.
+Any B-tree cache miss will result in a heavy read operation for the S3 server.
+
+### SQL layer - indexes
+
+All SQL operations in immudb are lowered to operations on the KV layer.
+To optimize the performance of the SQL engine it is thus important to
+understand how immudb generates keys for the KV layer from SQL data.
+
+Low-level keys generated by the SQL layer are directly related to
+SQL indexes. Each index consists of serialized values of columns
+that are part of the index. This means that the more columns are in
+the index, the longer keys will be produced. Same happens with
+column types. Small types such as INTEGER will result in short
+low-level key where larger ones (such as VARCHAR with large limit)
+will produce very long keys.
+
+Each table can have multiple indexes where each new index will
+generate new entries inserted into the KV layer. It is thus important
+to avoid creating unnecessary indexes.
+
+
+
+
+
+## immudb replicas
+
+immudb offers [replication](replication.md) mechanism where replicas follow the leader
+node cloning its data.
+
+Such replica nodes can be used to handle read operations reducing the
+load on the leader node. In such a scenario it is important to ensure
+that the replica is following the leader in asynchronous mode,
+which results in eventual consistency guarantees.
+
+Replication can be configured in various ways including tree-like topology
+and multiple replication levels (like [replicas of replicas](replication.md#replica-of-a-replica)).
+With such feature, the immudb cluster can be scaled to a very large topology with the ability
+to handle huge read workloads.
+
+
+
+
+
+## embedded immudb vs standalone service
+
+immudb can be easily [embedded](../embedded/embedding.md) into other golang applications.
+By doing so, the application accesses immudb data directly without an additional TCP connection.
+That way the additional cost of handling TCP connectivity, GRPC serialization etc. are removed.
+
+It is important to note however that the embedded immudb interface is much simpler
+than the GRPC API - e.g. it has no direct support for references or sorted sets.
+In addition to that, any networking-related features such as replication or backups
+must be handled by the application itself.
+
+
diff --git a/src/1.9.5/production/planning.md b/src/1.9.5/production/planning.md
new file mode 100644
index 0000000000..af95a41cb4
--- /dev/null
+++ b/src/1.9.5/production/planning.md
@@ -0,0 +1,35 @@
+
+# Planning
+
+
+
+Before running a database in production, it is important to plan:
+
+- Computing resources
+- Disk space
+- Configuration
+- Backups
+- Health Monitoring
+
+### Computing Resources
+
+immudb was designed to have a stable memory/CPU footprint.
+
+Memory is pre-allocated based on specified maximum concurrency, maximum number of entries per transaction, cache sizes, etc.
+
+With the default settings, it's possible to stress immudb while memory usage stays around 1.5GB (assuming small values). For larger values, additional memory is needed to hold a transaction's values during commit time.
+
+### Disk space and data location
+
+immudb is an immutable database, this means all history is preserved and therefore disk usage is higher than a normal database.
+
+Data is stored in the directory specified by the `dir` option.
+
+
+
+
+
+
+
+
+
diff --git a/src/1.9.5/production/replication.md b/src/1.9.5/production/replication.md
new file mode 100644
index 0000000000..a5a879afac
--- /dev/null
+++ b/src/1.9.5/production/replication.md
@@ -0,0 +1,169 @@
+
+# Replication
+
+
+
+### Replication strategy
+
+immudb includes support for replication by means of a follower approach. A database can be created or configured either to be a primary or a replica of another database.
+
+
+
+![replication using grpc clients](/immudb/replication-servers.jpg)
+
+
+
+During replication, primary databases have a passive role. The grpc endpoint `ExportTx` is used by replicas to fetch unseen committed transactions from the primary.
+
+Replicas are read-only and any direct write operation will be rejected. Using replicas allows query loads to be distributed.
+
+
+
+![replicator fetches committed txs via grpc calls and replicate them using in-process method invocations](/immudb/replication-comm.jpg)
+
+
+
+
+
+
+### Replication and users
+
+As shown in the diagram above, the replicator fetches committed transactions from the primary via grpc calls. Internally, the replicator instantiates an immudb client (using the official golang SDK) and fetches unseen committed transactions from the primary. In order to do so, the replicator requires valid user credentials with admin permissions, otherwise the primary will reject any request.
+
+
+
+
+
+### Creating a replica
+
+Creating a replica of an existing database using immuadmin is super easy:
+
+```bash
+$ ./immuadmin login immudb
+Password:
+logged in
+$ ./immuadmin database create \
+ --replication-is-replica \
+ --replication-primary-username=immudb \
+ --replication-primary-password=immudb \
+ --replication-primary-database=defaultdb \
+ replicadb
+database 'replicadb' {replica: true} successfully created
+```
+
+::: tip
+
+Display all database creation flags with:
+
+```bash
+$ ./immuadmin help database create
+```
+
+:::
+
+### Creating a second immudb instance to replicate systemdb and defaultdb behaves similarly
+
+Start immudb with enabled replication:
+
+```bash
+$ ./immudb \
+ --replication-is-replica \
+ --replication-primary-username=immudb \
+ --replication-primary-password=immudb \
+ --replication-primary-host=127.0.0.1
+```
+
+::: tip
+
+Display all replication flags:
+
+```bash
+$ ./immudb --help
+```
+
+:::
+
+
+
+
+
+### Multiple replicas
+
+It's possible to create multiple replicas of a database. Each replica works independently of the others.
+
+
+
+![multiple replicas of the same primary database](/immudb/replication-multiple.jpg)
+
+
+
+Given the primary database acts in passive mode, there are no special steps needed in order to create more replicas. Thus, by repeating the same steps to create the first replica it's possible to configure new ones.
+
+
+
+
+
+### Replica of a replica
+
+In case many replicas are needed or the primary database is under heavy load, it's possible to delegate the creation of replicas to an existent replica. This way, the primary database is not affected by the total number of replicas being created.
+
+
+
+![a replica indirectly following the primary](/immudb/replication-chain.jpg)
+
+
+
+
+
+
+
+### External replicator
+
+By creating a database as a replica but with disabled replication, no replicator is created for the database and an external process could be used to replicate committed transactions from the primary. The grpc endpoint `ReplicateTx` may be used to externally replicate a transaction.
+
+
+
+
+
+### Heterogeneous settings
+
+Replication is configured per database. Thus, the same immudb server may hold several primary and replica databases at the same time.
+
+
+
+
+
+
+
+
+
+
+
+### Replicator tool
+
+You may need to keep a copy of every database on one immudb instance on another, so that when a new database is created
+on the main instance, a replicated database is created on the replica.
+
+In that case you can use the [replicator tool](https://github.com/codenotary/immudb-tools/tree/main/replicator), part of the
+[immudb tools](https://github.com/codenotary/immudb-tools).
+
+This tool connects to two immudb instances, one main instance and a replica. Periodically, it scans the list of databases
+present on the main instance and compares it with the list of databases present on the replica. If it finds any new
+databases that are missing on the replica, it recreates them on the replica and configures them to start following
+their counterparts on the main instance.
+
+If necessary (usually it is) it will also create the replication user on the main instance for the new database(s).
+
+Using this tool you won't need to manually configure replicated databases on replica instance(s).
+
+You can have more information about this tool on its [README page](https://github.com/codenotary/immudb-tools/tree/main/replicator).
+
+
+
+
+
+
+
+
+
+
diff --git a/src/1.9.5/production/request-metadata.md b/src/1.9.5/production/request-metadata.md
new file mode 100644
index 0000000000..22eb8d6235
--- /dev/null
+++ b/src/1.9.5/production/request-metadata.md
@@ -0,0 +1,26 @@
+# Request Metadata
+
+To enhance the auditing process, immudb can be configured to inject request information - such as user identifiers, IP addresses, and other relevant details - into transaction metadata.
+
+
+
+## Enabling request metadata logging
+
+Request metadata logging can be enabled with the `--log-request-metadata` flag of the immudb command (or by setting
+the corresponding `IMMUDB_LOG_REQUEST_METADATA` env var to `TRUE`).
+
+For example, when running the immudb docker image:
+
+```bash
+$ docker run -e IMMUDB_LOG_REQUEST_METADATA=TRUE -d --net host --name immudb codenotary/immudb:latest
+```
+
+### Why should I enable request metadata?
+
+When this functionality is enabled, each transaction includes comprehensive metadata that provides context for the request executed by the immudb server. This metadata allows auditors and administrators to easily retrieve detailed information about the context of each transaction (see how to retrieve transaction metadata from an [SQL table](../develop/sql/querying.md#transaction-metadata)).
+
+Specifically, it enables the identification of who initiated the transaction, from which IP address, and any other pertinent request details. For example, if there is a need to investigate suspicious activity or trace the source of a particular change, this request metadata offers a clear and concise trail of the relevant data.
+
+Note that although the extra information can increase the storage overhead, the benefits of enhanced transparency and accountability often outweigh the extra storage cost, ensuring that all actions within the database can be thoroughly examined and verified.
+
+
diff --git a/src/1.9.5/production/retention.md b/src/1.9.5/production/retention.md
new file mode 100644
index 0000000000..f8b37b1825
--- /dev/null
+++ b/src/1.9.5/production/retention.md
@@ -0,0 +1,118 @@
+# Retention
+
+
+
+## Data Retention
+
+Data retention refers to the practice of keeping data for a specific period of time before deleting it. This practice is commonly used in various industries and organizations to comply with legal and regulatory requirements, as well as to manage storage space and maintain data integrity.
+
+One of the primary benefits of data retention is its ability to help maintain disk space. By setting a retention period, organizations can automatically delete data that is no longer needed, freeing up disk space for new data. This can be particularly useful for organizations that deal with large amounts of data, such as those in the financial or healthcare industries, where storing vast amounts of data can be costly.
+
+In addition to helping maintain disk space, data retention also helps organizations manage their data more efficiently. By setting specific retention policies, organizations can ensure that data is only stored for as long as necessary and is deleted once it is no longer needed. This can help prevent data from being accidentally or maliciously retained beyond its usefulness, reducing the risk of data breaches and other security incidents.
+
+
+
+
+
+
+## Data Retention in immudb
+
+In immudb, the data retention feature only deletes data that is stored in the value log, leaving the proofs and schema configuration data intact. This is an important aspect of the retention feature because it ensures that the immudb database remains functional and that the proofs and schema configuration data required for immudb to operate correctly are not deleted.
+
+The value log is where the actual values are stored in immudb. By only deleting data in the value log, the retention feature can remove old data from the immudb database while leaving the proofs and schema configuration data intact, hence freeing up disk space.
+
+For example, suppose an organization has set a retention period of six months for their immudb database. After six months, any data that is older than six months will be automatically deleted from the value log.
+
+
+
+
+
+## Settings
+
+Data retention is enabled per database. You can truncate data from the database in two ways:
+
+#### 1) While creating a database
+```bash
+Usage:
+ immuadmin database create {database_name} --retention-period={retention_period} --truncation-frequency={truncation_frequency}
+Flags:
+ --retention-period duration duration of time to retain data in storage
+ --truncation-frequency duration set the truncation frequency for the database (default 24h0m0s)
+```
+A background process is set up on creation of the database which runs once every `truncation-frequency` interval, and then truncates the data beyond the `retention-period`.
+
+Please note that the default value of the `truncation-frequency` is set to 24 hours, and it does not need to be set explicitly when creating/updating a database.
+
+#### 2) Manually truncating data through immuadmin
+
+The following flags in the `immuadmin` tool will help in truncating data up to data retention period for your database.
+```bash
+Usage:
+ immuadmin database truncate [flags]
+
+Examples:
+truncate --yes-i-know-what-i-am-doing {database_name} --retention-period {retention_period}
+
+Flags:
+ -h, --help help for truncate
+ --retention-period duration duration of time to retain data in storage
+ --yes-i-know-what-i-am-doing safety flag to confirm database truncation
+```
+
+
+
+
+
+## Setup
+
+This setup guides you through a simple demonstration of how data retention works in immudb.
+
+#### Before you begin
+
+Make sure you already have [immudb installed](../running/download.md).
+
+> Since you're running locally, the server uses the hostname `localhost`.
+
+#### Step 1. Start the server
+
+1. Run the immudb server:
+
+ ```bash
+ $ immudb --dir test_data
+ ```
+
+2. In a new terminal, use the [`immuadmin`](../connecting/clitools.md) command to create a database on the immudb server:
+
+ Login to immudb
+
+ ```shell
+ $ immuadmin login immudb
+ ```
+
+   Create a database `testdb` with the retention period set to 1 day.
+
+ > Note that the default value of the `truncation-frequency` is set to 24 hours, and it does not need to be set explicitly when creating/updating a database.
+
+ ```shell
+ $ immuadmin database create testdb \
+ --retention-period=24h
+ ```
+
+   At this point, the `testdb` has been created on the server, and every 24 hours the data older than the `retention-period` will be deleted from the value-log.
+
+3. Alternatively, you can use the [`immuadmin`](../connecting/clitools.md) command to truncate an existing database which has not been setup with retention period:
+
+ Login to immudb
+
+ ```shell
+   $ immuadmin login immudb
+ ```
+
+ ```shell
+ $ immuadmin database truncate --yes-i-know-what-i-am-doing=true testdb \
+ --retention-period=24h
+ ```
+
+   At this point, the data beyond the retention period will be deleted in `testdb`.
+
+
diff --git a/src/1.9.5/production/s3-storage.md b/src/1.9.5/production/s3-storage.md
new file mode 100644
index 0000000000..46bb711542
--- /dev/null
+++ b/src/1.9.5/production/s3-storage.md
@@ -0,0 +1,18 @@
+# S3 Storage Backend
+
+
+
+immudb can store its data in the Amazon S3 service (or a compatible alternative). The following example shows how to run immudb with the S3 storage enabled:
+
+```bash
+export IMMUDB_S3_STORAGE=true
+export IMMUDB_S3_ACCESS_KEY_ID=
+export IMMUDB_S3_SECRET_KEY=
+export IMMUDB_S3_BUCKET_NAME=
+export IMMUDB_S3_LOCATION=
+export IMMUDB_S3_PATH_PREFIX=testing-001
+export IMMUDB_S3_ENDPOINT="https://${IMMUDB_S3_BUCKET_NAME}.s3.amazonaws.com"
+
+./immudb
+```
+
\ No newline at end of file
diff --git a/src/1.9.5/production/sync-replication.md b/src/1.9.5/production/sync-replication.md
new file mode 100644
index 0000000000..a8bfa55273
--- /dev/null
+++ b/src/1.9.5/production/sync-replication.md
@@ -0,0 +1,429 @@
+# Synchronous Replication
+
+
+
+## Synchronous Replication
+
+Replication is a common technique used in distributed databases to achieve scalable data distribution for better fault tolerance. Multiple replicas of a primary database server are created for higher durability. One of the replication methods is to update each replica as part of a single atomic transaction, also known as synchronous replication. Consensus algorithms apply this approach to achieve strong consistency on a replicated data set. immudb now supports the option for synchronous replication.
+
+### Architecture
+
+In synchronous replication, each commit of a write transaction will wait until there is a confirmation that the commit has been committed to both the primary and quorum of replica server(s). This method minimizes the possibility of data loss.
+
+immudb uses a quorum-based technique to enforce consistent operation in a distributed cluster. A quorum of replicas is used to ensure that synchronous replication is achieved even when replication is not completed across all replica servers. A quorum is a majority of the number of replicas in a cluster setup. The quorum can be set when creating or updating the database on the primary node.
+
+The primary server will wait for acknowledgment from a quorum of replica server(s) that each transaction is durably stored before proceeding. The drawback is that if enough replica server(s) go down or can’t commit a transaction, and the quorum is not reached, the primary server goes into a hung state.
+
+![synchronous replication](/immudb/replication-sync.png)
+
+Comparing this to the asynchronous replication mode, the primary server does not need to wait for transaction-completion acknowledgment from the replica server. The replication transactions queue up on the replica server, and the two servers can remain out-of-sync for a specified time until the processing completes.
+
+![asynchronous replication](/immudb/replication-async.png)
+
+immudb provides support for synchronous replication by means of a follower approach. There are two grpc endpoints used for replication:
+
+- `ExportTx`: Used by replicas to fetch precommitted transactions from the primary database server, and also to send the current database state to update the primary server.
+
+- `ReplicateTx`: Used by replicas to commit precommitted transactions (fetched from the primary) on the replica server.
+
+The primary server keeps a record of the current state of each replica. The current state of each replica is updated through the `ExportTx` grpc call from the replica server. So when a new transaction request comes to the primary server, it precommits the transaction, and checks if a quorum (on the transaction) has been reached by the replica server(s) by checking their state continuously. If the quorum was reached, the transaction is marked as successful.
+
+
+
+![how synchronous replication works](/immudb/replication-state.png)
+
+
+
+
+
+
+
+## Deciding on number of servers in a cluster
+
+Synchronous replication in a cluster can function only if the majority of servers are up and running. In systems with enabled data replication, it is important to consider the throughput of write operations. Every time data is written to the cluster, it needs to be copied to multiple replicas. Every additional server adds some overhead to complete this write. The latency of data write is directly proportional to the number of servers forming the quorum.
+
+
+
+
+
+## Settings
+
+Synchronous replication is enabled per database. The following flags in the `immuadmin` tool will help in setting up synchronous replication for your database.
+
+```bash
+Flags:
+ --replication-allow-tx-discarding allow precommitted transactions to be discarded if the replica diverges from the primary
+ --replication-commit-concurrency uint32 number of concurrent replications (default 10)
+ --replication-is-replica set database as a replica
+ --replication-prefetch-tx-buffer-size uint32 maximum number of prefeched transactions (default 100)
+ --replication-primary-database string set primary database to be replicated
+ --replication-primary-host string set primary database host
+ --replication-primary-password string set password used for replication to connect to the primary database
+ --replication-primary-port uint32 set primary database port
+ --replication-primary-username string set username used for replication to connect to the primary database
+ --replication-sync-acks uint32 set a minimum number of replica acknowledgements required before transactions can be committed
+ --replication-sync-enabled enable synchronous replication
+
+```
+
+
+
+
+
+## Setup
+
+This setup guides you through a simple demonstration of how synchronous replication works in immudb. Starting with a 2-node local cluster, you'll write some data and verify that it replicates in sync.
+
+#### Before you begin
+
+Make sure you already have [immudb installed](../running/download.md).
+
+> Since you're running a local cluster, all nodes use the same hostname (`localhost`).
+
+#### Step 1. Start the cluster
+
+1. Run the primary server:
+
+ ```bash
+ $ immudb --dir data_primary
+ ```
+
+2. In a new terminal, start replica server:
+
+ ```bash
+ $ immudb --dir data_replica \
+ --port=3324 \
+ --pgsql-server=false \
+ --metrics-server=false
+ ```
+
+3. In a new terminal, use the [`immuadmin`](../connecting/clitools.md) command to create a database on the primary server:
+
+ Login to immudb
+
+ ```shell
+ $ immuadmin login immudb
+ ```
+
+   Create a database `primarydb` that requires 1 confirmation from the synchronous replicas to do the commit.
+
+ > Note that the number of confirmations needed (`--replication-sync-acks` option) should be set to `ceil(number of replicas/2)`
+ to achieve majority-based quorum.
+
+ ```shell
+ $ immuadmin database create primarydb \
+ --replication-sync-acks 1 \
+ --replication-sync-enabled
+ ```
+
+ At this point, the `primarydb` has been created on the primary server.
+
+4. Use the [`immuadmin`](../connecting/clitools.md) command to create a database on the replica server:
+
+ Login to immudb
+
+ ```shell
+ $ immuadmin login immudb -p 3324
+ ```
+
+ Create a database `replicadb` which will sync from the primary server's database `primarydb`
+
+ ```shell
+ $ immuadmin database create replicadb -p 3324 \
+ --replication-is-replica \
+ --replication-primary-host 127.0.0.1 \
+ --replication-primary-port 3322 \
+ --replication-primary-database primarydb \
+ --replication-primary-username immudb \
+ --replication-primary-password immudb \
+ --replication-sync-enabled \
+ --replication-prefetch-tx-buffer-size 1000 \
+ --replication-commit-concurrency 100
+ ```
+
+ At this point, the `replicadb` has been created on the replica server to sync with the `primarydb` on primary server.
+
+#### Step 2. Send a request
+
+1. Use the [`immuclient`](../connecting/clitools.md) command to commit a transaction on the primary server:
+
+ Login to immudb
+
+ ```shell
+ $ immuclient login immudb
+ ```
+
+ Select database
+
+ ```shell
+ $ immuclient use primarydb
+ ```
+
+ Set a value
+
+ ```shell
+ $ immuclient safeset foo bar
+ ```
+
+2. Verify the transaction on the replica server using the [`immuclient`](../connecting/clitools.md) command:
+
+ Login to immudb
+
+ ```shell
+ $ immuclient login immudb -p 3324
+ ```
+
+ Select database
+
+ ```shell
+   $ immuclient use replicadb -p 3324
+ ```
+
+ Verify the key
+
+ ```shell
+ $ immuclient safeget foo -p 3324
+ ```
+
+#### Step 3. Stop the replica server
+
+1. Stop the replica server running on port 3324
+
+2. Send a transaction to the primary server:
+
+ Login to immudb
+
+ ```shell
+ $ immuclient login immudb
+ ```
+
+ Select database
+
+ ```shell
+ $ immuclient use primarydb
+ ```
+
+ Set a value
+
+ ```shell
+ $ immuclient safeset foo bar
+ ```
+
+ The client will block. This is because the primarydb requires 1 sync replica, and since the replica server is down, there is no ack from the replica server, hence synchronous transaction is blocked.
+
+
+
+
+
+## Recovering from a replica loss
+
+The primary node will continue read and write operations as long as the required quorum of replicas can send write confirmation to the primary node.
+If there are not enough confirmations, write operations will be queued and will wait for enough replicas to synchronize with the cluster.
+Read operations in such cases will continue to work.
+
+The simplest way to recover the replica is to simply add a new replica into the cluster and setup replication in the same way as during
+the initial cluster setup, e.g.:
+
+```shell
+$ immuadmin database create replicadb -p 3324 \
+ --replication-is-replica \
+ --replication-primary-host 127.0.0.1 \
+ --replication-primary-port 3322 \
+ --replication-primary-database primarydb \
+ --replication-primary-username immudb \
+ --replication-primary-password immudb \
+ --replication-sync-enabled \
+ --replication-prefetch-tx-buffer-size 1000 \
+ --replication-commit-concurrency 100
+```
+
+The new replica will start fetching transactions from the primary node and as soon as it synchronizes all transactions
+it will become a valid member of the quorum for transaction confirmation.
+
+### Speeding up initial replica synchronization
+
+The synchronization process of a new replica may take a lot of time if the database is large or has to handle a lot of normal traffic.
+Such replica will fetch all transactions performing additional checksum calculations and validations.
+That way the security of the whole cluster is further hardened revealing tampering attempts in any transaction
+in the database including those transactions that were not accessed for a very long time.
+
+There are situations however when the speed of recovery is crucial.
+In such situations the data of the database may be copied from another cluster node.
+This should be done while the database is unloaded:
+
+#### Step 1. Create replica database
+
+```shell
+$ immuadmin database create replicadb -p 3324 \
+ --replication-is-replica \
+ --replication-primary-host 127.0.0.1 \
+ --replication-primary-port 3322 \
+ --replication-primary-database primarydb \
+ --replication-primary-username immudb \
+ --replication-primary-password immudb \
+ --replication-sync-enabled \
+ --replication-prefetch-tx-buffer-size 1000 \
+ --replication-commit-concurrency 100
+database 'replicadb' {replica: true} successfully created
+```
+
+#### Step 2. Unload replica from the database
+
+Once database is unloaded, we can safely work on the files of that database.
+
+```shell
+$ immuadmin database unload replicadb
+database 'replicadb' successfully unloaded
+```
+
+#### Step 3. Copy files from other node
+
+```shell
+$ rsync -ave --delete \
+ :/replicadb/ \
+ :/replicadb/
+sending incremental file list
+....
+
+sent 590,357,187 bytes received 230 bytes 168,673,547.71 bytes/sec
+total size is 590,212,158 speedup is 1.00
+```
+
+> Note: if there are writes on the database happening during the sync, it is necessary to
+> unload the source replica before copying files to avoid inconsistencies between database files.
+
+#### Step 4. Load database on new replica
+
+```shell
+$ immuadmin database load replicadb
+database 'replicadb' successfully loaded
+```
+
+
+
+
+
+## Recovering from a primary loss
+
+Current immudb cluster setup requires the primary node to be always predefined.
+This means that in case of a primary node loss,
+it is necessary to manually promote a replica to become the primary node.
+Generally, electing the new primary depends on the number of available instances,
+their precommit state, and the replication-sync-acks setting on the primary.
+
+#### Step 1. Inspect states of all replicas in the cluster and choose the new primary node
+
+```shell
+$ immuclient login immudb
+Password:
+Successfully logged in
+
+$ immuclient use replicadb
+Now using replicadb
+
+$ immuclient status
+database: replicadb
+txID: 734931
+hash: 5e2f2feec159bc19c952a7a93832338a178936c5b258d0c906b7c145faf3a4b5
+precommittedTxID: 734931
+precommittedHash: 5e2f2feec159bc19c952a7a93832338a178936c5b258d0c906b7c145faf3a4b5
+```
+
+It's important to carefully choose the new primary node in order to avoid losing committed transactions.
+It is generally a good idea to promote some instance as a primary that has already precommitted the largest
+transaction contained in at least `replication-sync-acks` instances.
+
+In the following scenario, we consider a three-node cluster with an unreachable primary:
+
+```shell
+# state in replica1
+precommittedTxID: 734931
+precommittedHash: 5e2f2feec159bc19c952a7a93832338a178936c5b258d0c906b7c145faf3a4b5
+
+# state in replica2
+precommittedTxID: 734920
+precommittedHash: 2a4f41c3d5b03ff014ca30b53d23ee3a098936c3b2a8a0d6e9b3b540cac166a1
+```
+
+In the event that the primary node becomes unavailable, a replica with a higher precommittedTxID should be chosen as the primary.
+If `replication-sync-acks` is 2, both replicas must acknowledge precommit before the primary can commit.
+In the scenario above, this would mean 734920 was the most recent committed transaction. Therefore, replica2 could also be selected as the new primary.
+
+#### Step 2. Switch the selected replica to become new primary
+
+```shell
+$ immuadmin database update replicadb -p 3324 \
+ --replication-sync-enabled \
+ --replication-sync-acks 1 \
+ --replication-is-replica=false
+database 'replicadb' {replica: false} successfully updated
+```
+
+> Note that the number of required sync replicas may be temporarily lowered due to the loss of the primary node.
+
+#### Step 3. Switch other replicas to follow new primary
+
+```shell
+$ immuadmin database update replicadb -p 3325 \
+ --replication-primary-host 127.0.0.1 \
+ --replication-primary-port 3324 \
+ --replication-primary-database replicadb
+```
+
+#### Step 4. Truncate precommitted transactions on other replicas if needed
+
+It may happen that the new replica will reject synchronizing with the new primary.
+In such case immudb will report an error in logs:
+
+```text
+immudb 2022/10/11 15:57:42 ERROR: replica precommit state at 'replicadb' diverged from primary's
+```
+
+To fix this issue the replica may need to discard precommited transactions.
+This can be easily instructed with the flag `replication-allow-tx-discarding` as follows:
+
+```shell
+$ immuadmin database update replicadb -p 3325 --replication-allow-tx-discarding
+```
+
+In case the immudb instance itself is run as a replica, to fix that issue please restart immudb with the `--replication-allow-tx-discarding` flag that will discard any transaction on the replica that has not yet been fully committed.
+
+#### Step 5. Start a new replica to restore original cluster size
+
+Because the primary node was irrecoverably lost, a new replica should be spawned in its place.
+Please refer to the previous section dealing with the loss of replica for more details
+on how to add a replacement replica node.
+
+#### Step 6. Point immudb clients to the new primary node
+
+Clients performing write operations should now be switched to the new primary node.
+
+
+
+
+
+## Changing configuration of a locked primary database
+
+In most cases the primary database can be easily updated and the change will be applied without the need for a restart.
+That way the primary node can change the number of required confirmations,
+enable/disable synchronous replication and even be converted to a replica.
+
+There can be a situation though where the database is already blocked with writes waiting for confirmations from replicas.
+This could happen if replicas became unavailable
+or as a result of misconfiguration where the replicas quorum value was set to some large value.
+
+In this situation trying to change the configuration of the database will block as well and will be unblocked once
+the database itself continues committing transactions.
+
+If the database can not be fixed to restore commits (e.g. if it is impossible to add enough synced replicas quickly enough),
+the following workaround can be used (please note that it requires immudb restart):
+
+1. Update database settings, e.g. run `immuadmin database update` command - that operation will block indefinitely but will
+ already persist new database settings
+2. Restart the immudb database instance - upon restart, the configuration of the database is read and applied from persistent settings
+ thus it will apply the configuration set in the previous step.
+
+With this approach, the number of required confirmations can be lowered down to the correct value
+or disabled to switch to asynchronous replication.
+
+
diff --git a/src/1.9.5/releasenotes.md b/src/1.9.5/releasenotes.md
new file mode 100644
index 0000000000..c8e150e8ec
--- /dev/null
+++ b/src/1.9.5/releasenotes.md
@@ -0,0 +1,8 @@
+# Release Notes
+
+
+
+- [immudb release notes](https://github.com/codenotary/immudb/releases)
+- [immugw release notes](https://github.com/codenotary/immugw/releases)
+
+
\ No newline at end of file
diff --git a/src/1.9.5/running/build.md b/src/1.9.5/running/build.md
new file mode 100644
index 0000000000..a1898036d6
--- /dev/null
+++ b/src/1.9.5/running/build.md
@@ -0,0 +1,46 @@
+# Building from source
+
+
+
+### Build the binaries
+
+Building binaries requires a Linux operating system.
+
+To build the binaries yourself, simply clone [immudb repository](https://github.com/codenotary/immudb) and run:
+
+```bash
+make all
+```
+
+immudb can be cross compiled for different systems and architectures by setting `GOOS` and `GOARCH` variables, i.e.:
+
+```bash
+GOOS=windows GOARCH=amd64 make all
+```
+
+
+
+
+
+### macOS specific
+
+The community already added immudb to [HomeBrew](https://formulae.brew.sh/formula/immudb), therefore you can simply run
+```bash
+brew install immudb
+```
+
+
+
+
+
+### Build the Docker images
+
+If you want to build the container images yourself, simply clone [immudb repository](https://github.com/codenotary/immudb) and run:
+
+```bash
+docker build -t myown/immudb:latest -f Dockerfile .
+docker build -t myown/immuadmin:latest -f Dockerfile.immuadmin .
+docker build -t myown/immuclient:latest -f Dockerfile.immuclient .
+```
+
+
diff --git a/src/1.9.5/running/configuration.md b/src/1.9.5/running/configuration.md
new file mode 100644
index 0000000000..1f39db841c
--- /dev/null
+++ b/src/1.9.5/running/configuration.md
@@ -0,0 +1,167 @@
+# Configuration
+
+
+
+This page describes how to set different settings in immudb.
+
+Settings can be specified as command line options to immudb (see `immudb -h`), in a configuration file, or as environment variables.
+
+
+
+
+
+### Settings
+
+| Parameter | Default | Description |
+| ------------------------------------- | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `address` | `0.0.0.0` | bind address |
+| `admin-password` | `immudb` | admin password as plain-text or base64 encoded (must be prefixed with 'enc:' if it is encoded) |
+| `auth` | `true` | enable auth |
+| `certificate` | `` | server certificate file path |
+| `clientcas` | `` | clients certificates list. Aka certificate authority |
+| `config`                              | ``                       | config file (default paths are configs or $HOME; default filename is immudb)                                                                                                                           |
+| `detached` | `false` | run immudb in background |
+| `devmode` | `false` | enable dev mode: accept remote connections without auth |
+| `dir` | `./data` | data folder |
+| `force-admin-password` | `false` | if true, reset the admin password to the one passed through admin-password option upon startup |
+| `log-request-metadata` | `false` | log request information in transaction metadata |
+| `grpc-reflection` | `true` | GRPC reflection server enabled |
+| `logfile` | `` | log path with filename. E.g. /tmp/immudb/immudb.log |
+| `logformat` | `text` | log format e.g. text/json |
+| `maintenance` | `false` | override the authentication flag |
+| `max-recv-msg-size` | `33554432` | max message size in bytes the server can receive |
+| `max-session-age-time` | infinity | max session age time is a duration after which session will be forcibly closed |
+| `max-session-inactivity-time` | `3m0s` | max session inactivity time is a duration after which an active session is declared inactive by the server. A session is kept active if server is still receiving requests from client (keep-alive or other methods) |
+| `max-sessions` | `100` | maximum number of simultaneously opened sessions |
+| `metrics-server` | `true` | enable or disable Prometheus endpoint |
+| `metrics-server-port` | `9477` | Prometheus endpoint port |
+| `mtls` | `false` | enable mutual tls |
+| `no-histograms` | `false` | disable collection of histogram metrics like query durations |
+| `pgsql-server` | `true` | enable or disable pgsql server |
+| `pgsql-server-port` | `5432` | pgsql server port |
+| `pidfile` | `` | pid path with filename. E.g. /var/run/immudb.pid |
+| `pkey` | `` | server private key path |
+| `port` | `3322` | port number |
+| `pprof` | `false` | add pprof profiling endpoint on the metrics server |
+| `replication-allow-tx-discarding` | `false` | allow precommitted transactions to be discarded if the replica diverges from the primary |
+| `replication-commit-concurrency` | `10` | number of concurrent replications |
+| `replication-is-replica` | `false` | set systemdb and defaultdb as replica |
+| `replication-prefetch-tx-buffer-size` | `100`                    | maximum number of prefetched transactions                                                                                                                                                              |
+| `replication-primary-host` | `` | primary database host (if replica=true) |
+| `replication-primary-password` | `` | password in the primary database used for replication of systemdb and defaultdb |
+| `replication-primary-port` | `3322` | primary database port (if replica=true) (default 3322) |
+| `replication-primary-username` | `` | username in the primary database used for replication of systemdb and defaultdb |
+| `replication-skip-integrity-check` | `false` | disable integrity check when reading data during replication |
+| `replication-sync-acks` | `0` | set a minimum number of replica acknowledgements required before transactions can be committed |
+| `replication-sync-enabled` | `false` | enable synchronous replication |
+| `replication-wait-for-indexing` | `false` | wait for indexing to be up to date during replication |
+| `s3-access-key-id` | `` | s3 access key id |
+| `s3-bucket-name` | `` | s3 bucket name |
+| `s3-endpoint` | `` | s3 endpoint |
+| `s3-external-identifier` | `` | use the remote identifier if there is no local identifier |
+| `s3-instance-metadata-url` | `http://169.254.169.254` | s3 instance metadata url |
+| `s3-location` | `` | s3 location (region) |
+| `s3-path-prefix` | `` | s3 path prefix (multiple immudb instances can share the same bucket if they have different prefixes) |
+| `s3-role` | `` | role name for role-based authentication attempt for s3 storage |
+| `s3-role-enabled` | `false` | enable role-based authentication for s3 storage |
+| `s3-secret-key` | `` | s3 secret access key |
+| `s3-storage` | `false` | enable or disable s3 storage |
+| `session-timeout` | `2m0s` | session timeout is a duration after which an inactive session is forcibly closed by the server |
+| `signingKey` | `` | signature private key path. If a valid one is provided, it enables the cryptographic signature of the root. E.g. "./../test/signer/ec3.key" |
+| `swaggerui` | `true` | Swagger UI enabled |
+| `synced`                              | `true`                   | synced mode prevents data loss under unexpected crashes but affects performance                                                                                                                        |
+| `token-expiry-time` | `1440` | client authentication token expiration time. Minutes |
+| `web-server` | `true` | enable or disable web/console server |
+| `web-server-port` | `8080` | web/console server port |
+
+
+
+
+
+### Configuration file
+
+Settings can be specified in a [immudb.toml configuration file](https://raw.githubusercontent.com/codenotary/immudb/master/configs/immudb.toml).
+
+Which configuration file to use is set with the `--config` option. By default, immudb looks into the `configs` subfolder in the current directory.
+
+When running immudb as a service, `immudb service install` allows you to specify the configuration file to use with the `--config` option.
+
+### Environment variables
+
+Settings specified via environment variables override the configuration file. They are specified with the `IMMUDB_` prefix; for example, `IMMUDB_DIR` specifies the `dir` setting.
+
+
+
+
+
+### Logging Levels
+
+The `LOG_LEVEL` environment variable sets the log level to be emitted from immudb logs. Valid logging level settings are `error`, `warn`, `info` (default), and `debug`. Logs that are equal to, or above, the specified level will be emitted. `error` is the highest level and `debug` is the lowest.
+
+You can set the `LOG_LEVEL` when running immudb either by setting the environment variable, or by running the command as below:
+
+```
+LOG_LEVEL=error ./immudb
+```
+
+#### Levels
+
+##### info
+
+The `info` severity is used for informational messages that do not require action.
+
+##### warn
+
+The `warn` severity is used for messages that may require special handling, but does not affect normal operation.
+
+##### error
+
+The `error` severity is used for messages that require special handling, where a normal database operation could not proceed as expected. It does not block the database.
+
+##### debug
+
+The `debug` severity is used for messages that are used for debugging purpose for the database.
+
+### Logging formats
+
+Two logging format options are available: `text` and `json`. The default logging format is `text`; the `json` format is used when explicitly specified.
+
+#### Examples of log output:
+
+##### Normal
+
+###### Command:
+
+```
+./immudb
+```
+
+###### Output:
+
+```bash
+immudb 2022/11/17 14:30:02 INFO: Creating database 'systemdb' {replica = false}...
+immudb 2022/11/17 14:30:02 INFO: Binary Linking up to date at 'data/systemdb'
+immudb 2022/11/17 14:30:02 INFO: Index 'data/systemdb/index' {ts=0, discarded_snapshots=0} successfully loaded
+immudb 2022/11/17 14:30:02 INFO: Indexing in progress at 'data/systemdb'
+immudb 2022/11/17 14:30:02 INFO: Flushing index 'data/systemdb/index' {ts=0, cleanup_percentage=0.00/0.00, since_cleanup=0} requested via SnapshotSince...
+immudb 2022/11/17 14:30:02 INFO: Index 'data/systemdb/index' {ts=0, cleanup_percentage=0.00/0.00} successfully flushed
+```
+
+##### JSON
+
+###### Command:
+
+```
+./immudb --logformat=json
+```
+
+###### Output:
+
+```bash
+{"caller":"codenotary/immudb/pkg/database/database.go:179","component":"github.com/codenotary/immudb/pkg/database.OpenDB","level":"info","message":"Opening database 'systemdb' {replica = false}...","module":"immudb","timestamp":"2022-11-17T14:32:28.890774+05:30"}
+{"caller":"codenotary/immudb/embedded/store/immustore.go:553","component":"github.com/codenotary/immudb/embedded/store.OpenWith","level":"info","message":"Binary Linking up to date at 'data/systemdb'","module":"immudb","timestamp":"2022-11-17T14:32:28.898035+05:30"}
+{"caller":"codenotary/immudb/embedded/tbtree/tbtree.go:351","component":"github.com/codenotary/immudb/embedded/tbtree.Open","level":"info","message":"Reading snapshots at 'data/systemdb/index/commit'...","module":"immudb","timestamp":"2022-11-17T14:32:28.898296+05:30"}
+{"caller":"codenotary/immudb/embedded/tbtree/tbtree.go:669","component":"github.com/codenotary/immudb/embedded/tbtree.OpenWith","level":"info","message":"Index 'data/systemdb/index' {ts=2, discarded_snapshots=0} successfully loaded","module":"immudb","timestamp":"2022-11-17T14:32:28.904722+05:30"}
+```
+
+
diff --git a/src/1.9.5/running/download.md b/src/1.9.5/running/download.md
new file mode 100644
index 0000000000..3dfff38c05
--- /dev/null
+++ b/src/1.9.5/running/download.md
@@ -0,0 +1,29 @@
+# Running
+
+::: tip
+To learn interactively and to get started with immudb from the command line and with programming languages, visit the immudb Playground at <https://play.codenotary.com>.
+:::
+
+
+
+You may download the immudb binary from [the latest releases on Github](https://github.com/codenotary/immudb/releases/latest). Once you have downloaded immudb, rename it to `immudb`, make sure to mark it as executable, then run it. The following example shows how to obtain v1.9DOM.1 for linux amd64:
+
+```bash
+$ wget https://github.com/vchain-us/immudb/releases/download/v1.9DOM.1/immudb-v1.9DOM.1-linux-amd64
+$ mv immudb-v1.9DOM.1-linux-amd64 immudb
+$ chmod +x immudb
+
+# run immudb in the foreground to see all output
+$ ./immudb
+
+# or run immudb in the background
+$ ./immudb -d
+```
+
+Alternatively, you may [pull immudb docker image from DockerHub](https://hub.docker.com/r/codenotary/immudb) and run it in a ready-to-use container:
+
+```bash
+$ docker run -d --net host -it --rm --name immudb codenotary/immudb:latest
+```
+
+
diff --git a/src/1.9.5/running/service.md b/src/1.9.5/running/service.md
new file mode 100644
index 0000000000..0ac7c259f3
--- /dev/null
+++ b/src/1.9.5/running/service.md
@@ -0,0 +1,34 @@
+
+# Running as a service
+
+
+
+Every operating system has different ways of running services. immudb provides a facility called `immudb service` to hide this complexity.
+
+To install the service run as root:
+
+```bash
+$ ./immudb service install
+```
+
+On Linux, for example, this will install `/etc/systemd/system/immudb.service` and create the appropriate user to run the service. On other operating systems, the native service mechanism is used.
+
+The `immudb service` command also allows you to control the lifecycle of the service:
+
+```bash
+$ ./immudb service start
+$ ./immudb service stop
+$ ./immudb service status
+```
+
+On Linux, `immudb service status` is equivalent to `systemctl status immudb.service`, and is what it does under the hood.
+
+
+
+
+
+### macOS specific
+
+In case you want to run immudb as a service, please check the following [guideline](https://medium.com/swlh/how-to-use-launchd-to-run-services-in-macos-b972ed1e352).
+
+
\ No newline at end of file
diff --git a/src/1.9.5/samples/go.md b/src/1.9.5/samples/go.md
new file mode 100644
index 0000000000..b84fc94831
--- /dev/null
+++ b/src/1.9.5/samples/go.md
@@ -0,0 +1,129 @@
+
+# App samples in Go
+
+
+
+This section includes sample applications using immudb in Go.
+
+Although the applications are simple, they will provide fully functional samples to demonstrate how to write an application using immudb.
+
+
+
+
+
+## Hello Immutable World
+
+The classical `Hello World` sample adapted to immudb.
+
+This simple application is using the [official immudb go sdk](https://github.com/codenotary/immudb/tree/master/pkg/client) to connect, store and retrieve key-value data from immudb server.
+
+The full source code of this sample can be found at [Hello Immutable World](https://github.com/codenotary/immudb-client-examples/tree/master/go/hello-immutable-world).
+
+### Prerequisites
+
+In order to run this sample, immudb server must be already running. This step is quite simple and it's described at [Running immudb](https://docs.immudb.io/master/running/download.html).
+
+### Building and running the sample app
+
+To build and run the sample application, simply clone the [Hello Immutable World](https://github.com/codenotary/immudb-client-examples/tree/master/go/hello-immutable-world) and run:
+
+```
+go mod tidy
+go build
+./hello-immutable-world
+```
+
+The sample application will run and display an output similar to
+
+```
+Sucessfully set a verified entry: ('hello', 'immutable world') @ tx 1
+Sucessfully got verified entry: ('hello', 'immutable world') @ tx 1
+```
+
+
+
+
+
+## WebApp using SQL
+
+The purpose of this sample application is to demonstrate the use of immudb using [Go standard APIs for SQL](https://pkg.go.dev/database/sql).
+
+This sample was written taking as a basis the tutorial [Building a simple app with Go and PostgreSQL](https://blog.logrocket.com/building-simple-app-go-postgresql/). We followed the same application structure even though the source code is different to show how immudb and PostgreSQL can be used in analogy.
+
+The full source code of this sample can be found at [WebApp using SQL](https://github.com/codenotary/immudb-client-examples/tree/master/go/todos-sample-stdlib)
+
+
+
+
+
+### Prerequisites
+
+In order to run this sample, immudb server must be already running. This step is quite simple and it's described at [Running immudb](https://docs.immudb.io/master/running/download.html).
+
+### Building and running the sample app
+
+To build and run the sample application, simply clone the [sample repository](https://github.com/codenotary/immudb-client-examples/tree/master/go/todos-sample-stdlib) and run:
+
+```
+go mod tidy
+go build
+./immudb-todo-webapp
+```
+
+The sample application should be up and running now. The port 3000 is used by default unless a different one is specified using `PORT` environment variable e.g. `PORT=3001 ./immudb-todo-webapp`
+
+::: tip
+Database initialization statements might be stored in an external file as in this sample [sql initialization script](https://github.com/codenotary/immudb-client-examples/tree/master/go/stdlib-init-script).
+:::
+
+
+
+
+
+## Command line app using SQL
+
+A simple reminder console app that stores all data in immudb.
+
+As in the previous sample, the purpose of this sample application is to demonstrate the use of immudb using [Go standard APIs for SQL](https://pkg.go.dev/database/sql).
+
+The full source code of this sample can be found at [Console sample using SQL](https://github.com/codenotary/immudb-client-examples/tree/master/go/immudb-reminder-app).
+
+### Prerequisites
+
+In order to run this sample, immudb server must be already running. This step is quite simple and it's described at [Running immudb](https://docs.immudb.io/master/running/download.html).
+
+### Building and running the sample app
+
+To build and run the sample application, simply clone the [Console sample using SQL](https://github.com/codenotary/immudb-client-examples/tree/master/go/immudb-reminder-app) and run:
+
+```
+go mod tidy
+go build
+./immudb-reminder-app
+```
+
+The sample application should be up and running now.
+
+Additionally, this sample application provides a simple way to specify connection settings. Run `./immudb-reminder-app -h` to display all the available flags.
+
+```
+Usage of ./immudb-reminder-app:
+ -addr string
+ IP address of immudb server (default "localhost")
+ -db string
+ Name of the database to use (default "defaultdb")
+ -pass string
+ Password for authenticating to immudb (default "immudb")
+ -port string
+ Port number of immudb server (default "3322")
+ -user string
+ Username for authenticating to immudb (default "immudb")
+```
+
+
+
+
+::: tip
+Additional samples can be found at [immudb client samples repository](https://github.com/codenotary/immudb-client-examples/tree/master/go).
+:::
+
diff --git a/src/master/develop/sql/querying.md b/src/master/develop/sql/querying.md
index 6e33b49532..ec68ce5c74 100644
--- a/src/master/develop/sql/querying.md
+++ b/src/master/develop/sql/querying.md
@@ -26,6 +26,19 @@ FROM customers;
+### Selecting expressions
+
+In addition to selecting specific column values, you can also select expressions:
+
+```sql
+SELECT NOT active as disabled, SUBSTRING(customer_name, 1, 3) AS short_name, age >= 21 AS is_adult
+FROM customers;
+```
+
+
+
+
+
### Filtering entries
```sql
diff --git a/src/master/running/configuration.md b/src/master/running/configuration.md
index 1f39db841c..f406bea482 100644
--- a/src/master/running/configuration.md
+++ b/src/master/running/configuration.md
@@ -16,7 +16,8 @@ Settings can be specified as command line options to immudb (see `immudb -h`), i
| ------------------------------------- | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `address` | `0.0.0.0` | bind address |
| `admin-password` | `immudb` | admin password as plain-text or base64 encoded (must be prefixed with 'enc:' if it is encoded) |
-| `auth` | `true` | enable auth |
+| `auth` | `true` | enable auth |
+| `auto-cert` | `false` | start the server using a generated, self-signed HTTPS certificate |
| `certificate` | `` | server certificate file path |
| `clientcas` | `` | clients certificates list. Aka certificate authority |
| `config` | `` | config file (default path are configs or $HOME. Default filename is immudb. |
@@ -24,11 +25,16 @@ Settings can be specified as command line options to immudb (see `immudb -h`), i
| `devmode` | `false` | enable dev mode: accept remote connections without auth |
| `dir` | `./data` | data folder |
| `force-admin-password` | `false` | if true, reset the admin password to the one passed through admin-password option upon startup |
+| `log-access` | `false` | log incoming requests information (username, IP, etc...) |
| `log-request-metadata` | `false` | log request information in transaction metadata |
+| `log-rotation-age` | `0` | maximum duration (age) of a log segment before it is rotated |
+| `log-rotation-size` | `0` | maximum size a log segment can reach before being rotated |
+| `logdir` | `immulog` | log path base dir /tmp/immudb/immulog (default "immulog") |
| `grpc-reflection` | `true` | GRPC reflection server enabled |
| `logfile` | `` | log path with filename. E.g. /tmp/immudb/immudb.log |
| `logformat` | `text` | log format e.g. text/json |
-| `maintenance` | `false` | override the authentication flag |
+| `maintenance` | `false` | override the authentication flag |
+| `max-active-databases` | `100` | the maximum number of databases that can be active simultaneously (default 100) |
| `max-recv-msg-size` | `33554432` | max message size in bytes the server can receive |
| `max-session-age-time` | infinity | max session age time is a duration after which session will be forcibly closed |
| `max-session-inactivity-time` | `3m0s` | max session inactivity time is a duration after which an active session is declared inactive by the server. A session is kept active if server is still receiving requests from client (keep-alive or other methods) |