diff --git a/404.html b/404.html index cc1c01297..bd8139e7f 100644 --- a/404.html +++ b/404.html @@ -6,7 +6,7 @@ - + @@ -31,11 +31,11 @@ - + - + - +
diff --git a/frinx-machine/azure-ad/index.html b/frinx-machine/azure-ad/index.html index 8417f4a90..5d14e80b6 100644 --- a/frinx-machine/azure-ad/index.html +++ b/frinx-machine/azure-ad/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
diff --git a/frinx-machine/getting-started/index.html b/frinx-machine/getting-started/index.html index 523d71e0f..2589e458c 100644 --- a/frinx-machine/getting-started/index.html +++ b/frinx-machine/getting-started/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-machine/monitoring/index.html b/frinx-machine/monitoring/index.html index 5e921e837..ad35ee848 100644 --- a/frinx-machine/monitoring/index.html +++ b/frinx-machine/monitoring/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-machine/use-cases/add-to-inventory-and-install/index.html b/frinx-machine/use-cases/add-to-inventory-and-install/index.html index 89959c65f..6e3455982 100644 --- a/frinx-machine/use-cases/add-to-inventory-and-install/index.html +++ b/frinx-machine/use-cases/add-to-inventory-and-install/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
diff --git a/frinx-machine/use-cases/create-l2-vpn-p2p/index.html b/frinx-machine/use-cases/create-l2-vpn-p2p/index.html index 2de4c08d5..e55cc681c 100644 --- a/frinx-machine/use-cases/create-l2-vpn-p2p/index.html +++ b/frinx-machine/use-cases/create-l2-vpn-p2p/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-machine/use-cases/frinx-machine-demo-manual/index.html b/frinx-machine/use-cases/frinx-machine-demo-manual/index.html index c7080c5c7..776c058b2 100644 --- a/frinx-machine/use-cases/frinx-machine-demo-manual/index.html +++ b/frinx-machine/use-cases/frinx-machine-demo-manual/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-machine/use-cases/index.html b/frinx-machine/use-cases/index.html index 627a96a40..20ba962cd 100644 --- a/frinx-machine/use-cases/index.html +++ b/frinx-machine/use-cases/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-machine/use-cases/install-all-devices-from-inventory/index.html b/frinx-machine/use-cases/install-all-devices-from-inventory/index.html index 79a9beb5a..76cb03cf8 100644 --- a/frinx-machine/use-cases/install-all-devices-from-inventory/index.html +++ b/frinx-machine/use-cases/install-all-devices-from-inventory/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-machine/use-cases/policy-filter-xr/index.html b/frinx-machine/use-cases/policy-filter-xr/index.html index f1025f571..eda7b93ba 100644 --- a/frinx-machine/use-cases/policy-filter-xr/index.html +++ b/frinx-machine/use-cases/policy-filter-xr/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-resource-manager/architecture/index.html b/frinx-resource-manager/architecture/index.html index 5575eca3a..740e78e5f 100644 --- a/frinx-resource-manager/architecture/index.html +++ b/frinx-resource-manager/architecture/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-resource-manager/developer-guide/index.html b/frinx-resource-manager/developer-guide/index.html index 8942ac764..73d9dea65 100644 --- a/frinx-resource-manager/developer-guide/index.html +++ b/frinx-resource-manager/developer-guide/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-resource-manager/introduction/index.html b/frinx-resource-manager/introduction/index.html index 6c9f057b9..9c2b66b0a 100644 --- a/frinx-resource-manager/introduction/index.html +++ b/frinx-resource-manager/introduction/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-resource-manager/pools/index.html b/frinx-resource-manager/pools/index.html index adce2632b..646c55ed2 100644 --- a/frinx-resource-manager/pools/index.html +++ b/frinx-resource-manager/pools/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-resource-manager/user-guide/index.html b/frinx-resource-manager/user-guide/index.html index e51d4656f..823ee9007 100644 --- a/frinx-resource-manager/user-guide/index.html +++ b/frinx-resource-manager/user-guide/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/developer-guide/architecture/index.html b/frinx-uniconfig/developer-guide/architecture/index.html index 09a8b9a7e..54fbd6a77 100644 --- a/frinx-uniconfig/developer-guide/architecture/index.html +++ b/frinx-uniconfig/developer-guide/architecture/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-uniconfig/developer-guide/cli-translation-unit/index.html b/frinx-uniconfig/developer-guide/cli-translation-unit/index.html index 067df17a4..aec018845 100644 --- a/frinx-uniconfig/developer-guide/cli-translation-unit/index.html +++ b/frinx-uniconfig/developer-guide/cli-translation-unit/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
diff --git a/frinx-uniconfig/developer-guide/index.html b/frinx-uniconfig/developer-guide/index.html index 1d165b928..4af228990 100644 --- a/frinx-uniconfig/developer-guide/index.html +++ b/frinx-uniconfig/developer-guide/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/developer-guide/metrics/index.html b/frinx-uniconfig/developer-guide/metrics/index.html index 15fd73789..101c3d8ac 100644 --- a/frinx-uniconfig/developer-guide/metrics/index.html +++ b/frinx-uniconfig/developer-guide/metrics/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
diff --git a/frinx-uniconfig/developer-guide/native-cli-units/index.html b/frinx-uniconfig/developer-guide/native-cli-units/index.html index 21360b47d..3ce70d9b0 100644 --- a/frinx-uniconfig/developer-guide/native-cli-units/index.html +++ b/frinx-uniconfig/developer-guide/native-cli-units/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
diff --git a/frinx-uniconfig/developer-guide/netconf-translation-unit/index.html b/frinx-uniconfig/developer-guide/netconf-translation-unit/index.html index a0dbfabc6..eb0e26a1e 100644 --- a/frinx-uniconfig/developer-guide/netconf-translation-unit/index.html +++ b/frinx-uniconfig/developer-guide/netconf-translation-unit/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
diff --git a/frinx-uniconfig/developer-guide/open-config-to-device-config-mapping/index.html b/frinx-uniconfig/developer-guide/open-config-to-device-config-mapping/index.html index 5a7ae914a..4dfc564b3 100644 --- a/frinx-uniconfig/developer-guide/open-config-to-device-config-mapping/index.html +++ b/frinx-uniconfig/developer-guide/open-config-to-device-config-mapping/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/developer-guide/translation-unit-general-implementation/index.html b/frinx-uniconfig/developer-guide/translation-unit-general-implementation/index.html index 49be05e3c..c26b774d4 100644 --- a/frinx-uniconfig/developer-guide/translation-unit-general-implementation/index.html +++ b/frinx-uniconfig/developer-guide/translation-unit-general-implementation/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/developer-guide/translation-units-docs/index.html b/frinx-uniconfig/developer-guide/translation-units-docs/index.html index de5ff183e..65a671faf 100644 --- a/frinx-uniconfig/developer-guide/translation-units-docs/index.html +++ b/frinx-uniconfig/developer-guide/translation-units-docs/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/developer-guide/translation-units-in-general/index.html b/frinx-uniconfig/developer-guide/translation-units-in-general/index.html index 2de3160df..ad802ef90 100644 --- a/frinx-uniconfig/developer-guide/translation-units-in-general/index.html +++ b/frinx-uniconfig/developer-guide/translation-units-in-general/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/generated-api-libs/index.html b/frinx-uniconfig/generated-api-libs/index.html index facc60a16..7620a24af 100644 --- a/frinx-uniconfig/generated-api-libs/index.html +++ b/frinx-uniconfig/generated-api-libs/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/getting-started/index.html b/frinx-uniconfig/getting-started/index.html index 80a265bd4..6b0dd1086 100644 --- a/frinx-uniconfig/getting-started/index.html +++ b/frinx-uniconfig/getting-started/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
diff --git a/frinx-uniconfig/glossary-of-terms/index.html b/frinx-uniconfig/glossary-of-terms/index.html index c2d241512..7c7254247 100644 --- a/frinx-uniconfig/glossary-of-terms/index.html +++ b/frinx-uniconfig/glossary-of-terms/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
diff --git a/frinx-uniconfig/q_a/index.html b/frinx-uniconfig/q_a/index.html index 8bb79f93a..14c9adf00 100644 --- a/frinx-uniconfig/q_a/index.html +++ b/frinx-uniconfig/q_a/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
diff --git a/frinx-uniconfig/release-notes/index.html b/frinx-uniconfig/release-notes/index.html index c3d3ee0b3..22d029e77 100644 --- a/frinx-uniconfig/release-notes/index.html +++ b/frinx-uniconfig/release-notes/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
@@ -255,6 +255,7 @@

  • Release notes for UniConfig 5.1.14
  • + Release notes for UniConfig 5.1.14
  • Release notes for UniConfig 5.2.2
  • Release notes for UniConfig 5.1.13
  • Release notes for UniConfig 5.2.1
  • diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.10/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.10/index.html index cf548f798..d6d237068 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.10/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.10/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.3/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.3/index.html index 625239ea7..8dc88521d 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.3/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.3/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.4/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.4/index.html index 8b0d4e564..bf99ae08f 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.4/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.4/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.5/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.5/index.html index e1b2d03f4..0114a4001 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.5/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.5/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.6/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.6/index.html index a414d015e..8da7557ce 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.6/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.6/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.7/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.7/index.html index 0478cb1cf..f3ca4a192 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.7/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.7/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.8/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.8/index.html index 839008b93..3187076c5 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.8/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.8/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-4.2.9/index.html b/frinx-uniconfig/release-notes/uniconfig-4.2.9/index.html index 3f6749adf..cb529fb9f 100644 --- a/frinx-uniconfig/release-notes/uniconfig-4.2.9/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-4.2.9/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.1/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.1/index.html index 33f9f2640..1723a0cc8 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.1/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.1/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.10/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.10/index.html index 3ab95a43b..5538fcbea 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.10/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.10/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.11/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.11/index.html index 18f5a7d9d..e2ef0e72b 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.11/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.11/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.12/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.12/index.html index 258404405..b4f407a8f 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.12/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.12/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.13/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.13/index.html index 583a43ede..ff19799d0 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.13/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.13/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.14/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.14/index.html index 6ae63eac9..eca34724a 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.14/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.14/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.15/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.15/index.html index c83822a43..8a94af329 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.15/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.15/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.16/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.16/index.html index b53e564b3..f64840ff5 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.16/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.16/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.17/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.17/index.html index 9c0b7c7cd..0d4addb2e 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.17/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.17/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.18/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.18/index.html index 3b2907f75..a821b418a 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.18/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.18/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.19/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.19/index.html index 7acedbf35..c0672b49d 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.19/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.19/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.2/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.2/index.html index 4466d1085..e4c96a7c9 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.2/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.2/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.20/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.20/index.html index 7101f7458..f900fd982 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.20/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.20/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.21/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.21/index.html index 4627cddae..223e08804 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.21/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.21/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.22/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.22/index.html index 828367599..07e9c8d95 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.22/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.22/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.23/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.23/index.html index 457ea80d1..c8090d646 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.23/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.23/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.24/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.24/index.html index 254eecd8d..4f04ee3f0 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.24/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.24/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.25/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.25/index.html index 7a6ec5a43..327359782 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.25/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.25/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.3/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.3/index.html index 28aa34242..196b3c487 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.3/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.3/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.4/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.4/index.html index 4a2313881..c9c1b5131 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.4/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.4/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.5/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.5/index.html index 70403af04..e70cce8dd 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.5/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.5/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.6/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.6/index.html index 49a9bd350..85e935d74 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.6/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.6/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.7/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.7/index.html index f3d829d86..80fced14a 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.7/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.7/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.8/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.8/index.html index b8163919f..2bb10483c 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.8/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.8/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.0.9/index.html b/frinx-uniconfig/release-notes/uniconfig-5.0.9/index.html index 6befc0473..8c709168e 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.0.9/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.0.9/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.0/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.0/index.html index 3d53336e4..a15c09c8a 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.0/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.0/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.1/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.1/index.html index 444e67b0e..90a5cc99a 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.1/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.1/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.10/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.10/index.html index e45a97386..493bfb1d2 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.10/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.10/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.11/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.11/index.html index 809d21667..c2f437eff 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.11/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.11/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.12/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.12/index.html index ad6464a7e..31a7bcdf7 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.12/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.12/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.13/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.13/index.html index d375b5431..f9230daa1 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.13/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.13/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    @@ -359,7 +359,7 @@

    Next
    - Uniconfig 5.1.14
    + Uniconfig 5.1.14 Release Notes
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.14/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.14/index.html index c0db6573c..6ab3dcc99 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.14/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.14/index.html @@ -6,15 +6,15 @@
    - Uniconfig 5.1.14 | Frinx Docs
    + Uniconfig 5.1.14 Release Notes | Frinx Docs
    @@ -22,23 +22,23 @@
    @@ -247,69 +247,32 @@

    - # Uniconfig 5.1.14
    + # Uniconfig 5.1.14 Release Notes

    - # What's Changed

    - # 🐞 Bug Fixes
    - • [UNIC-1429] Fix replace operation in GNMI set
    - • [UNIC-1471]: Fix sync fail after failed installation was stored STABLE
    - • [UNIC-1474] Improve performance of YANG repository loading process during mounting process (#1785)
    - • UniConfig Shell - fix prompt callbacks bug
    - • [UNIC-1492] - Fix rate-limiting
    - • Fixed loading of gNMI YANG repository during MountNodeTask
    - • [UNIC-1471] Add schema-cache storing into sync impl
    - • Prevented sending no description command if there is no change for rpd description
    - • Gnmi sb netconf cache loader stable
    - • [UNIC-1494] - add migration for replace-paths
    - • Fix get fallback schema context in cli shell.
    - • Fix settings / callbacks cache

    # New Features

    - • [UNIC-1075] Uniconfig shell hide / unhide command implementation

    - # 💡 Improvements
    - • [UNIC-1408] UniConfig Shell - adjust cached data (#1745)
    - • [UNIC-1405] UniConfig shell: set nested JSON data (#1752)
    - • Improved logging (#1798)
    + • Uniconfig shell hide / unhide command implementation - UNIC-1075

    - # 🔧 Other Changes

    + # Bug Fixes

    - • Suppress CVEs
    - • [UNIC-1475] changing information about expired transaction in root mode
    - • Release 5.1.14
    + • Prevented sending no description command if there is no change for rpd description (#1818)
    + • Uniconfig shell: information about expired transaction in the root mode
    @@ -380,6 +343,6 @@

    - + diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.2/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.2/index.html index 23543bfea..09e863044 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.2/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.2/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.3/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.3/index.html index 9c19d7d2d..e9edbd4c5 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.3/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.3/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.4/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.4/index.html index 78ebe034c..1e37febd5 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.4/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.4/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.5/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.5/index.html index 15098e173..c16c37b5a 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.5/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.5/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.6/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.6/index.html index 8a77dcf5f..13795a0ad 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.6/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.6/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.7/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.7/index.html index 3be928347..a89163527 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.7/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.7/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.8/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.8/index.html index 85c6e56b7..d418ca6d3 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.8/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.8/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.1.9/index.html b/frinx-uniconfig/release-notes/uniconfig-5.1.9/index.html index fdb1fa023..ba9ac1a17 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.1.9/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.1.9/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.2.0/index.html b/frinx-uniconfig/release-notes/uniconfig-5.2.0/index.html index a9d6abb72..32999c056 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.2.0/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.2.0/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    @@ -440,7 +440,7 @@

    Previous
    - Uniconfig 5.1.14
    + Uniconfig 5.1.14 Release Notes

    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.2.1/index.html b/frinx-uniconfig/release-notes/uniconfig-5.2.1/index.html index 90f02ecf0..7393a79fa 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.2.1/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.2.1/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/release-notes/uniconfig-5.2.2/index.html b/frinx-uniconfig/release-notes/uniconfig-5.2.2/index.html index 105ffd203..4bdb6e91c 100644 --- a/frinx-uniconfig/release-notes/uniconfig-5.2.2/index.html +++ b/frinx-uniconfig/release-notes/uniconfig-5.2.2/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/supported-devices/index.html b/frinx-uniconfig/supported-devices/index.html index 83aa279cc..5e21637b2 100644 --- a/frinx-uniconfig/supported-devices/index.html +++ b/frinx-uniconfig/supported-devices/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/aaa/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/aaa/index.html index 57d65884e..2390fb288 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/aaa/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/aaa/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl/index.html index 00527967d..3a132dbbf 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl_interfaces/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl_interfaces/index.html index a91fc8349..4bc9977dc 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl_interfaces/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/acl/acl_interfaces/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/bcast-containment/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/bcast-containment/index.html index 2432c81b7..a48be607b 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/bcast-containment/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/bcast-containment/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_downstream_profile/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_downstream_profile/index.html index 2bb7e2770..d9cfd2f5d 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_downstream_profile/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_downstream_profile/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_fiber_node/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_fiber_node/index.html index 25dd9f73e..0402a9559 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_fiber_node/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_fiber_node/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_rpd/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_rpd/index.html index ecc881931..ed0cad252 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_rpd/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/cable/cable_rpd/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/cdp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/cdp/index.html index e84713ab0..1bf99d191 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/cdp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/cdp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/evc/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/evc/index.html index 85e95dacf..d7f9c8af4 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/evc/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/evc/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/evpn/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/evpn/index.html index 633fd6780..c0ec2dde8 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/evpn/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/evpn/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/fdp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/fdp/index.html index cf5b74a60..bd89cbc49 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/fdp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/fdp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/hsrp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/hsrp/index.html index 5021d52a3..2cf1edd6f 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/hsrp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/hsrp/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/index.html index cffc5058e..22a2be58a 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/bridge_interface/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/bridge_interface/index.html index 66616dbb5..440c76bd5 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/bridge_interface/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/bridge_interface/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/cable_interface/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/cable_interface/index.html index 975b18f4b..d3fb1b67a 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/cable_interface/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/cable_interface/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/ethernet_interface/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/ethernet_interface/index.html index 29a8f3862..07422196f 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/ethernet_interface/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/ethernet_interface/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l2vlan_interface/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l2vlan_interface/index.html index 9bf3c9088..b45e8a710 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l2vlan_interface/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l2vlan_interface/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l3vlan_interface/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l3vlan_interface/index.html index 8b1f2fd1f..a1da71e92 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l3vlan_interface/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/l3vlan_interface/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/lag_interface/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/lag_interface/index.html index bb1f4a7ab..e141848cd 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/lag_interface/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/lag_interface/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/wideband_interface/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/wideband_interface/index.html index 45b584106..c25c627f8 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/wideband_interface/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/interfaces/wideband_interface/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/ipsec/ipsec/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/ipsec/ipsec/index.html index 1ad54b386..d9356f277 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/ipsec/ipsec/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/ipsec/ipsec/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/l2-cft/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/l2-cft/index.html index 12318f9c5..291ea22ec 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/l2-cft/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/l2-cft/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/logging/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/logging/index.html index a1d1a404c..8944438de 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/logging/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/logging/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/netflow/netflow_interfaces/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/netflow/netflow_interfaces/index.html index ec02ea585..e29101748 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/netflow/netflow_interfaces/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/netflow/netflow_interfaces/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2p2p/connection_point/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2p2p/connection_point/index.html index 2bdd917b0..d497aba51 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2p2p/connection_point/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2p2p/connection_point/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vpn/connection_point_l2vpn/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vpn/connection_point_l2vpn/index.html index 3654f4e0d..9f4f932c0 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vpn/connection_point_l2vpn/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vpn/connection_point_l2vpn/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsi/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsi/index.html index 28e364efe..38c87607f 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsi/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsi/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsicp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsicp/index.html index eb97a0f69..1a7c74ff3 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsicp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l2vsi/l2vsicp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_bgp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_bgp/index.html index 5de1e005d..04701d71e 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_bgp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_bgp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_ospf/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_ospf/index.html index 094184553..c0c74faa9 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_ospf/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/l3vpn/network_instance_l3vpn_ospf/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_ldp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_ldp/index.html index 67c11d2fc..9d7d42d13 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_ldp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_ldp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_rsvp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_rsvp/index.html index 6933d3591..9015cba5c 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_rsvp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_rsvp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_te/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_te/index.html index 0e98ea19a..89cc77899 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_te/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_te/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_tunnel/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_tunnel/index.html index 83dda9b68..5f6bc8d04 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_tunnel/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/mpls/mpls_tunnel/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/network_instance/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/network_instance/index.html index 47b5d405f..9d54872db 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/network_instance/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/network_instance/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/policy-forwarding/pf_interfaces/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/policy-forwarding/pf_interfaces/index.html index 2e1a7dcff..c1d270bad 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/policy-forwarding/pf_interfaces/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/policy-forwarding/pf_interfaces/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/bgp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/bgp/index.html index 978df89b9..9a79d770c 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/bgp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/bgp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/isis/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/isis/index.html index 9d009e412..09bc65f56 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/isis/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/isis/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospf/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospf/index.html index 6fb0694e1..b82d8db46 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospf/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospf/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospfv3/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospfv3/index.html index 3efd702a2..861636f91 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospfv3/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/ospfv3/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/static/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/static/index.html index 173f6bab0..f45c5a7b5 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/static/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/protocols/static/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/vlans/vlan/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/vlans/vlan/index.html index f4cd2d856..e207aa351 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/vlans/vlan/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/network-instances/vlans/vlan/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/oam/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/oam/index.html index c4d212c83..47a58122b 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/oam/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/oam/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/privilege/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/privilege/index.html index 87b7bbfba..55a3fa526 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/privilege/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/privilege/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/probes/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/probes/index.html index 912271c42..568e6dc7b 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/probes/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/probes/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/qos/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/qos/index.html index 739271583..e0a8d30bf 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/qos/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/qos/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/relay-agent/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/relay-agent/index.html index 865f473df..acd4ef250 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/relay-agent/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/relay-agent/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/routing-policy/routing-policy/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/routing-policy/routing-policy/index.html index 032d1607f..286700bfc 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/routing-policy/routing-policy/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/routing-policy/routing-policy/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/snmp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/snmp/index.html index 2e9ad44e1..0fe916fd4 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/snmp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/snmp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/stp/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/stp/index.html index 1239b8700..2be069458 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/stp/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/stp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/configuration-datasets/system/index.html b/frinx-uniconfig/translation-units-docs/configuration-datasets/system/index.html index 0099315e3..32039fec7 100644 --- a/frinx-uniconfig/translation-units-docs/configuration-datasets/system/index.html +++ b/frinx-uniconfig/translation-units-docs/configuration-datasets/system/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_local_to_oc/index.html b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_local_to_oc/index.html index 175603e50..46976bd01 100644 --- a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_local_to_oc/index.html +++ b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_local_to_oc/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_remote_to_oc/index.html b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_remote_to_oc/index.html index a4bcd30d7..28699fa35 100644 --- a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_remote_to_oc/index.html +++ b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2p2p_remote_to_oc/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2vpn_to_oc/index.html b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2vpn_to_oc/index.html index 7533828a9..5132560fc 100644 --- a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2vpn_to_oc/index.html +++ b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l2vpn_to_oc/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l3vpn_to_oc/index.html b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l3vpn_to_oc/index.html index da5b3c814..1610c58ab 100644 --- a/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l3vpn_to_oc/index.html +++ b/frinx-uniconfig/translation-units-docs/ietf-to-oc-mapping/ietf_l3vpn_to_oc/index.html @@ -6,7 +6,7 @@ - + @@ -31,12 +31,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/index.html b/frinx-uniconfig/translation-units-docs/index.html index 10d9b7351..f2df8258d 100644 --- a/frinx-uniconfig/translation-units-docs/index.html +++ b/frinx-uniconfig/translation-units-docs/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/cdp/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/cdp/index.html index db4b4acb4..32450a9cc 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/cdp/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/cdp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/index.html index 1b927a360..969e662d1 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/interfaces/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/interfaces/index.html index 9d841ff12..242cf683d 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/interfaces/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/interfaces/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/lldp/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/lldp/index.html index c731f1034..b2375f69e 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/lldp/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/lldp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_rib/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_rib/index.html index b1aca3562..656f67d3c 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_rib/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_rib/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_summary/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_summary/index.html index f8192b87e..85c9b44a1 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_summary/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/bgp_summary/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/ospf_summary/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/ospf_summary/index.html index 6b6f91672..4e1bd3698 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/ospf_summary/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/network-instances/protocols/ospf_summary/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/platform/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/platform/index.html index 7ca3d8491..7453b93f7 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/platform/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/platform/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/operational-datasets/system/index.html b/frinx-uniconfig/translation-units-docs/operational-datasets/system/index.html index 909a06f49..93806f2c4 100644 --- a/frinx-uniconfig/translation-units-docs/operational-datasets/system/index.html +++ b/frinx-uniconfig/translation-units-docs/operational-datasets/system/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/translation-units-docs/translation-framework-101/index.html b/frinx-uniconfig/translation-units-docs/translation-framework-101/index.html index a64cda1a1..d7e3c5229 100644 --- a/frinx-uniconfig/translation-units-docs/translation-framework-101/index.html +++ b/frinx-uniconfig/translation-units-docs/translation-framework-101/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/basic-concepts/index.html b/frinx-uniconfig/user-guide/basic-concepts/index.html index 057a480ed..4d63a6475 100644 --- a/frinx-uniconfig/user-guide/basic-concepts/index.html +++ b/frinx-uniconfig/user-guide/basic-concepts/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/index.html b/frinx-uniconfig/user-guide/index.html index f3438eed0..c77a70404 100644 --- a/frinx-uniconfig/user-guide/index.html +++ b/frinx-uniconfig/user-guide/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/monitoring/index.html b/frinx-uniconfig/user-guide/monitoring/index.html index 00cef051d..d336e0075 100644 --- a/frinx-uniconfig/user-guide/monitoring/index.html +++ b/frinx-uniconfig/user-guide/monitoring/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/index.html b/frinx-uniconfig/user-guide/network-management-protocols/index.html index 0ec2d0140..802b7a918 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-installing/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-installing/index.html index 09e4d6c0b..4dfde8333 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-installing/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-installing/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-native_cli/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-native_cli/index.html index 0f7a955fe..f7f562d7d 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-native_cli/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig-native_cli/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_cli/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_cli/index.html index e66b04669..66634f366 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_cli/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_cli/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/calix/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/calix/index.html index cc8132f7a..0e30d812b 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/calix/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/calix/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/index.html index 4bc194dba..c7bfb9d10 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/iosxr/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/iosxr/index.html index 785fcd167..8c035e7d6 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/iosxr/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/iosxr/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/junos/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/junos/index.html index abe4430ad..14fd0f74e 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/junos/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/junos/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/ocnos/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/ocnos/index.html index 9681e3b1f..ce2fcb2cb 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/ocnos/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/ocnos/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/sros/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/sros/index.html index 8ebbc63b7..24e7793ee 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/sros/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_netconf/sros/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_snmp/index.html b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_snmp/index.html index b68bca30d..7a59c36a9 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_snmp/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/uniconfig_snmp/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/network-management-protocols/updating-installation-parameters/index.html b/frinx-uniconfig/user-guide/network-management-protocols/updating-installation-parameters/index.html index 0e2404db6..464bd6f04 100644 --- a/frinx-uniconfig/user-guide/network-management-protocols/updating-installation-parameters/index.html +++ b/frinx-uniconfig/user-guide/network-management-protocols/updating-installation-parameters/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/data-flows/index.html b/frinx-uniconfig/user-guide/operational-procedures/data-flows/index.html index f89580cb2..3abc53ade 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/data-flows/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/data-flows/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/data-security-models/index.html b/frinx-uniconfig/user-guide/operational-procedures/data-security-models/index.html index 65b8f4860..807eafcd1 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/data-security-models/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/data-security-models/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/index.html b/frinx-uniconfig/user-guide/operational-procedures/index.html index 4d85aac27..486db07ee 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/logging/index.html b/frinx-uniconfig/user-guide/operational-procedures/logging/index.html index 1919eb6c3..b29dae9b0 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/logging/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/logging/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/openapi/index.html b/frinx-uniconfig/user-guide/operational-procedures/openapi/index.html index 019678db2..6ccc5b4ca 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/openapi/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/openapi/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/postgres-tls/index.html b/frinx-uniconfig/user-guide/operational-procedures/postgres-tls/index.html index 5bdd27845..d5cf3108d 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/postgres-tls/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/postgres-tls/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/thread-pools/index.html b/frinx-uniconfig/user-guide/operational-procedures/thread-pools/index.html index 6e99ef9b6..d2f45852b 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/thread-pools/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/thread-pools/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/tls/index.html b/frinx-uniconfig/user-guide/operational-procedures/tls/index.html index add3c39d6..cafa5271c 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/tls/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/tls/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/uniconfig-clustering/index.html b/frinx-uniconfig/user-guide/operational-procedures/uniconfig-clustering/index.html index 8d755490e..7200d409d 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/uniconfig-clustering/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/uniconfig-clustering/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/operational-procedures/uniconfig-properties/index.html b/frinx-uniconfig/user-guide/operational-procedures/uniconfig-properties/index.html index 70477185e..ea9475738 100644 --- a/frinx-uniconfig/user-guide/operational-procedures/uniconfig-properties/index.html +++ b/frinx-uniconfig/user-guide/operational-procedures/uniconfig-properties/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/performance-and-scale/performance_characteristics/index.html b/frinx-uniconfig/user-guide/performance-and-scale/performance_characteristics/index.html index a1b186e82..dc26ef933 100644 --- a/frinx-uniconfig/user-guide/performance-and-scale/performance_characteristics/index.html +++ b/frinx-uniconfig/user-guide/performance-and-scale/performance_characteristics/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/sdk/index.html b/frinx-uniconfig/user-guide/sdk/index.html index 27dbe5028..f57e7d4aa 100644 --- a/frinx-uniconfig/user-guide/sdk/index.html +++ b/frinx-uniconfig/user-guide/sdk/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/admin-state/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/admin-state/index.html index eff7714f1..e21c5ab78 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/admin-state/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/admin-state/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/build-and-commit-model/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/build-and-commit-model/index.html index ecec99391..cdf01de6c 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/build-and-commit-model/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/build-and-commit-model/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/device-discovery/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/device-discovery/index.html index a7b75aefa..0e522b6ca 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/device-discovery/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/device-discovery/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/dryrun-manager/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/dryrun-manager/index.html index 752e3cbf2..10fb9d8de 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/dryrun-manager/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/dryrun-manager/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/immediate-commit-model/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/immediate-commit-model/index.html index 1c512fd18..1f98bce7c 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/immediate-commit-model/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/immediate-commit-model/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/index.html index 77ad68833..511cf6081 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/application-jsonb-filtering/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/application-jsonb-filtering/index.html index 9c1acd518..6b911b081 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/application-jsonb-filtering/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/application-jsonb-filtering/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/database-jsonb-filtering/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/database-jsonb-filtering/index.html index ae3dc1797..27da42ed7 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/database-jsonb-filtering/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/database-jsonb-filtering/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/index.html index 003ebf572..e7737cb22 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/jsonb-filtering/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/kafka-notifications/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/kafka-notifications/index.html index 9abe37e6d..289f7dea8 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/kafka-notifications/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/kafka-notifications/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/operational-data-about-transactions/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/operational-data-about-transactions/index.html index 37eceb372..eaaf5fd42 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/operational-data-about-transactions/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/operational-data-about-transactions/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/restconf/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/restconf/index.html index 8c598c8c6..5e554aa2e 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/restconf/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/restconf/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/index.html index eeab064ff..5dce7b8cc 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/obtain_snapshot_metadata/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/obtain_snapshot_metadata/index.html index d1cb3fe72..0e2ccef39 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/obtain_snapshot_metadata/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/obtain_snapshot_metadata/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_create-snapshot/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_create-snapshot/index.html index ed152b806..c9b9432a3 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_create-snapshot/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_create-snapshot/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_delete-snapshot/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_delete-snapshot/index.html index 8c24bd02a..bd77bec60 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_delete-snapshot/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_delete-snapshot/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_replace-config-with-snapshot/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_replace-config-with-snapshot/index.html index 126368a89..c4704be3c 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_replace-config-with-snapshot/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/snapshot-manager/rpc_replace-config-with-snapshot/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/index.html index 2614e5784..cdfdb44c8 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_bulk-edit/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_bulk-edit/index.html index b4161735d..14cbce514 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_bulk-edit/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_bulk-edit/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-diff/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-diff/index.html index ab5869f58..8273d2051 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-diff/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-diff/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-git-like-diff/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-git-like-diff/index.html index d35cd1a1a..e2b7b7fc1 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-git-like-diff/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_calculate-subtree-git-like-diff/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-many-to-one/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-many-to-one/index.html index b5c2847da..c7a37fc0a 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-many-to-one/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-many-to-one/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-many/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-many/index.html index c9e977fd0..a5f2a2360 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-many/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-many/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-one/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-one/index.html index 1a6c0eaca..7730d6812 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-one/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/subtree-manager/rpc_copy-one-to-one/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/templates-manager/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/templates-manager/index.html index 31da2fdba..c9b3d88e8 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/templates-manager/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/templates-manager/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/index.html index a7610286c..9aebb82a3 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/rpc_revert-changes/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/rpc_revert-changes/index.html index ee8c73ff4..226463968 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/rpc_revert-changes/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/rpc_revert-changes/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/transaction-tracker/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/transaction-tracker/index.html index 8c5646dc0..c79ea0084 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/transaction-tracker/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/transaction-log/transaction-tracker/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/index.html index 070ea40ef..ea3fa6c0c 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-diff/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-diff/index.html index e341edc73..55e8e60c0 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-diff/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-diff/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-git-like-diff/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-git-like-diff/index.html index 95daca2e1..163135368 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-git-like-diff/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-git-like-diff/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_checked-commit/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_checked-commit/index.html index f4cbd4039..f139aa69a 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_checked-commit/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_checked-commit/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_commit/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_commit/index.html index 9306a7065..fa4185aca 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_commit/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_commit/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_compare-config/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_compare-config/index.html index f1705d1b5..1fd3ad196 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_compare-config/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_compare-config/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_health/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_health/index.html index 1ad3b46a1..ca05e074f 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_health/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_health/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_is-in-sync/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_is-in-sync/index.html index 93d39f137..c8b998b9a 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_is-in-sync/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_is-in-sync/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_replace-config-with-oper/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_replace-config-with-oper/index.html index dcecce299..ae6755a32 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_replace-config-with-oper/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_replace-config-with-oper/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-from-network/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-from-network/index.html index 0797fd882..f093592cf 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-from-network/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-from-network/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-to-network/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-to-network/index.html index d210ca395..e38485251 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-to-network/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_sync-to-network/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_validate/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_validate/index.html index eea235dd0..9d3361e5e 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_validate/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_validate/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_check_installed_devices/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_check_installed_devices/index.html index 8ee056017..b8017f6fb 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_check_installed_devices/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_check_installed_devices/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_get_installed_devices/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_get_installed_devices/index.html index 2a2566c6e..0919e79d9 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_get_installed_devices/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_get_installed_devices/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_install_multiple_nodes/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_install_multiple_nodes/index.html index acf54f173..3791719da 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_install_multiple_nodes/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_install_multiple_nodes/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_uninstall_multiple_nodes/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_uninstall_multiple_nodes/index.html index 0deeee91c..74562fc3e 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_uninstall_multiple_nodes/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_uninstall_multiple_nodes/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/index.html index 21082b7d7..7e9dd17ec 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_read-properties/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_read-properties/index.html index 9f5847f2a..8cd0d129b 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_read-properties/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_read-properties/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_update-properties/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_update-properties/index.html index 7c43e75fc..c90dcca63 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_update-properties/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-properties/rpc_update-properties/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-queries/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-queries/index.html index 7940138a6..32a57c4d4 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-queries/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-queries/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-shell/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-shell/index.html index d49de0bdf..0c77b173f 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-shell/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/uniconfig-shell/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/unistore-api/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/unistore-api/index.html index 57a3a99d0..b4e973f38 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/unistore-api/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/unistore-api/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/utilities/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/utilities/index.html index b48c01721..4fffafc98 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/utilities/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/utilities/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/utilities/openapi-diff/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/utilities/openapi-diff/index.html index 5c24539bc..723f25162 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/utilities/openapi-diff/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/utilities/openapi-diff/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/utilities/yang-packager/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/utilities/yang-packager/index.html index 5f6095f0d..4d02532d2 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/utilities/yang-packager/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/utilities/yang-packager/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-uniconfig/user-guide/uniconfig-operations/yang-patch/index.html b/frinx-uniconfig/user-guide/uniconfig-operations/yang-patch/index.html index c2d74ecf4..8c96481a7 100644 --- a/frinx-uniconfig/user-guide/uniconfig-operations/yang-patch/index.html +++ b/frinx-uniconfig/user-guide/uniconfig-operations/yang-patch/index.html @@ -6,7 +6,7 @@ - + @@ -34,12 +34,12 @@ - + - + - - + +
    diff --git a/frinx-workflow-manager/blueprints/index.html b/frinx-workflow-manager/blueprints/index.html index 214d7333e..731a1ea4e 100644 --- a/frinx-workflow-manager/blueprints/index.html +++ b/frinx-workflow-manager/blueprints/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-workflow-manager/create-and-modify-workflows/index.html b/frinx-workflow-manager/create-and-modify-workflows/index.html index 329a025a2..9a7a8c255 100644 --- a/frinx-workflow-manager/create-and-modify-workflows/index.html +++ b/frinx-workflow-manager/create-and-modify-workflows/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-workflow-manager/introduction/index.html b/frinx-workflow-manager/introduction/index.html index 248ab8ac0..d0d50086e 100644 --- a/frinx-workflow-manager/introduction/index.html +++ b/frinx-workflow-manager/introduction/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/frinx-workflow-manager/inventory/index.html b/frinx-workflow-manager/inventory/index.html index dab7374b0..c716df124 100644 --- a/frinx-workflow-manager/inventory/index.html +++ b/frinx-workflow-manager/inventory/index.html @@ -6,7 +6,7 @@ - + @@ -36,12 +36,12 @@ - + - + - - + +
    diff --git a/frinx-workflow-manager/workflow-builder/index.html b/frinx-workflow-manager/workflow-builder/index.html index 9406204a5..ac38b5f4d 100644 --- a/frinx-workflow-manager/workflow-builder/index.html +++ b/frinx-workflow-manager/workflow-builder/index.html @@ -6,7 +6,7 @@ - + @@ -36,11 +36,11 @@ - + - + - +
    diff --git a/index.html b/index.html index b9f97f77c..39a5eb868 100644 --- a/index.html +++ b/index.html @@ -6,7 +6,7 @@ - + @@ -34,11 +34,11 @@ - + - + - +
    diff --git a/resources/js/config.js b/resources/js/config.js index 4e65bb025..20b69b62b 100644 --- a/resources/js/config.js +++ b/resources/js/config.js @@ -1 +1 @@ -var __DOCS_CONFIG__ = {"id":"IwP142d7/gzu2QiqTCd63VNRLJ6d5DMs2RH","key":"gdTzs/B6i9jTGE5mu1m4zH+6jjpBg7EfaeIRt/Hj74s.d78VjKLlzdXF9JXUkzbXxUplF9YFuK6gnCdw4VeleUy65vCy1A5t6P6VVWkcgObLzdrkFxaaDISNZJdDaa3naA.108198","base":"/","host":"docs.frinx.io","version":"1.0.0","useRelativePaths":true,"documentName":"index.html","appendDocumentName":false,"trailingSlash":true,"preloadSearch":false,"cacheBustingToken":"2.4.0.752073393482","cacheBustingStrategy":"query","sidebarFilterPlaceholder":"Filter","toolbarFilterPlaceholder":"Filter","showSidebarFilter":true,"filterNotFoundMsg":"No member names found containing the query \"{query}\"","maxHistoryItems":15,"homeIcon":"","access":[{"value":"public","label":"Public"},{"value":"protected","label":"Protected"}],"toolbarLinks":[{"id":"fields","label":"Fields"},{"id":"properties","label":"Properties","shortLabel":"Props"},{"id":"methods","label":"Methods"},{"id":"events","label":"Events"}],"sidebar":[{"n":"/","l":"Welcome","s":""},{"n":"frinx-machine","l":"Frinx Machine","c":false,"i":[{"n":"getting-started","l":"FRINX Machine introduction","s":""},{"n":"azure-ad","l":"Frinx Machine with Azure AD","s":""},{"n":"monitoring","l":"Monitoring with Grafana","s":""},{"n":"use-cases","l":"Demo Use Cases","i":[{"n":"add-to-inventory-and-install","l":"Add a device to inventory and install it"},{"n":"create-l2-vpn-p2p","l":"Creating a Layer 2 VPN Point-​to-​Point Connection"},{"n":"frinx-machine-demo-manual","l":"FRINX Machine Demo Manual"},{"n":"install-all-devices-from-inventory","l":"Install all devices from inventory"},{"n":"policy-filter-xr","l":"Policy filter XR"}],"s":""}],"s":""},{"n":"frinx-uniconfig","l":"Frinx Uni​Config","c":false,"i":[{"n":"getting-started","l":"Getting started","s":""},{"n":"user-guide","l":"User Guide","i":[{"n":"basic-concepts","l":"Basic Concepts"},{"n":"network-management-protocols","l":"Device Installation","i":[{"n":"uniconfig-installing","l":"Device Installation"},{"n":"uniconfig_cli","l":"Uni​Config CLI"},{"n":"uniconfig_netconf","l":"Uni​Config NETCONF","i":[{"n":"calix","l":"Calix devices"},{"n":"iosxr","l":"Cisco IOS XR devices"},{"n":"ocnos","l":"IP Infusion Oc​NOS Devices"},{"n":"junos","l":"Juniper Junos devices"},{"n":"sros","l":"Nokia SROS devices"}]},{"n":"uniconfig_snmp","l":"Uni​Config SNMP"},{"n":"updating-installation-parameters","l":"Updating installation parameters"},{"n":"uniconfig-native_cli","l":"Uni​Config-​native CLI"}]},{"n":"uniconfig-operations","l":"Uni​Config Operations","i":[{"n":"jsonb-filtering","l":"JSONB Filtering","i":[{"n":"application-jsonb-filtering","l":"Application JSONB Filtering"},{"n":"database-jsonb-filtering","l":"Database JSONB Filtering"}]},{"n":"snapshot-manager","l":"Snapshot Manager","i":[{"n":"obtain_snapshot_metadata","l":"Obtaining snapshots-​metadata"},{"n":"rpc_create-snapshot","l":"RPC create-​snapshot"},{"n":"rpc_delete-snapshot","l":"RPC delete-​snapshot"},{"n":"rpc_replace-config-with-snapshot","l":"RPC replace-​config-​with-​snapshot"}]},{"n":"subtree-manager","l":"Subtree Manager","i":[{"n":"rpc_bulk-edit","l":"RPC bulk-​edit"},{"n":"rpc_calculate-subtree-diff","l":"RPC calculate-​subtree-​diff"},{"n":"rpc_calculate-subtree-git-like-diff","l":"RPC calculate-​subtree-​git-​like-​diff"},{"n":"rpc_copy-many-to-one","l":"RPC copy-​many-​to-​one"},{"n":"rpc_copy-one-to-many","l":"RPC 
copy-​one-​to-​many"},{"n":"rpc_copy-one-to-one","l":"RPC copy-​one-​to-​one"}]},{"n":"transaction-log","l":"Transaction Log","i":[{"n":"rpc_revert-changes","l":"RPC revert-​changes"},{"n":"transaction-tracker","l":"Transaction tracker"}]},{"n":"uniconfig-node-manager","l":"Uni​Config Node Manager","i":[{"n":"rpc_calculate-diff","l":"RPC calculate-​diff"},{"n":"rpc_calculate-git-like-diff","l":"RPC calculate-​git-​like-​diff"},{"n":"uniconfig_check_installed_devices","l":"RPC check-​installed-​nodes"},{"n":"rpc_checked-commit","l":"RPC checked-​commit"},{"n":"rpc_commit","l":"RPC commit"},{"n":"rpc_compare-config","l":"RPC compare-​config"},{"n":"uniconfig_get_installed_devices","l":"RPC get-​installed-​nodes"},{"n":"rpc_health","l":"RPC health"},{"n":"uniconfig_install_multiple_nodes","l":"RPC install-​multiple-​nodes"},{"n":"rpc_is-in-sync","l":"RPC is-​in-​sync"},{"n":"rpc_replace-config-with-oper","l":"RPC replace-​config-​with-​operational"},{"n":"rpc_sync-from-network","l":"RPC sync-​from-​network"},{"n":"rpc_sync-to-network","l":"RPC sync-​to-​network"},{"n":"uniconfig_uninstall_multiple_nodes","l":"RPC uninstall-​multiple-​nodes"},{"n":"rpc_validate","l":"RPC validate"}]},{"n":"uniconfig-properties","l":"Uni​Config properties","i":[{"n":"rpc_read-properties","l":"RPC read-​properties"},{"n":"rpc_update-properties","l":"RPC update-​properties"}]},{"n":"utilities","l":"Utilities","i":[{"n":"openapi-diff","l":"Difference between Open​API specifications"},{"n":"yang-packager","l":"YANG packager"}]},{"n":"admin-state","l":"Admin State"},{"n":"build-and-commit-model","l":"Build-​and-​Commit Model"},{"n":"device-discovery","l":"Device Discovery"},{"n":"dryrun-manager","l":"Dry-​run manager"},{"n":"immediate-commit-model","l":"Immediate Commit Model"},{"n":"kafka-notifications","l":"Kafka Notifications"},{"n":"operational-data-about-transactions","l":"Operational data about transactions"},{"n":"templates-manager","l":"Templates Manager"},{"n":"restconf","l":"Uni​Config - Sending and receiving data (RESTCONF)"},{"n":"uniconfig-queries","l":"Uni​Config Queries"},{"n":"uniconfig-shell","l":"Uni​Config Shell"},{"n":"unistore-api","l":"Uni​Store API"},{"n":"yang-patch","l":"YANG Patch Operations"}]},{"n":"operational-procedures","l":"Operational Procedures","i":[{"n":"data-flows","l":"Data flows and transformations"},{"n":"data-security-models","l":"Data Security Models"},{"n":"logging","l":"Logging Framework"},{"n":"openapi","l":"Open​API"},{"n":"thread-pools","l":"Thread pools"},{"n":"postgres-tls","l":"TLS encryption for Postgres database"},{"n":"tls","l":"TLS-​based Authentication"},{"n":"uniconfig-clustering","l":"Uni​Config Clustering"},{"n":"uniconfig-properties","l":"Uniconfig properties"}]},{"n":"performance-and-scale","l":"Performance and scale","c":false,"i":[{"n":"performance_characteristics","l":"Performance characteristics"}]},{"n":"monitoring","l":"Monitoring"},{"n":"sdk","l":"Uni​Config Client (SDK)"}]},{"n":"developer-guide","l":"Developer Guide","i":[{"n":"architecture","l":"Architecture"},{"n":"translation-units-in-general","l":"Translation Units in general"},{"n":"translation-units-docs","l":"Translation Units Documentation for FRINX Uniconfig"},{"n":"open-config-to-device-config-mapping","l":"Open​Config to device config mapping"},{"n":"translation-unit-general-implementation","l":"Developing a new translation unit"},{"n":"cli-translation-unit","l":"Implementing CLI Translation Unit"},{"n":"netconf-translation-unit","l":"NETCONF Unified Translation 
Unit"},{"n":"native-cli-units","l":"Native-​CLI translation units"},{"n":"metrics","l":"Metrics"}]},{"n":"release-notes","l":"Release notes","i":[{"n":"uniconfig-5.0.7","l":"Uniconfig 5.​0.​7 Release Notes"},{"n":"uniconfig-5.0.6","l":"Uni​Config 5.​0.​6"},{"n":"uniconfig-5.0.5","l":"Uni​Config 5.​0.​5"},{"n":"uniconfig-5.0.4","l":"Uni​Config 5.​0.​4"},{"n":"uniconfig-5.0.3","l":"Uni​Config 5.​0.​3"},{"n":"uniconfig-5.0.2","l":"Uni​Config 5.​0.​2"},{"n":"uniconfig-5.0.1","l":"Uni​Config 5.​0.​1"},{"n":"uniconfig-4.2.10","l":"Uni​Config 4.​2.​10"},{"n":"uniconfig-4.2.9","l":"Uni​Config 4.​2.​9"},{"n":"uniconfig-4.2.8","l":"Uni​Config 4.​2.​8"},{"n":"uniconfig-4.2.7","l":"Uni​Config 4.​2.​7"},{"n":"uniconfig-4.2.6","l":"Uni​Config 4.​2.​6"},{"n":"uniconfig-4.2.5","l":"Uni​Config 4.​2.​5"},{"n":"uniconfig-4.2.4","l":"Uni​Config 4.​2.​4"},{"n":"uniconfig-4.2.3","l":"Uni​Config 4.​2.​3"},{"n":"uniconfig-5.0.8","l":"Uniconfig 5.​0.​8 Release Notes"},{"n":"uniconfig-5.0.9","l":"Uniconfig 5.​0.​9 Release Notes"},{"n":"uniconfig-5.0.10","l":"Uniconfig 5.​0.​10 Release Notes"},{"n":"uniconfig-5.0.11","l":"Uniconfig 5.​0.​11 Release Notes"},{"n":"uniconfig-5.0.12","l":"Uniconfig 5.​0.​12 Release Notes"},{"n":"uniconfig-5.0.13","l":"Uniconfig 5.​0.​13 Release Notes"},{"n":"uniconfig-5.0.14","l":"Uniconfig 5.​0.​14 Release Notes"},{"n":"uniconfig-5.0.15","l":"Uniconfig 5.​0.​15 Release Notes"},{"n":"uniconfig-5.0.16","l":"Uniconfig 5.​0.​16 Release Notes"},{"n":"uniconfig-5.0.17","l":"Uniconfig 5.​0.​17 Release Notes"},{"n":"uniconfig-5.0.18","l":"Uniconfig 5.​0.​18 Release Notes"},{"n":"uniconfig-5.0.19","l":"Uniconfig 5.​0.​19 Release Notes"},{"n":"uniconfig-5.0.20","l":"Uniconfig 5.​0.​20 Release Notes"},{"n":"uniconfig-5.0.21","l":"Uniconfig 5.​0.​21 Release Notes"},{"n":"uniconfig-5.0.22","l":"Uniconfig 5.​0.​22 Release Notes"},{"n":"uniconfig-5.0.23","l":"Uniconfig 5.​0.​23 Release Notes"},{"n":"uniconfig-5.0.24","l":"Uniconfig 5.​0.​24 Release Notes"},{"n":"uniconfig-5.0.25","l":"Uniconfig 5.​0.​25 Release Notes"},{"n":"uniconfig-5.1.0","l":"Uniconfig 5.​1.​0 Release Notes"},{"n":"uniconfig-5.1.1","l":"Uniconfig 5.​1.​1 Release Notes"},{"n":"uniconfig-5.1.2","l":"Uniconfig 5.​1.​2 Release Notes"},{"n":"uniconfig-5.1.3","l":"Uniconfig 5.​1.​3 Release Notes"},{"n":"uniconfig-5.1.4","l":"Uniconfig 5.​1.​4 Release Notes"},{"n":"uniconfig-5.1.5","l":"Uniconfig 5.​1.​5 Release Notes"},{"n":"uniconfig-5.1.6","l":"Uniconfig 5.​1.​6 Release Notes"},{"n":"uniconfig-5.1.7","l":"Uniconfig 5.​1.​7 Release Notes"},{"n":"uniconfig-5.1.8","l":"Uniconfig 5.​1.​8 Release Notes"},{"n":"uniconfig-5.1.9","l":"Uniconfig 5.​1.​9 Release Notes"},{"n":"uniconfig-5.1.10","l":"Uniconfig 5.​1.​10 Release Notes"},{"n":"uniconfig-5.1.11","l":"Uniconfig 5.​1.​11 Release Notes"},{"n":"uniconfig-5.1.12","l":"Uniconfig 5.​1.​12 Release Notes"},{"n":"uniconfig-5.1.13","l":"Uniconfig 5.​1.​13"},{"n":"uniconfig-5.1.14","l":"Uniconfig 5.​1.​14"},{"n":"uniconfig-5.2.0","l":"Uniconfig 5.​2.​0 Release Notes"},{"n":"uniconfig-5.2.1","l":"Uniconfig 5.​2.​1"},{"n":"uniconfig-5.2.2","l":"Uniconfig 5.​2.​2"}]},{"n":"translation-units-docs","l":"Translation Units","i":[{"n":"ietf-to-oc-mapping","l":"Ietf to oc mapping","c":false,"i":[{"n":"ietf_l2p2p_local_to_oc","l":"IETF L​2​VPN YANG"},{"n":"ietf_l2p2p_remote_to_oc","l":"IETF L​2​VPN YANG"},{"n":"ietf_l2vpn_to_oc","l":"IETF L​2​VPN YANG"},{"n":"ietf_l3vpn_to_oc","l":"IETF L​3​VPN 
YANG"}]},{"n":"configuration-datasets","l":"Interfaces","i":[{"n":"acl","l":"Acl","c":false,"i":[{"n":"acl_interfaces","l":"Access Control List"},{"n":"acl","l":"Access Control List"}]},{"n":"cable","l":"Cable","c":false,"i":[{"n":"cable_downstream_profile","l":"cable DOWNSTREAM CONTROLLER-​PROFILE"},{"n":"cable_fiber_node","l":"cable FIBER-​NODE"},{"n":"cable_rpd","l":"cable RPD"}]},{"n":"interfaces","l":"Interfaces","c":false,"i":[{"n":"bridge_interface","l":"BRIDGE interface"},{"n":"cable_interface","l":"CABLE interface"},{"n":"ethernet_interface","l":"Ethernet interface"},{"n":"l2vlan_interface","l":"L​2​VLAN interface"},{"n":"l3vlan_interface","l":"L​3 VLAN interface"},{"n":"lag_interface","l":"Link Aggregation Group (bundle) interface"},{"n":"wideband_interface","l":"WIDEBAND interface"}]},{"n":"ipsec","l":"Ipsec","c":false,"i":[{"n":"ipsec","l":"Internet Protocol Security (I​Psec)"}]},{"n":"netflow","l":"Netflow","c":false,"i":[{"n":"netflow_interfaces","l":"Net​Flow"}]},{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"l2p2p","l":"L​2​p​2​p","c":false,"i":[{"n":"connection_point","l":"L​2​P​2​P configuration"}]},{"n":"l2vpn","l":"L​2​vpn","c":false,"i":[{"n":"connection_point_l2vpn","l":"L​2​VPN (VPLS with BGP autodiscovery) configuration"}]},{"n":"l2vsi","l":"L​2​vsi","c":false,"i":[{"n":"l2vsicp","l":"L​2​VSI (L​2 virtual switch instance virtual circuit)"},{"n":"l2vsi","l":"L​2​VSI (L​2 virtual switch instance)"}]},{"n":"l3vpn","l":"L​3​vpn","c":false,"i":[{"n":"network_instance_l3vpn_bgp","l":"L​3​VPN configuration (BGP as CE-​PE protocol)"},{"n":"network_instance_l3vpn_ospf","l":"L​3​VPN configuration (OSPF as CE-​PE protocol)"}]},{"n":"mpls","l":"Mpls","c":false,"i":[{"n":"mpls_ldp","l":"Multiprotocol Label Switching - Label Distribution Protocol (MPLS LDP)"},{"n":"mpls_rsvp","l":"Multiprotocol Label Switching - Resource Reservation Protocol (MPLS RSVP)"},{"n":"mpls_te","l":"Multiprotocol Label Switching - Traffic Engineering (MPLS-​TE)"},{"n":"mpls_tunnel","l":"Multiprotocol Label Switching - Tunnel"}]},{"n":"policy-forwarding","l":"Policy forwarding","c":false,"i":[{"n":"pf_interfaces","l":"Interface policy configuration"}]},{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp","l":"Border Gateway Protocol (BGP)"},{"n":"isis","l":"Intermediate System to Intermediate System (IS-​IS)"},{"n":"ospf","l":"Open Shortest Path First (OSPF)"},{"n":"ospfv3","l":"Open Shortest Path First v​3 (OSP​Fv​3)"},{"n":"static","l":"Static Route"}]},{"n":"vlans","l":"Vlans","c":false,"i":[{"n":"vlan","l":"VLAN"}]},{"n":"network_instance","l":"Configure network instance (VRF)"}]},{"n":"routing-policy","l":"Routing policy","c":false,"i":[{"n":"routing-policy","l":"Routing Policy"}]},{"n":"aaa","l":"AAA - Authentication Authorization Accounting"},{"n":"bcast-containment","l":"Broadcast-​Containment (Broadcast-​containment filters)"},{"n":"cdp","l":"Configure CDP interfaces"},{"n":"fdp","l":"Configure FDP interfaces"},{"n":"stp","l":"Configure STP interfaces"},{"n":"oam","l":"Ethernet OAM / Ethernet CFM"},{"n":"evc","l":"Ethernet Virtual Circuit (EVC)"},{"n":"evpn","l":"Ethernet Virtual Private Network (EVPN)"},{"n":"hsrp","l":"Hot Standby Router Protocol (HSRP)"},{"n":"l2-cft","l":"L​2-​Cft (Layer 2 Control Frame Forwarding)"},{"n":"logging","l":"Logging (syslog)"},{"n":"privilege","l":"Privilege"},{"n":"probes","l":"Probes"},{"n":"qos","l":"Quality of Service"},{"n":"relay-agent","l":"Relay Agent"},{"n":"snmp","l":"Simple Network Management Protocol 
(SNMP)"},{"n":"system","l":"System-​wide services and functions"}]},{"n":"operational-datasets","l":"Network Instances","i":[{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp_summary","l":"BGP global + neighbors"},{"n":"bgp_rib","l":"BGP RIB"},{"n":"ospf_summary","l":"Show router ospf type, ID, interfaces"}]}]},{"n":"interfaces","l":"Interfaces"},{"n":"platform","l":"Platform"},{"n":"cdp","l":"Show CDP interfaces and neighbors"},{"n":"lldp","l":"Show LLDP interfaces and neighbors"},{"n":"system","l":"System"}]},{"n":"translation-framework-101","l":"Table of Contents"}]},{"n":"q_a","l":"FAQ","s":""},{"n":"glossary-of-terms","l":"Glossary of Terms"},{"n":"supported-devices","l":"List of Supported Devices"}],"s":""},{"n":"frinx-workflow-manager","l":"Frinx Workflow Manager","c":false,"i":[{"n":"introduction","l":"FRINX Workflow Manager introduction"},{"n":"create-and-modify-workflows","l":"Create and Modify Workflows and Workers"},{"n":"blueprints","l":"Device Blueprints"},{"n":"inventory","l":"Device Inventory"},{"n":"workflow-builder","l":"Workflow Builder"}],"s":""},{"n":"frinx-resource-manager","l":"Frinx Resource Manager","c":false,"i":[{"n":"introduction","l":"FRINX Resource Manager introduction"},{"n":"user-guide","l":"User Guide"},{"n":"pools","l":"Pools"},{"n":"architecture","l":"Resource Manager architecture"},{"n":"developer-guide","l":"Developer Guide"}],"s":""}],"search":{"mode":0,"minChars":2,"maxResults":20,"placeholder":"Search","hotkeys":["/"],"noResultsFoundMsg":"Sorry, no results found.","recognizeLanguages":true,"languages":[0],"preload":false}}; +var __DOCS_CONFIG__ = {"id":"U2AGgeOBCKb0GHRyvXSn7WE1aUM1322WCxp","key":"Uta4DtIcYx3DSWe/FOJWZwiFhoY3l+TPqqUjLGv/8ZU.VyRr7Slp8coWv1W4seJAyM8BT8e1RrRmcxNGgo0tadmxFcvxX9AxCM9TFzeG1W8MqNOpSl9ZZ57S56WR8jzxaQ.108305","base":"/","host":"docs.frinx.io","version":"1.0.0","useRelativePaths":true,"documentName":"index.html","appendDocumentName":false,"trailingSlash":true,"preloadSearch":false,"cacheBustingToken":"2.4.0.752074230158","cacheBustingStrategy":"query","sidebarFilterPlaceholder":"Filter","toolbarFilterPlaceholder":"Filter","showSidebarFilter":true,"filterNotFoundMsg":"No member names found containing the query \"{query}\"","maxHistoryItems":15,"homeIcon":"","access":[{"value":"public","label":"Public"},{"value":"protected","label":"Protected"}],"toolbarLinks":[{"id":"fields","label":"Fields"},{"id":"properties","label":"Properties","shortLabel":"Props"},{"id":"methods","label":"Methods"},{"id":"events","label":"Events"}],"sidebar":[{"n":"/","l":"Welcome","s":""},{"n":"frinx-machine","l":"Frinx Machine","c":false,"i":[{"n":"getting-started","l":"FRINX Machine introduction","s":""},{"n":"azure-ad","l":"Frinx Machine with Azure AD","s":""},{"n":"monitoring","l":"Monitoring with Grafana","s":""},{"n":"use-cases","l":"Demo Use Cases","i":[{"n":"add-to-inventory-and-install","l":"Add a device to inventory and install it"},{"n":"create-l2-vpn-p2p","l":"Creating a Layer 2 VPN Point-​to-​Point Connection"},{"n":"frinx-machine-demo-manual","l":"FRINX Machine Demo Manual"},{"n":"install-all-devices-from-inventory","l":"Install all devices from inventory"},{"n":"policy-filter-xr","l":"Policy filter XR"}],"s":""}],"s":""},{"n":"frinx-uniconfig","l":"Frinx Uni​Config","c":false,"i":[{"n":"getting-started","l":"Getting started","s":""},{"n":"user-guide","l":"User Guide","i":[{"n":"basic-concepts","l":"Basic Concepts"},{"n":"network-management-protocols","l":"Device 
Installation","i":[{"n":"uniconfig-installing","l":"Device Installation"},{"n":"uniconfig_cli","l":"Uni​Config CLI"},{"n":"uniconfig_netconf","l":"Uni​Config NETCONF","i":[{"n":"calix","l":"Calix devices"},{"n":"iosxr","l":"Cisco IOS XR devices"},{"n":"ocnos","l":"IP Infusion Oc​NOS Devices"},{"n":"junos","l":"Juniper Junos devices"},{"n":"sros","l":"Nokia SROS devices"}]},{"n":"uniconfig_snmp","l":"Uni​Config SNMP"},{"n":"updating-installation-parameters","l":"Updating installation parameters"},{"n":"uniconfig-native_cli","l":"Uni​Config-​native CLI"}]},{"n":"uniconfig-operations","l":"Uni​Config Operations","i":[{"n":"jsonb-filtering","l":"JSONB Filtering","i":[{"n":"application-jsonb-filtering","l":"Application JSONB Filtering"},{"n":"database-jsonb-filtering","l":"Database JSONB Filtering"}]},{"n":"snapshot-manager","l":"Snapshot Manager","i":[{"n":"obtain_snapshot_metadata","l":"Obtaining snapshots-​metadata"},{"n":"rpc_create-snapshot","l":"RPC create-​snapshot"},{"n":"rpc_delete-snapshot","l":"RPC delete-​snapshot"},{"n":"rpc_replace-config-with-snapshot","l":"RPC replace-​config-​with-​snapshot"}]},{"n":"subtree-manager","l":"Subtree Manager","i":[{"n":"rpc_bulk-edit","l":"RPC bulk-​edit"},{"n":"rpc_calculate-subtree-diff","l":"RPC calculate-​subtree-​diff"},{"n":"rpc_calculate-subtree-git-like-diff","l":"RPC calculate-​subtree-​git-​like-​diff"},{"n":"rpc_copy-many-to-one","l":"RPC copy-​many-​to-​one"},{"n":"rpc_copy-one-to-many","l":"RPC copy-​one-​to-​many"},{"n":"rpc_copy-one-to-one","l":"RPC copy-​one-​to-​one"}]},{"n":"transaction-log","l":"Transaction Log","i":[{"n":"rpc_revert-changes","l":"RPC revert-​changes"},{"n":"transaction-tracker","l":"Transaction tracker"}]},{"n":"uniconfig-node-manager","l":"Uni​Config Node Manager","i":[{"n":"rpc_calculate-diff","l":"RPC calculate-​diff"},{"n":"rpc_calculate-git-like-diff","l":"RPC calculate-​git-​like-​diff"},{"n":"uniconfig_check_installed_devices","l":"RPC check-​installed-​nodes"},{"n":"rpc_checked-commit","l":"RPC checked-​commit"},{"n":"rpc_commit","l":"RPC commit"},{"n":"rpc_compare-config","l":"RPC compare-​config"},{"n":"uniconfig_get_installed_devices","l":"RPC get-​installed-​nodes"},{"n":"rpc_health","l":"RPC health"},{"n":"uniconfig_install_multiple_nodes","l":"RPC install-​multiple-​nodes"},{"n":"rpc_is-in-sync","l":"RPC is-​in-​sync"},{"n":"rpc_replace-config-with-oper","l":"RPC replace-​config-​with-​operational"},{"n":"rpc_sync-from-network","l":"RPC sync-​from-​network"},{"n":"rpc_sync-to-network","l":"RPC sync-​to-​network"},{"n":"uniconfig_uninstall_multiple_nodes","l":"RPC uninstall-​multiple-​nodes"},{"n":"rpc_validate","l":"RPC validate"}]},{"n":"uniconfig-properties","l":"Uni​Config properties","i":[{"n":"rpc_read-properties","l":"RPC read-​properties"},{"n":"rpc_update-properties","l":"RPC update-​properties"}]},{"n":"utilities","l":"Utilities","i":[{"n":"openapi-diff","l":"Difference between Open​API specifications"},{"n":"yang-packager","l":"YANG packager"}]},{"n":"admin-state","l":"Admin State"},{"n":"build-and-commit-model","l":"Build-​and-​Commit Model"},{"n":"device-discovery","l":"Device Discovery"},{"n":"dryrun-manager","l":"Dry-​run manager"},{"n":"immediate-commit-model","l":"Immediate Commit Model"},{"n":"kafka-notifications","l":"Kafka Notifications"},{"n":"operational-data-about-transactions","l":"Operational data about transactions"},{"n":"templates-manager","l":"Templates Manager"},{"n":"restconf","l":"Uni​Config - Sending and receiving data 
(RESTCONF)"},{"n":"uniconfig-queries","l":"Uni​Config Queries"},{"n":"uniconfig-shell","l":"Uni​Config Shell"},{"n":"unistore-api","l":"Uni​Store API"},{"n":"yang-patch","l":"YANG Patch Operations"}]},{"n":"operational-procedures","l":"Operational Procedures","i":[{"n":"data-flows","l":"Data flows and transformations"},{"n":"data-security-models","l":"Data Security Models"},{"n":"logging","l":"Logging Framework"},{"n":"openapi","l":"Open​API"},{"n":"thread-pools","l":"Thread pools"},{"n":"postgres-tls","l":"TLS encryption for Postgres database"},{"n":"tls","l":"TLS-​based Authentication"},{"n":"uniconfig-clustering","l":"Uni​Config Clustering"},{"n":"uniconfig-properties","l":"Uniconfig properties"}]},{"n":"performance-and-scale","l":"Performance and scale","c":false,"i":[{"n":"performance_characteristics","l":"Performance characteristics"}]},{"n":"monitoring","l":"Monitoring"},{"n":"sdk","l":"Uni​Config Client (SDK)"}]},{"n":"developer-guide","l":"Developer Guide","i":[{"n":"architecture","l":"Architecture"},{"n":"translation-units-in-general","l":"Translation Units in general"},{"n":"translation-units-docs","l":"Translation Units Documentation for FRINX Uniconfig"},{"n":"open-config-to-device-config-mapping","l":"Open​Config to device config mapping"},{"n":"translation-unit-general-implementation","l":"Developing a new translation unit"},{"n":"cli-translation-unit","l":"Implementing CLI Translation Unit"},{"n":"netconf-translation-unit","l":"NETCONF Unified Translation Unit"},{"n":"native-cli-units","l":"Native-​CLI translation units"},{"n":"metrics","l":"Metrics"}]},{"n":"release-notes","l":"Release notes","i":[{"n":"uniconfig-5.0.7","l":"Uniconfig 5.​0.​7 Release Notes"},{"n":"uniconfig-5.0.6","l":"Uni​Config 5.​0.​6"},{"n":"uniconfig-5.0.5","l":"Uni​Config 5.​0.​5"},{"n":"uniconfig-5.0.4","l":"Uni​Config 5.​0.​4"},{"n":"uniconfig-5.0.3","l":"Uni​Config 5.​0.​3"},{"n":"uniconfig-5.0.2","l":"Uni​Config 5.​0.​2"},{"n":"uniconfig-5.0.1","l":"Uni​Config 5.​0.​1"},{"n":"uniconfig-4.2.10","l":"Uni​Config 4.​2.​10"},{"n":"uniconfig-4.2.9","l":"Uni​Config 4.​2.​9"},{"n":"uniconfig-4.2.8","l":"Uni​Config 4.​2.​8"},{"n":"uniconfig-4.2.7","l":"Uni​Config 4.​2.​7"},{"n":"uniconfig-4.2.6","l":"Uni​Config 4.​2.​6"},{"n":"uniconfig-4.2.5","l":"Uni​Config 4.​2.​5"},{"n":"uniconfig-4.2.4","l":"Uni​Config 4.​2.​4"},{"n":"uniconfig-4.2.3","l":"Uni​Config 4.​2.​3"},{"n":"uniconfig-5.0.8","l":"Uniconfig 5.​0.​8 Release Notes"},{"n":"uniconfig-5.0.9","l":"Uniconfig 5.​0.​9 Release Notes"},{"n":"uniconfig-5.0.10","l":"Uniconfig 5.​0.​10 Release Notes"},{"n":"uniconfig-5.0.11","l":"Uniconfig 5.​0.​11 Release Notes"},{"n":"uniconfig-5.0.12","l":"Uniconfig 5.​0.​12 Release Notes"},{"n":"uniconfig-5.0.13","l":"Uniconfig 5.​0.​13 Release Notes"},{"n":"uniconfig-5.0.14","l":"Uniconfig 5.​0.​14 Release Notes"},{"n":"uniconfig-5.0.15","l":"Uniconfig 5.​0.​15 Release Notes"},{"n":"uniconfig-5.0.16","l":"Uniconfig 5.​0.​16 Release Notes"},{"n":"uniconfig-5.0.17","l":"Uniconfig 5.​0.​17 Release Notes"},{"n":"uniconfig-5.0.18","l":"Uniconfig 5.​0.​18 Release Notes"},{"n":"uniconfig-5.0.19","l":"Uniconfig 5.​0.​19 Release Notes"},{"n":"uniconfig-5.0.20","l":"Uniconfig 5.​0.​20 Release Notes"},{"n":"uniconfig-5.0.21","l":"Uniconfig 5.​0.​21 Release Notes"},{"n":"uniconfig-5.0.22","l":"Uniconfig 5.​0.​22 Release Notes"},{"n":"uniconfig-5.0.23","l":"Uniconfig 5.​0.​23 Release Notes"},{"n":"uniconfig-5.0.24","l":"Uniconfig 5.​0.​24 Release Notes"},{"n":"uniconfig-5.0.25","l":"Uniconfig 5.​0.​25 Release 
Notes"},{"n":"uniconfig-5.1.0","l":"Uniconfig 5.​1.​0 Release Notes"},{"n":"uniconfig-5.1.1","l":"Uniconfig 5.​1.​1 Release Notes"},{"n":"uniconfig-5.1.2","l":"Uniconfig 5.​1.​2 Release Notes"},{"n":"uniconfig-5.1.3","l":"Uniconfig 5.​1.​3 Release Notes"},{"n":"uniconfig-5.1.4","l":"Uniconfig 5.​1.​4 Release Notes"},{"n":"uniconfig-5.1.5","l":"Uniconfig 5.​1.​5 Release Notes"},{"n":"uniconfig-5.1.6","l":"Uniconfig 5.​1.​6 Release Notes"},{"n":"uniconfig-5.1.7","l":"Uniconfig 5.​1.​7 Release Notes"},{"n":"uniconfig-5.1.8","l":"Uniconfig 5.​1.​8 Release Notes"},{"n":"uniconfig-5.1.9","l":"Uniconfig 5.​1.​9 Release Notes"},{"n":"uniconfig-5.1.10","l":"Uniconfig 5.​1.​10 Release Notes"},{"n":"uniconfig-5.1.11","l":"Uniconfig 5.​1.​11 Release Notes"},{"n":"uniconfig-5.1.12","l":"Uniconfig 5.​1.​12 Release Notes"},{"n":"uniconfig-5.1.13","l":"Uniconfig 5.​1.​13"},{"n":"uniconfig-5.1.14","l":"Uniconfig 5.​1.​14 Release Notes"},{"n":"uniconfig-5.2.0","l":"Uniconfig 5.​2.​0 Release Notes"},{"n":"uniconfig-5.2.1","l":"Uniconfig 5.​2.​1"},{"n":"uniconfig-5.2.2","l":"Uniconfig 5.​2.​2"}]},{"n":"translation-units-docs","l":"Translation Units","i":[{"n":"ietf-to-oc-mapping","l":"Ietf to oc mapping","c":false,"i":[{"n":"ietf_l2p2p_local_to_oc","l":"IETF L​2​VPN YANG"},{"n":"ietf_l2p2p_remote_to_oc","l":"IETF L​2​VPN YANG"},{"n":"ietf_l2vpn_to_oc","l":"IETF L​2​VPN YANG"},{"n":"ietf_l3vpn_to_oc","l":"IETF L​3​VPN YANG"}]},{"n":"configuration-datasets","l":"Interfaces","i":[{"n":"acl","l":"Acl","c":false,"i":[{"n":"acl_interfaces","l":"Access Control List"},{"n":"acl","l":"Access Control List"}]},{"n":"cable","l":"Cable","c":false,"i":[{"n":"cable_downstream_profile","l":"cable DOWNSTREAM CONTROLLER-​PROFILE"},{"n":"cable_fiber_node","l":"cable FIBER-​NODE"},{"n":"cable_rpd","l":"cable RPD"}]},{"n":"interfaces","l":"Interfaces","c":false,"i":[{"n":"bridge_interface","l":"BRIDGE interface"},{"n":"cable_interface","l":"CABLE interface"},{"n":"ethernet_interface","l":"Ethernet interface"},{"n":"l2vlan_interface","l":"L​2​VLAN interface"},{"n":"l3vlan_interface","l":"L​3 VLAN interface"},{"n":"lag_interface","l":"Link Aggregation Group (bundle) interface"},{"n":"wideband_interface","l":"WIDEBAND interface"}]},{"n":"ipsec","l":"Ipsec","c":false,"i":[{"n":"ipsec","l":"Internet Protocol Security (I​Psec)"}]},{"n":"netflow","l":"Netflow","c":false,"i":[{"n":"netflow_interfaces","l":"Net​Flow"}]},{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"l2p2p","l":"L​2​p​2​p","c":false,"i":[{"n":"connection_point","l":"L​2​P​2​P configuration"}]},{"n":"l2vpn","l":"L​2​vpn","c":false,"i":[{"n":"connection_point_l2vpn","l":"L​2​VPN (VPLS with BGP autodiscovery) configuration"}]},{"n":"l2vsi","l":"L​2​vsi","c":false,"i":[{"n":"l2vsicp","l":"L​2​VSI (L​2 virtual switch instance virtual circuit)"},{"n":"l2vsi","l":"L​2​VSI (L​2 virtual switch instance)"}]},{"n":"l3vpn","l":"L​3​vpn","c":false,"i":[{"n":"network_instance_l3vpn_bgp","l":"L​3​VPN configuration (BGP as CE-​PE protocol)"},{"n":"network_instance_l3vpn_ospf","l":"L​3​VPN configuration (OSPF as CE-​PE protocol)"}]},{"n":"mpls","l":"Mpls","c":false,"i":[{"n":"mpls_ldp","l":"Multiprotocol Label Switching - Label Distribution Protocol (MPLS LDP)"},{"n":"mpls_rsvp","l":"Multiprotocol Label Switching - Resource Reservation Protocol (MPLS RSVP)"},{"n":"mpls_te","l":"Multiprotocol Label Switching - Traffic Engineering (MPLS-​TE)"},{"n":"mpls_tunnel","l":"Multiprotocol Label Switching - Tunnel"}]},{"n":"policy-forwarding","l":"Policy 
forwarding","c":false,"i":[{"n":"pf_interfaces","l":"Interface policy configuration"}]},{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp","l":"Border Gateway Protocol (BGP)"},{"n":"isis","l":"Intermediate System to Intermediate System (IS-​IS)"},{"n":"ospf","l":"Open Shortest Path First (OSPF)"},{"n":"ospfv3","l":"Open Shortest Path First v​3 (OSP​Fv​3)"},{"n":"static","l":"Static Route"}]},{"n":"vlans","l":"Vlans","c":false,"i":[{"n":"vlan","l":"VLAN"}]},{"n":"network_instance","l":"Configure network instance (VRF)"}]},{"n":"routing-policy","l":"Routing policy","c":false,"i":[{"n":"routing-policy","l":"Routing Policy"}]},{"n":"aaa","l":"AAA - Authentication Authorization Accounting"},{"n":"bcast-containment","l":"Broadcast-​Containment (Broadcast-​containment filters)"},{"n":"cdp","l":"Configure CDP interfaces"},{"n":"fdp","l":"Configure FDP interfaces"},{"n":"stp","l":"Configure STP interfaces"},{"n":"oam","l":"Ethernet OAM / Ethernet CFM"},{"n":"evc","l":"Ethernet Virtual Circuit (EVC)"},{"n":"evpn","l":"Ethernet Virtual Private Network (EVPN)"},{"n":"hsrp","l":"Hot Standby Router Protocol (HSRP)"},{"n":"l2-cft","l":"L​2-​Cft (Layer 2 Control Frame Forwarding)"},{"n":"logging","l":"Logging (syslog)"},{"n":"privilege","l":"Privilege"},{"n":"probes","l":"Probes"},{"n":"qos","l":"Quality of Service"},{"n":"relay-agent","l":"Relay Agent"},{"n":"snmp","l":"Simple Network Management Protocol (SNMP)"},{"n":"system","l":"System-​wide services and functions"}]},{"n":"operational-datasets","l":"Network Instances","i":[{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp_summary","l":"BGP global + neighbors"},{"n":"bgp_rib","l":"BGP RIB"},{"n":"ospf_summary","l":"Show router ospf type, ID, interfaces"}]}]},{"n":"interfaces","l":"Interfaces"},{"n":"platform","l":"Platform"},{"n":"cdp","l":"Show CDP interfaces and neighbors"},{"n":"lldp","l":"Show LLDP interfaces and neighbors"},{"n":"system","l":"System"}]},{"n":"translation-framework-101","l":"Table of Contents"}]},{"n":"q_a","l":"FAQ","s":""},{"n":"glossary-of-terms","l":"Glossary of Terms"},{"n":"supported-devices","l":"List of Supported Devices"}],"s":""},{"n":"frinx-workflow-manager","l":"Frinx Workflow Manager","c":false,"i":[{"n":"introduction","l":"FRINX Workflow Manager introduction"},{"n":"create-and-modify-workflows","l":"Create and Modify Workflows and Workers"},{"n":"blueprints","l":"Device Blueprints"},{"n":"inventory","l":"Device Inventory"},{"n":"workflow-builder","l":"Workflow Builder"}],"s":""},{"n":"frinx-resource-manager","l":"Frinx Resource Manager","c":false,"i":[{"n":"introduction","l":"FRINX Resource Manager introduction"},{"n":"user-guide","l":"User Guide"},{"n":"pools","l":"Pools"},{"n":"architecture","l":"Resource Manager architecture"},{"n":"developer-guide","l":"Developer Guide"}],"s":""}],"search":{"mode":0,"minChars":2,"maxResults":20,"placeholder":"Search","hotkeys":["/"],"noResultsFoundMsg":"Sorry, no results found.","recognizeLanguages":true,"languages":[0],"preload":false}}; diff --git a/resources/js/search.json b/resources/js/search.json index 307121ead..2219f649c 100644 --- a/resources/js/search.json +++ b/resources/js/search.json @@ -1 +1 @@ -[[{"i":"welcome-to-frinx-documentation","l":"Welcome to FRINX Documentation!","p":["The FRINX documentation site contains all FRINX projects, releases and documentation. 
Please, use search bar in the upper left corner to find specific issues and information that you demand."]},{"l":"FRINX Machine","p":["FRINX Machine provides a platform allowing easy definition, execution and monitoring of complex workflows using FRINX UniConfig."]},{"l":"FRINX UniConfig","p":["FRINX UniConfig is a suite of applications aimed at network configuration management."]},{"l":"FRINX Workflow Manager","p":["FRINX Workflow Manager allows customers to create automated and repeatable digital processes to build, grow and operate their digital communication infrastructure."]},{"l":"FRINX Resource Manager","p":["FRINX Resource Manager helps network operators and infrastructure engineers manage their physical and logical assets and resources."]}],[{"l":"FRINX Machine introduction","p":["FRINX Machine is a dockerized deployment of multiple elements. The FRINX Machine enables large scale automation of network devices, services and retrieval of operational state data from a network. User specific workflows are designed through the use of OpenConfig NETCONF & YANG models, vendor native models, and the CLI. The FRINX Machine uses dockerized containers that are designed and tested to work together to create a user specific solution.","For installation, please refer to: FRINX Machine repository","FRINX-machine can be installed in Kubernetes using the Helm chart"]},{"l":"FRINX Machine components"},{"l":"FRINX UniConfig","p":["Connects to the devices in network","Retrieves and stores configuration from devices","Pushes configuration data to devices","Builds diffs between actual and intended config to execute atomic configuration changes","Retrieves operational data from devices","Manages transactions across one or multiple devices","Translates between CLI, vendor native, and industry standard data models (i.e. OpenConfig)","Reads and stores vendor native data models from mounted network devices (i.e YANG models)","Ensures high availability, reducing network outages and down time","Executes commands on multiple devices simultaneously"]},{"i":"netflix-conductor-workflow-engine","l":"Netflix Conductor (workflow engine)","p":["Atomic tasks are chained together into more complex workflows","Defines, executes and monitors workflows (via REST or UI)","We chose Netflix’s conductor workflow engine since it has been proven to be highly scalable open-source technology that integrates very well with FRINX UniConfig. Further information about conductor can be found at:","Sources: https://github.com/Netflix/conductor","Docs: https://netflix.github.io/conductor/"]},{"i":"elasticsearch-inventory-and-logs","l":"Elasticsearch (inventory and logs)","p":["Stores inventory data in near real-time","Stores workflow execution and meta data","Stores UniConfig logs"]},{"i":"uniconfig-ui-user-interface","l":"UniConfig UI (user interface)","p":["This is the primary user interface for the FRINX Machine","Allows users to create, edit or run workflows and monitor any open tasks","Allows users to mount devices and view their status. The UI allows users to execute UniConfig operations such as read, edit, and commit. 
Configurations can be pushed to or synced from the network","Inventory, workflow execution, metadata and UniConfig log files are all accessible through the UI","View inventory, workflow execution, metadata and UniConfig log files"]},{"l":"High Level Architecture","p":["Following diagram outlines main functional components in the FRINX Machine solution:","FM Architecture","FRINX Machine repository is available at https://github.com/FRINXio/FRINX-machine","Frinx-conductor repository is available at https://github.com/FRINXio/conductor"]},{"l":"Defining a workflow","p":["The workflows are defined using a JSON based domain specific language(DSL) by wiring a set of tasks together. The tasks are either control tasks (fork, conditional, etc.) or application tasks (i.e. encoding a file) that are executed on a remote device.","The FRINX Machine distribution comes pre-loaded with a number of standardized workflows","A detailed description of how to run workflows and tasks, along with examples, can be found in the official Netflix Conductor documentation"]},{"l":"Operating FRINX Machine","p":["To find out more about how to run the pre-packaged workflows, continue to Use cases"]}],[{"l":"Frinx Machine with Azure AD","p":["Frinx Machine supports authentification and authorization via Azure AD. The following sections describe how to set up Azure AD for Frinx Machine."]},{"l":"Client configuration","p":["Register the application in your Azure AD and configure the following settings."]},{"l":"Redirect URIs","p":["Cloud Postman","Cloud swagger","Frontend Login","Frontent login URI is passed to the installation script azure_ad.sh via --redirect_url flag.","https://< IP/DNS>/ ,e.g. https://localhost/","https://< IP/DNS>/oauth2-redirect.html","https://editor.swagger.io/oauth2-redirect.html","https://getpostman.com/oauth2/callback","https://oauth.pstmn.io/v1/callback","Local Postman","Platform configuration","Redirect URI","Set platform redirect URIs on the Authentication page. The table below shows examples of configuration settings.","Single-page application","Syntax","Web","Workflow Manager docs (swager)"]},{"i":"implicit-flow-and-singlemulti-tenancy-settings","l":"Implicit flow and single/multi-tenancy settings","p":["On the same page choose single/multi-tenancy. Based on this setting the parameter --tenant_name is defined in the installation script azure_ad.sh.","For a single-tenant, use Azure AD domain name from AD overview. For multi-tenant use value common. Enabled implicit flow is optional based on specific requirements.","Token config"]},{"l":"API permissions","p":["Client API permissions"]},{"l":"Client secrets","p":["Generate secret and use it as an input parameter for --client_secret flag in the installation script azure_ad.sh. This secret is used in KrakenD azure plugin for translating group id to the group name (human-readable format).","Azure client secrets"]},{"l":"Token claims configuration","p":["Example of encoded JWT token with claims. These claims are transferred to the request header (see KrakenD Azure Plugin docs for more info)."]},{"l":"RBAC configuration","p":["Super user is defined in .env file via ADMIN_GROUP variable."]},{"l":"Workflow Manager","p":["RBAC proxy adds 2 features on top of tenant proxy:","Ensures user authorization to access certain endpoints","Filters workflow definitions and workflow executions based on user's roles, groups and userID","RBAC support simply distinguishes 2 user types: an admin and everyone else. 
An admin has full access to workflow API while the ordinary user can only:","Read workflow definitions","Ordinary users can only view workflow definitions belonging to the same groups","A workflow definition (created by an admin) can have multiple labels assigned","A user can belong into multiple groups","User groups are identified in HTTP request's header field x-auth-user-roles","If an ordinary user's group matches one of the workflow labels, the workflow becomes visible to the user","Execute visible workflow definitions","Monitor running executions","Only those executed by the user currently logged in","Define user roles in workflow by adding role or group name to description label.","Example: added User.ReadWrite, Role.ReadWrite, Group.ReadWrite labels to workflow description."]},{"l":"Uniconfig","p":["Super-users (based on their role and user groups) can use all REST APIs. Regular users will only be able to use GET REST API requests.","Role","READ (GET REQUEST)","WRITE (ALL REQUEST)","Admin (Superuser)","true","Regular user","false"]},{"l":"Resource Manager","p":["A simple RBAC model is implemented where only super-users (based on their role and user groups) can manipulate resource types, resource pools and labels. Regular users will only be able to read the above entities, allocate and free resources.","Role","READ","WRITE","Admin (Superuser)","true","Regular user","false"]}],[{"l":"Grafana","p":["Grafana is an open source visualization and analytics software. It allows to query, visualize, alert on, and explore metrics, logs, and traces no matter where they are stored.","Grafana Login page","By default, Grafana can be accessed at localhost:3000 or 127.0.0.1:3000","Default credentials are:","Username: frinx Password: frinx123!"]},{"l":"Monitoring","p":["Grafana in FRINX Machine monitors multitude of metrics. At this time, these are:","Device monitoring","FRINX Machine logs","Node monitoring","Swarm monitoring","SSL monitoring","UniConfig-controller monitoring","Workflows monitoring"]},{"l":"Device Monitoring","p":["This dashboard displays data on a specific installed device/node."]},{"l":"FRINX Machine Logs","p":["This dashboard monitors all services running in FRINX Machine. You can filter by individual services, and also look for a specific value.","Logs Monitoring"]},{"l":"FRINX Machine Node Monitoring","p":["This dashboard monitors the state of VM/System where FRINX Machine is running. It reports info like CPU utilisation, Memory utilisation, Disk usage, Up-time etc.","Node Monitoring"]},{"l":"FRINX Machine Swarm Monitoring","p":["This dashboard monitors metrics specifically tied to FM within the VM/System. Metrics like Up-time, Available/Utilised memory, Number of running/stopped containers, CPU usage per container, Memory usage per container, I ncoming/Outcoming network traffic, etc.","Swarm Monitoring"]},{"l":"SSL Monitoring","p":["This dashboard displays data about your SSL certificates. It displays dates until your certificates are valid."]},{"l":"UniConfig Controller Monitoring","p":["This dashboard keeps track of various UniConfig transactions. It displays number of transactions at a given time."]},{"l":"Workflows Monitoring","p":["Collecting data on workflows is being worked on."]}],[{"l":"Demo Use Cases","p":["There are several ways of installing device/devices in FRINX Machine. You can either run a workflow to install a network device directly or you can add devices to your Kibana inventory and install devices from there. 
From your Kibana inventory, you can install a single device, but you can also install every device in the inventory simultaneously.","To start installing devices open up FRINX UniConfig UI."]},{"l":"Open FRINX UniConfig UI","p":["Open your browser and go to [host_ip] if installed locally go to https://localhost. This is the GUI (UniConfig UI) for managing all of your devices. You should see a screen like this:","FM 2.0 Dashboard","For Demo Use Cases, please download repository fm-workflows","Make sure FRINX-machine is running, navigate to","and execute","Imported workflows and tasks will appear in FRINX-Machine UI, immediately after the import finishes.","In the following articles, you'll learn how to install a device from UniConfig and how to install all devices from the inventory. This inventory is automatically set up for you when you start FRINX Machine. After that we'll learn how to create a loopback address on the devices that we previously stored in the inventory and how to read the journals of these devices.","Then we'll take a look at how to obtain platform inventory data from the devices that you have in the network and how to store them in inventory. Next, you'll learn how to save commands to your inventory and execute them on the devices that are in your network.","Lastly, we'll take a look at how you can add devices to your inventory manually. This might be useful if you wanted to play around with the FRINX Machine a bit a try installing your own networking devices."]}],[{"l":"Add a device to inventory and install it"},{"l":"Adding device to inventory","p":["To add a new device to inventory, click on the Add device button in the Device inventory tab.","Add device to inventory"]},{"l":"JSON examples","p":["New devices are added to inventory by JSON code snippets. They are similar to Blueprints with one addition: device_id must be specified in the snippet.","To add a new device from Blueprint, toggle the \"Blueprints\" switch in the form and choose the blueprint that you want to use."]},{"i":"cisco-classic-ios-cli","l":"Cisco classic IOS (cli)"},{"i":"cisco-ios-xr-netconf","l":"Cisco IOS XR (netconf)"},{"i":"huawei-cli","l":"Huawei (cli)"},{"i":"calix-netconf","l":"CALIX (netconf)"},{"i":"nokia-netconf","l":"Nokia (netconf)"},{"l":"Install the new device from Inventory","p":["Now that the device is added we can install it. We used to need dedicated workflow to install device form inventory, but now it can be done purely via UI. 
Click on Explore in Explore & configure devices tab, under Device Inventory section.","Install device from inventory","If you did everything correctly, your devices is now in inventory and installed, ready to be operated through Frinx Machine."]}],[{"l":"Creating a Layer 2 VPN Point-to-Point Connection","p":["This section details how to find and execute a prebuilt workflow that creates a Layer 2 VPN Point-to-Point connection within Workflow Manager."]},{"l":"Navigating through Workflow Manager","p":["From the FRINX Machine dashboard you can either select Workflow Manager--> Explore Workflows--> Explore, or select the menu tab in the upper left-hand corner and select Workflow Manager.","You can then search for Create_L2VPN_P2P_OC_uniconfig or scroll down to find it within the inventory of prebuilt workflows.","Frinx Machine Dashboard","Workflows Dashboard","Once you have located the workflow press the Play button to the right of the workflow, this will navigate you to the workflow configuration window."]},{"l":"Configuring the Workflow","p":["Input is pre-filled with following data:","L2 VPN Configuration","Once you have completed, press the Execute button, a numeric link will populate to the left of the Execute button. Click on this numeric link to see the output of the executed workflow.","Numeric Link"]},{"l":"Output of the Executed Workflow","p":["On the Workflows page you will see your executed workflows.","Select the workflow Create_L2VPN_P2P_OC_uniconfig to see the output from all of the tasks completed within this workflow.","Executed Workflow Details","This following sections are available within the output window:","Task Details: This tab gives a detailed list of the individual tasks executed within the conductor, a log of each tasks start and end time, and a status of 'Completed' or 'Failed'.","Input/Output: This is the input of the API call and the results from the API call.","JSON: This tab gives a detailed output in JSON format of all executed tasks from within the workflow. Select the Unescape button to make the output more user-friendly to read.","Edit Rerun: Allows you to make changes to your initial workflow, creating a new workflow without effecting the original.","Execution Flow: A structured map from the conductor lays out the path of tasks executed from start to finish, any forks in the path are also shown here.","If you click on any of the tasks you will receive a pop-up window that gives:","The option to review a summary of input and output of the API call.","JSON output of the completed task with that goes into greater detail about the task execution.","Log status."]},{"l":"Sub-Workflows","p":["Within the original Details of Create_L2VPN_P2P_OC_uniconfig window you will see a sub-workflow.","Sub-Workflow","This sub-workflow is an embedded task that makes a separate API call to Slack to notify a pre-defined user group that the workflow has been executed and whether it has succeeded or failed."]}],[{"l":"FRINX Machine Demo Manual","p":["Open the Frinx Demo at https://services.frinx.io/frinxui/. (Note that Mozilla Firefox is not supported.)","Select Login in the upper-right corner to log into the service. Please contact info@frinx.io for login credentials.","After logging in, you can see the FRINX Machine dashboard:","FRINX Machine dashboard"]},{"l":"Demo Config Manager UI","p":["Using the Demo Config Manager:","On the FRINX Machine main page, select Explore & configure devices.","Make sure that the device you want to configure is installed. 
If not, select Install first.","For this demo, we use the IOS01 device. Locate the device in the list and select the corresponding gear icon on the right. (If you see a message saying Transaction expired, select Refresh).","FRINX Machine dashboard","For the Loopback0 interface, change the enabled status to false.","Select Save to save your changes.","To review your changes, select Calculate diff.","To view the set of commands used for the change, select Dry run.","To apply changes to the device, select Commit to network. You can also see the changes in the Operational data store.","To revert changes made to the device configuration:","Select Transactions.","Select the Revert icon for your transaction.","Select Revert changes."]},{"l":"Demo workflow UI basics","p":["Workflow Builder is a graphical interface for Workflow Manager and is used to create, modify and manage workflows.","Workflows are groups of tasks and/or sub-workflows that can be used, for example, to install or delete devices, create loopback interfaces on devices, send messages and much more. You can create your own workflows or edit existing ones by adding or removing tasks or sub-workflows.","Every task and sub-workflow placed in a workflow has a unique reference alias, and no two workflows can share a name and version."]},{"l":"How to create a new custom workflow","p":["A translation of what is happening here: \"If the identified device is of the type saos, then extract the name from the output message of the previous task, change the letters to uppercase, extract the version from the output message of the previous task, glue them together and add _1(because that is how devices are named in this demo topology\".","Above every task or workflow there are two icons:","As above, if we enter the username and password directly, the workflow will not ask for credentials at startup.","decision task: Makes a different kind of decision from the lambda task discussed above. This task works like a switch on a track, sending the train one way or another. The data needed to make a decision is supplied by the lambda task.","Device_identification task:","Enter details for the new workflow. Under Name, enter a name for your workflow (note that this name cannot be changed later). The Description is for additional information about the workflow and can be left empty. Label can help you to find your workflow later under Explore workflows, but can also be left empty. Select Save changes when ready.","Enter the following into the body:","Finding your new workflow and running it with multiple different inputs such as 10 000, 10 002, 10 012, etc.","For different ports, you can see different devices with other run commands in memory.","FRINX Machine dashboard","FRINX Machine dashboard FRINX Machine dashboard","If the input value for decision is other, it directs the flow towards device_identification. If the input value is false, it directs the flow towards terminate. This corresponds to the way we connected the cells in the workflow builder.","In the Input parameters tab and the Lambda value field, enter: ${workflow.input.port}. This indicates that the task should work with what was entered in the port field in the input of this workflow. (We will cover this later, in section 7.)","In the Input parameters tab under management_ip, enter sample-topology. This is the name of the topology in this installation, whereas in production you would use a real name. For port, enter ${workflow.input.port}. 
If you enter a port number manually, the workflow will not ask for one when started (the same goes for management_ip and other fields). However, we want the user to be able to select a port they are interested in, as we did with the lambda task in section 4.","In the Input parameters tab, delete the default parameter foo. For the param parameter, enter ${lambda_IkSu.output.result.value}. (Note that IkSu is an automatically generated reference alias that you must edit to match the one generated for you.) What ${lambda_IkSu.output.result.value} means is to take the value from lambda_xyzq which is in the output, find the result in the output and the value in it.","In the Input parameters tab, enter COMPLETED(or FAILED, at your discretion) in the Termination status field. You can enter whatever message you want in the Expected workflow output field (for example, Device not supported.)","In the Script expression field, enter a small function which we described above.","In this case, if the specified port is both greater than or equal to 10000 and less than 10005, the status chosen is keep working. Otherwise, the status is end. This status is the output of the lambda and the input for the next task or sub-workflow.","lambda task: Makes a decision on which status to choose based on the embedded port. In this example we will only consider ports 10000–10004, and others are ignored. The lambda task lets you enter a small code (lambda - function without name) into the workflow builder.","Like we mentioned above, in this demo workflow we will assume that login credentials are the same everywhere.","Next steps:","Now we can add more tasks. In the left column under System tasks, we can add another lambda. In the Workflows section, you can find Read_journal_cli_device. Let us place them next to each other after Device_identification and concatenate them:","Now we can create a new workflow from scratch:","password: ${workflow.input.password}","Read_journal_cli_device: In the Input parameters tab under device_id, enter ${lambda_ZW66.output.result}.","Remove/Expand:","Save and run your workflow.","Second lambda: Enter ${Device_identificationRef_f7I6.output} as the lambda value, meaning \"take the output from the previous Device_identification task and use that\".","Select Create on the main page of FRINX Machine.","Sub-workflows are similar to classic workflows, but inside of another workflow. The workflow that we are creating can also be used as a building block for another workflow, becoming a sub-workflow itself. In this manner, we can layer and reuse previously created workflows.","terminated task:","The output from Read_journal_cli_device is concatenated with END, as is the output from terminated. Thus we have closed our custom workflow.","Under System tasks, click the + sign for the lambda, decision and terminate tasks. Under Workflows, click the + sign for Device_identification. Tasks and sub-workflows are added on top of each other on the canvas and can be dragged around. To connect all parts of the workflow, hover over IN and OUT where the + sign appears. Connect the parts as follows: START- lambda- decision- (other) to Device_identification and default to terminate. 
Each task and workflow has a reference alias after its name, which works as unique a identifier.","Update:","username and password: For this demo, we assume that the following login credentials are used on all devices: username: frinx and password: frinx","username: ${workflow.input.username}","When working with devices using different login credentials, you need to be able to change or enter them at startup. This can be achieved in the same way as with the port parameter:"]},{"i":"demo-creating-a-loopback-address-on-devices-stored-in-the-inventory","l":"Demo: Creating a loopback address on devices stored in the inventory","p":["This workflow creates a loopback interface on all devices installed in the inventory or on all devices filtered by labels. Labels are markers that serve as a differentiator.","Check if all devices are installed. You can install them manually or by executing the Install_all_from_inventory / 1 workflow.","FRINX Machine dashboard","On the main page, select Explore workflows. In the Search by keyword column, enter loopback. The Create_loopback_all_in_uniconfig / 1 workflow will appear in the list. Under Actions, select the corresponding Run button for the workflow.","Under loopback_id, insert 77 and select Execute. Click on the link that appears.","All tasks were executed correctly and are completed.","On the results page, you will see five individual tasks:"]},{"l":"INVENTORY_get_all_devices_as_dynamic_fork_tasks","p":["This workflow displays a list of all devices in the inventory or devices filtered by label. It parses the output in the correct format for the dynamic fork, which creates a number of tasks depending on the number of devices in the inventory."]},{"l":"SUB_WORKFLOW","p":["This is the dynamic fork sub-workflow. In this case, it creates UNICONFIG_write_structured_device_data for every individual device in the inventory. You can then get detailed information on the progress and succession of every device."]},{"l":"UNICONFIG_calculate_diff","p":["This remote procedure call creates a difference between the actual UniConfig topology devices and the intended UniConfig topology nodes."]},{"l":"UNICONFIG_dryrun_commit","p":["This remote procedure call resolves the difference between actual and intended device configurations. After all changes are applied, the cli-dryrun journal is read and a remote procedure call output is created and returned."]},{"l":"UNICONFIG_commit","p":["This is the final task that actually commits the intended configuration to the devices."]},{"i":"demo-l3vpn","l":"Demo “L3VPN”","p":["On the FRINX Dashboard, open menu in the top-left corner and select on L3VPN Automation.","Select Services.","Select + Add service.","Fill in the information as shown below. Select the chain icon to automatically generate the VPN ID.","FRINX Machine dashboard","Select Save changes.","You are redirected to the previous page.","Select Commit changes.","Select Commit changes again.","After committing, you can see all executed tasks and sub-workflows. 
Select Go to detail to review individual processes."]},{"i":"step-1","l":"Step 1.","p":["Navigate back to the L3VPN Automation page.","Select Sites.","Locate the test_site_3b9UQL4i entry.","FRINX Machine dashboard","For test_site_3b9UQL4i, select Manage and Site network access.","Select Add network access."]},{"i":"step-2","l":"Step 2.","p":["BFD Profile: 500ms","Bgp Profiles: 300ms","BTM Circuit Reference: CES00000000-05","Devices: Select one of the CPE devices.","Enter the following settings:","FRINX Machine dashboard","General and Service","IP Connection","Maximum Routes: 2000","Routing Protocol:","Select + Create Static Protocol.","Select Save Changes.","Static Routing Lan Tag: 999","Static Routing LAN: 10.0.0.0/8","Static Routing Next Hop: 10.0.0.1","SVC Input Bandwith (Mbsp): 1000","SVC Output Bandwith (Mbps): 1000","To automatically generate a provider and customer address, select the chain icon:","VPN Attachment: GNS00001002"]},{"i":"step-3","l":"Step 3.","p":["Select Commit Changes.","FRINX Machine dashboard","Wait until all tasks are completed."]}],[{"l":"Install all devices from inventory","p":["When adding multiple devices to your inventory, it can be tedious to install them individually. To make things easier, we have built a workflow to install all devices present in the inventory.","Follow these instructions to use the workflow:","On the landing page, select Workflow Manager. Then select Explore and search for the workflow called Install_all_from_inventory.","Search for install_all_from_inventory","After searching, select the Execute button (blue play icon). A window appears where you can enter the input parameter. This workflow does not require any input if you want to install all uninstalled devices. If you specified a device label when adding devices, you can use this label to determine which devices should be bulk installed. Select \"Execute\" again.","Execute install_all_from_inventory","After you execute, a numeric link appears to the left of the Execute button. The link takes you to a page that shows individual tasks for this workflow, its inputs and outputs, and whether it was successful or unsuccessful. In the \"Input/Output\" tab, you can see both devices that were installed as a result of this workflow and those that were already installed.","Results of the workflow"]}],[{"l":"Policy filter XR","p":["This workflow uses UniConfig to showcase the filtering capabilities of some of our system tasks. It filters through the interfaces of the device, returns the name of the interface based on its description provided by the user and applies chosen policy on that interface.","Supported device: ios-xr -> IOSXR653_1, IOSXR653_2 & IOSXR663_1 not IOS01 & IOS02","Policy creation is not part of this workflow. The chosen policy must exist on the device before running this workflow."]},{"l":"Searching the workflow","p":["Search"]},{"i":"sync--replace","l":"Sync & Replace","p":["We consider it best practice for all workflows that interact with devices to start with the tasks \"Sync from network\" and \"Replace config with oper\". This ensures that the internal databases of the FRINX Machine are in sync with the latest configuration of the device. The input of these tasks is simply the name of the node(device)."]},{"l":"Read device data","p":["The next part is reading the device config. In the UNICONFIG_read_structured_device_data task, you can specify which part of the config you want to read with URI. 
In this case, we leave the\"URI\" input field empty."]},{"l":"jsonJQ filter","p":["jsonJQ is one of our system tasks that is very useful for filtering data. We use the following query expression:","We search through the whole config, and under the Cisco-IOS-XR-ifmgr-cfg:interface-configurations model we find the interface with a description given by the user. The task returns the name of that interface."]},{"l":"Lambda","p":["Lambda is a generic task that can process any JS code. In this case, we use it to parse the output of the jsonJQ task. jsonJQ returns the name of the interface in a standard decoded format, e.g: \"TenGigE0/0/0/0\". However, we will be using that interface in URI, which means it must be encoded. We can achieve that using a simple JS script:","As an example, we take the interface name TenGigE0/0/0/0 and encode it to TenGigE0%2F0%2F0%2F0."]},{"i":"write--commit","l":"Write & commit","p":["Lastly, we use the output of the lambda task for the configuration. We apply a policy to the interface filtered based on its description."]},{"l":"Example input","p":["Input"]},{"l":"Execution flow"},{"l":"Run of the workflow","p":["Running the workflow","IOSXR653_1 test_map_custom"]}],[{"l":"FRINX UniConfig introduction","p":["The purpose of UniConfig is to manage configuration state and to retrieve operational state of physical and virtual networking devices. UniConfig provides a single API for many different devices in the network. UniConfig can be run as an application on bare metal in a VM or in a container, standalone or as part of our automation solution FRINX Machine. UniConfig has a built-in data store that can be run in memory or with an external database.","UniConfig features"]},{"l":"UniConfig key feature overview","p":["A 'Lazy CLI' feature to suspend and resume connections without having to maintain keepalives","Allows for diffs to be built between actual and intended execution of atomic configuration changes","Can execute commands in parallel on multiple devices","Can read and store proprietary data models from network devices that follow the YANG data model","Choose between NETCONF or RESTCONF to connect to devices","Data export and import via blacklist and whitelist functions","High availability","Offers the ability to do a dry-commit to evaluate the functionality of a configuration before it is executed on devices","Provides snapshots of previous configurations if you need to rollback","Provides subtree filtering capabilities in NETCONF","Provides templates for device configuration","Pushes configuration data to devices via NETCONF or CLI","Python microservices are used to integrate with the FRINX machine","Retrieves and stores current startup and running configuration from mounted network devices","Retrieves operational data from devices via NETCONF or CLI","Subscription to NETCONF notifications via web sockets","Support for 3-phase commit by using NETCONF confirmed-commit","Support for YANG 1.1 and Tail-f actions","Supports PostgreSQL as an external database","The ability to log specific devices as needed","The UniConfig client allows for simple, full-service access to the UniConfig features","The UniConfig UI allows users to interact with the network controller through a web-based user interface","Transactions can be managed on one or multiple devices","Translates between CLI, native model and standard data models (i.e. 
OpenConfig) via our open-source device library( https://github.com/FRINXio/cli-units)"]},{"i":"uniconfig-enables-users-to-communicate-with-their-network-infrastructure-via-four-options","l":"UniConfig enables users to communicate with their network infrastructure via four options:","p":["Execute & Read API- Unstructured data via SSH and Telnet","OpenConfig API– Translation provided by our open source device library","UniConfig Native API– Direct access to vendor specific YANG data models that are native to the connected devices as well as UniConfig functions (i.e. diff, commit, snapshots, etc.)","UniConfig Native CLI API– Programmatic access to the CLI without the need for translation units (experimental)","Execute & Read capable API: Like Ansible, TCL Scripting or similar products strings can be passed and received through SSH or Telnet via REST API. UniConfig provides the authentication and transportation of data without interpreting it.","OpenConfig API: An API that is translated into device specific CLI or YANG data models. The installation of \"translation units\" on devices is required. FRINX provides an open source library of devices from a variety of network vendors. The open source framework allows anyone to contribute or consume the contents of the expanding list of supported network devices.","UniConfig Native API: A vendor specific YANG data models are absorbed by UniConfig to allow configuration of mounted devices. UniConfig maps vendor specific \"native\" models into it's data store to provide stateful configuration capabilities to applications and users.","UniConfig Native CLI API: Allows for interaction with a devices CLI is programmatic through the API without the use of 'translation units', only a schema file is needed. (This option is currently experimental, contact FRINX for more information.)","UniConfig solution"]},{"l":"UniConfig in a Docker container"},{"l":"Download and activate FRINX UniConfig","p":["Enter the following commands to download, activate and start UniConfig in a Docker container:"]},{"l":"Stop the container","p":["To stop the container type:"]},{"l":"UniConfig as a Java process in a VM or on a host"},{"l":"Download FRINX UniConfig","p":["Click on the link to download a zip archive of the latest FRINX UniConfig: uniconfig-5.0.7.zip By downloading the file you accept the FRINX software agreement: EULA"]},{"l":"Activate FRINX UniConfig","p":["To activate UniConfig, unzip the file, open the directory and run the following command:","For more information on the different arguments run the startup script with the -h flag"]},{"l":"OpenAPI","p":["UniConfig distributions contain '.yaml' file that generates list of all usable RPCs and their examples. You can view it locally or on our hosted version that always shows latest OpenAPI version.","File can be found here:"]},{"l":"Offline Activation","p":["Please contact support@frinx.io for offline activation of UniConfig."]}],[{"l":"User Guide"},{"l":"Basic Concepts","p":["Explanation of basic concepts, principles and mechanisms that exist within UniConfig."]},{"l":"Device Installation","p":["Section that explains device installation process. It covers basic mechanisms that take place when installing and explains parameters that are used in installation along with examples of install request examples. 
It then covers differences between CLI and NETCONF API."]},{"l":"UniConfig Operations","p":["This section lists various APIs used interact with UniConfig."]},{"l":"UniConfig Procedures","p":["UniConfig operations are actions that are usually inherent to UniConfig and work on their own when set up properly."]},{"l":"SDK","p":["Uniconfig provides a full blown Java based SDK. All Uniconfig operations available over RESTconf are also available when using the SDK."]}],[{"l":"Basic Concepts","p":["UniConfig is a network controller that enables network operators to automate simple and complex procedures in their heterogeneous networks. UniConfig uses CLI, NETCONF and gNMI to connect to network devices and provides a RESTCONF interface on its northbound to provide an API to applications. UniConfig users use clients in various programming languages to communicate from their applications with the controller. FRINX provides a Java client and python workers to integrate with its workflow automation in FRINX Machine. Other clients can be generated from the OpenAPI documentation of the UniConfig API.","UniConfig is stateless and stores all state information before and after transactions in a PostgreSQL database. UniConfig provides transaction capabilities on its northbound API, so that multiple clients can interact with UniConfig at the same time in a well-structured way. In addition, transactions are also supported towards all network devices independent of the capabilities of these devices. Transactions can be rolled back on error automatically and on user demand by specifying a transaction ID from the transaction log. Clients can use an “immediate commit” model (changes sent to the controller get applied to the devices immediately) or a “build and commit” model (changes are staged on the controller until a commit operation pushes all changes in a transaction to one or multiple devices).","To support N+1 redundancy and horizontal scale (meaning adding more controller instances allows the system to serve more network devices and more clients) UniConfig can be deployed together with a load balancer(E.g.: Traefik). The combination of a state-less load balancer and multiple UniConfig instances achieves high availability and supports many network devices and client applications to configure the network.","An open-source device library allows users to connect UniConfig to CLI devices that do not support SDN protocols like NETCONF and gNMI. This library is open to users, independent software vendors and any 3rd party to contribute to and use to achieve their automation goals.","Finally, the UniConfig shell, allows users to interact with all UniConfig operations and the connected devices in a model driven way through CLI.","UniConfig runs in containers, VMs or as application and can be deployed stand-alone or as part of the \"FRINX Machine\" network automation solution."]}],[{"l":"Device installation"},{"i":"device-installation-1","l":"Device installation","p":["Guide explaining installation mechanisms along with both CLI and NETCONF examples."]},{"l":"UniConfig CLI","p":["The CLI southbound plugin enables the Frinx UniConfig to communicate with CLI devices that do not speak NETCONF or any other programmatic API. The CLI service module uses YANG models and implements a translation logic to send and receive structured data to and from CLI devices."]},{"l":"UniConfig Netconf","p":["NETCONF is an Internet Engineering Task Force (IETF) protocol used for configuration and monitoring devices in the network. 
It can be used to“create, recover, update, and delete configurations of network devices”.NETCONF operations are overlaid on the Remote Procedure Call(RPC) layer and may be described in either XML or JSON."]},{"l":"UniConfig-native CLI","p":["UniConfig-native CLI allows user configuration of CLI-enabled devices using YANG models that describe configuration commands. In UniConfig-native CLI deployment translation units are defined only by YANG models and device-specific characteristics that are used for parsing and serialization of commands. Afterwards, readers and writers are automatically created and provided to translation registry - user doesn’t write them individually. YANG models can be constructed by following of well-defined rules that are explained in Developer Guide.","Network management protocols are used in southbound API of UniConfig Lighty distribution for device installation and communication. Currently, following protocols are supported:","NETCONF (Network Configuration Protocol)","SSH / TELNET"]}],[{"l":"Device installation","p":["Installing is the process of loading device information into UniConfig database. This information is saved in PostgreSQL database and used whenever transaction occurs. When the transaction is finished the connection to device is closed again, until next transaction.","These are the steps of installation process:","creation of UniConfig transaction","creation of mountpoint - connection to device","loading configuration and metadata from mountpoint","closing mountpoint and connection to device","storing synced configuration and metadata to database","closing UniConfig transaction","Node can be installed only once (you will receive error if node has already been installed).","You can specify if you would like to install node on the UniConfig layer. Default value is 'true':","Only 1 node with the same node-id can be installed on UniConfig layer.","It is synchronous: it succeeds only after node is successfully installed it fails in other cases – max-connection-attempts is automatically set to value '1', if different value is not provided in RPC input, database or config file.","Following sections provide deeper explanation of parameters needed for installation, along with example install requests.","Overview of our OpenAPI along with all parameters and expected returns can be found here."]},{"l":"Default parameters","p":["All install parameters (CLI/NETCONF) are set in database when Uniconfig is initializing. Values of these parameters are equal to specific yang model default values. These parameters are used when they are missing in RPC request.","Priority of using install parameters :","Parameter set in install RPC request","Parameter set in database","Default parameter from yang model","Priority of initial writing default parameters into database:","Database already contains default parameters","User defines default parameters into config file","Default values from yang schema file will be saved","Default parameters can be managed (put/read/delete) by user using RESTCONF/Uniconfig-shell.","Definition of default parameters can be also done using config file default-parameters.json. It is placed in config subdirectory together with lighty-uniconfig-config.json.","RPC request - CLI default parameters:","RPC request - NETCONF default parameters:"]},{"l":"Installing CLI device","p":["Install node RPC","List of basic connection parameters that are used for identification of remote device. 
All of these parameters are mandatory.","node-id- Name of node that represents device in the topology.","cli-topology:host- IP address or domain-name of target device that runs SSH or Telnet server.","cli-topology:port- TCP port on which the SSH or Telnet server on remote device is listening to incoming connections. Standard SSH port is '22', standard Telnet port is '23'.","cli-topology:transport-type- Application protocol used for communication with device - supported options are 'ssh' and 'telnet'.","cli-topology:device-type- Device type that is used for selection of translation units that maps device configuration to OpenConfig models. Supported devices can be found","cli-topology:device-version- Version of device. Use a specific version or * for a generic one. * enables only basic read and write management without the support of OpenConfig models. Here.","cli-topology:username- Username for accessing of CLI management line.","cli-topology:password- Password assigned to username.","uniconfig-config:install-uniconfig-node-enabled- Whether node should be installed to UniConfig and unified layers. By default, this flag is set to 'true'."]},{"l":"Authentication parameters","p":["List of authentication parameters used for identification of remote user utilized for configuration of the device. Username and password parameters are mandatory.","cli-topology:username- Username for accessing of CLI management line.","cli-topology:password- Password assigned to username.","List of parameters that can be used for adjusting of reconnection strategy. None of these parameters is mandatory - if they are not set, default values are set. There are two exclusive groups of parameters based on selected reconnection strategy - you can define only parameters from single group. By default, keepalive strategy is used."]},{"l":"Connection parameters","p":["Following parameters adjust maintaining of CLI session state. None of these parameters are mandatory (default values will be used).","cli-topology:max-connection-attempts- Maximum number of initial connection attempts(default value: 1). If there are unstable devices in the network it might be useful to provide max-connection-attempts higher than the default value. It would try to connect n times before throwing an ssh connection exception.","cli-topology:max-connection-attempts-install- Maximum number of initial connection attempts during install process (default value: 1). If there are unstable devices in the network it might be useful to provide max-connection-attempts-install higher than the default value. It would try to connect n times before throwing an ssh connection exception.","cli-topology:max-reconnection-attempts- Maximum number of reconnection attempts(default value: 1). max-reconnection-attempts is not that necessary to set. Uniconfig does not keep idle sessions open longer than it is necessary."]},{"l":"Storing failed installations","p":["The following parameter allows the user to store the installation in case the device is in some way unreachable.","uniconfig-config:store-failed-installation- If enabled, it will ensure that even if the device is unreachable, it will be stored in the node table in the database. If not set, the default value is false.","When the user sets the flag to true, an additional column called installation-status will be populated with a boolean flag (either SUCCESSFUL for a successful installation, or FAILED for a failed one). 
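As a concrete illustration of the identification, authentication and connection parameters described above, the following Python sketch installs and later uninstalls a CLI node. The RESTCONF endpoint paths, base URL, UniConfig credentials and all device values are assumptions for the example, not taken from this document.

```python
import requests

BASE = "http://127.0.0.1:8181/rests"   # assumed UniConfig RESTCONF base URL
AUTH = ("admin", "admin")              # assumed UniConfig credentials

# Basic identification and authentication parameters; host, credentials and
# device type/version are illustrative values.
install_body = {
    "input": {
        "node-id": "IOSXR",
        "cli": {
            "cli-topology:host": "192.168.1.211",
            "cli-topology:port": 22,
            "cli-topology:transport-type": "ssh",
            "cli-topology:device-type": "ios xr",
            "cli-topology:device-version": "*",
            "cli-topology:username": "cisco",
            "cli-topology:password": "cisco",
            "uniconfig-config:install-uniconfig-node-enabled": True,
        },
    }
}

resp = requests.post(f"{BASE}/operations/connection-manager:install-node",
                     json=install_body, auth=AUTH)
print("install:", resp.status_code)

# Uninstalling removes the node again; a node can be installed only once.
uninstall_body = {"input": {"node-id": "IOSXR", "connection-type": "cli"}}
resp = requests.post(f"{BASE}/operations/connection-manager:uninstall-node",
                     json=uninstall_body, auth=AUTH)
print("uninstall:", resp.status_code)
```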
This lets the user know that there has been some problem and that the device was not installed correctly. The mount-point information of that node will be stored (unlike with the default value). With this info already stored, the user does not need to reinstall the device, as all the connection information is present in the UniConfig database. Syncing the device or calling a GET Request will try to reconnect to the device and if it is successful, the configuration data will be saved in the datastore and the request will then finish. The installation-status will then change to SUCCESSFUL. The installed device will then behave normally as if the installation was successful in the first place. If the device is still unreachable, the flag will stay FAILED.","This is useful when many devices are being installed in batches and the user doesn't know if they are up or not."]},{"l":"Keepalive strategies","p":["1. Keepalive reconnection strategy","cli-topology:keepalive-delay- Delay between sending of keepalive messages over CLI session. The value should not be set higher than the execution of the longest operation. Default value: 60 seconds.","cli-topology:keepalive-timeout- This parameter defines how much time the CLI layer should wait for a response to keepalive message before the session is closed. Default value: 60 seconds.","cli-topology:keepalive-initial-delay- This parameter defines how much time CLI layer waits for establishment of new CLI session before the first reconnection attempt is launched. Default value: 120 seconds.","The keepalive parameters have two main functions:","keep the idle session open","timeout commands which would block the session forever"]},{"l":"Example of using the connection and keepalive parameters together","p":["For this example let us assume that we are dealing with a prod-like device, which would mean that some devices might have a large config. We would set these parameters:","Connection attempts would give us more flexibility if we work with unstable devices. It would try to ssh 3 times instead of 1 (default value). We should also keep in mind that the process of connecting to a device would take longer because of extra ssh attempts.","Keepalive commands can be set less than time of the installation, because keepalive commands can fit in between of the installation process. An important thing to keep in mind is to set sum of keepalive-delay and keepalive-timeout parameters higher than time of execution of the configuration show command. Otherwise, it could time out during writing out of the configuration to the console. For each type of device it is a different command ( configuration show brief for Ciena devices, show run for Cisco devices, etc.). Assumption is that it should not take more than 240 seconds (sum of keepalive params) to show the whole configuration. This can be appropriately adjusted to our circumstances.","2. Lazy reconnection strategy","command-timeout- Maximal time (in seconds) for command execution. If a command cannot be executed on a device in this time, the execution is considered a failure. Default value: 60 seconds.","connection-establish-timeout- Maximal time (in seconds) for connection establishment. If a connection attempt fails in this time, the attempt is considered a failure. Default value: 60 seconds.","connection-lazy-timeout- Maximal time (in seconds) for connection to keep alive. If no activity was detected in the session and the timeout has been reached, connection will be stopped. 
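The worked example above suggests raising the connection attempts and sizing the keepalive timers so that their sum exceeds the duration of the longest show command. A hedged sketch of what that parameter block might look like; the concrete values are illustrative, chosen so that keepalive-delay plus keepalive-timeout equals the 240 seconds discussed above.

```python
# Illustrative connection/keepalive tuning for a "prod-like" CLI device,
# meant to be merged into the "cli" section of an install-node request body.
tuned_cli_parameters = {
    # try to connect 3 times instead of the default single attempt
    "cli-topology:max-connection-attempts": 3,
    "cli-topology:max-connection-attempts-install": 3,
    # keepalive-delay + keepalive-timeout = 240 s, which should exceed the
    # time the device needs to print its full running configuration
    "cli-topology:keepalive-delay": 120,
    "cli-topology:keepalive-timeout": 120,
    # wait up to 4 minutes for the initial CLI session before reconnecting
    "cli-topology:keepalive-initial-delay": 240,
}
```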
Default value: 60 seconds."]},{"l":"Journaling parameters","p":["The following parameters relate with tracing of executed commands. It is not required to set these parameters.","cli-topology:journal-size- Size of the cli mount-point journal. Journal keeps track of executed commands and makes them available for users/apps for debugging purposes. Value 0 disables journaling(it is default value).","cli-topology:dry-run-journal-size- Creates dry-run mount-point and defines number of commands in command history for dry-run mount-point. Value 0 disables dry-run functionality (it is default value).","cli-topology:journal-level- Sets how much information should be stored in the journal. Option 'command-only' stores only the actual commands executed on device. Option 'extended' records additional information such as: transaction life-cycle, which handlers were invoked etc."]},{"l":"Parsing parameters","p":["Parsing strategies are used for:","Recognizing of structure in cached device configuration that is represented in textual format.","Extraction of target sections from structured format of device configuration.","Parsing engine can be configured on creation of mountpoint by specification of parsing-engine leaf value. Currently, there are three supported CLI parsing strategies: tree-parser(default strategy), batch-parser and one-line-parser.","Both batch-parser and tree-parser depend on current implementation of'CliFlavour' which defines device-specific CLI patterns. For example, if 'CliFlavour' doesn't correctly specify format of 'show configuration' command, then neither batch-parser or tree-parser is applied and commands are sent directly to device."]},{"l":"Tree-parser","p":["It is set as default parsing engine in case you choose to not use'parsing-engine' parameter.","Running-configuration is mapped into the tree structure before the first command lookup is executed from translation unit. Afterwards, this tree can be reused in the same transaction for faster lookup process (for example, one 'sync-from-network' task is executed in one transaction).","Tree-parser is faster than batch-parser in most cases because device configuration must be traversed only once and searching for target section in parsed tree structure has only logarithmic time complexity. The longer the device configuration is, the better performance improvement is achieved using this parsing strategy.","Both batch-parser and tree-parser should be capable to parse the same device configurations (in other words, tree-parser doesn't have any functional restrictions in comparison to batch-parser)."]},{"l":"Batch-parser","p":["Running-configuration must be traversed from the beginning each time when new target section is extracted from the configuration (such lookup process is launched from CLI translation units).","Internally, this parser uses regular expressions to recognize structure of configuration and find target section. 
From this reason, if configuration is long, this batch-parser becomes ineffective to extract sections that are placed near the end of device configuration.","Batch-parser should be used only as fallback strategy in the case when tree-parser fails."]},{"l":"One-line-parser","p":["CLI parsing engine that stores configuration in the cache in the form of blocks and then uses grep function for parsing running-configuration"]},{"l":"Cisco IOX XR Example request"},{"l":"Junos Example request"},{"l":"Uninstalling CLI device","p":["Uninstall node RPC"]},{"l":"Example request"},{"l":"Installing Netconf device"},{"l":"Identification of remote device","p":["List of basic connection parameters that are used for identification of remote device. Only tcp-only parameter must not be specified in input of the request.","node-id- Name of node that represents device / mount-point in the topology.","netconf-node-topology:host- IP address or domain-name of target device that runs NETCONF server.","netconf-node-topology:port- TCP port on which NETCONF server is listening to incoming connections.","netconf-node-topology:tcp-only- If it is set to 'true', NETCONF session is created directly on top of TCP connection. Otherwise,'SSH' is used as carriage protocol. By default, this parameter is set to 'false'."]},{"i":"authentication-parameters-1","l":"Authentication parameters","p":["Parameters used for configuration of the basic authentication method against NETCONF server. These parameters must be specified in the input request.","network-topology:username- Name of the user that has permission to access device using NETCONF management line.","network-topology:password- Password to the user in non-encrypted format.","There are also other authentication parameters if different authentication method is used - for example, key-based authentication requires specification of key-id. All available authentication parameters can be found in netconf-node-topology.yang under netconf-node-credentials grouping."]},{"l":"Session timers","p":["The following parameters adjust timers that are related with maintaining of NETCONF session state. None of these parameters are mandatory(default values will be used).","netconf-node-topology:initial-connection-timeout- Specifies timeout in seconds after which initial connection to the NETCONF server must be established (default value: 20 s).","netconf-node-topology:request-transaction-timeout- Timeout for blocking RPC operations within transactions (default value: 60 s).","netconf-node-topology:max-connection-attempts- Maximum number of connection attempts (default value: 1).","netconf-node-topology:max-reconnection-attempts- Maximum number of reconnection attempts (default value: 0 - disabled).","netconf-node-topology:between-attempts-timeout- Initial timeout between reconnection attempts (default value: 2 s).","netconf-node-topology:reconnenction-attempts-multiplier- Multiplier between subsequent delays of reconnection attempts (default value: 1.5).","netconf-node-topology:keepalive-delay- Delay between sending of keepalive RPC messages (default value: 120 sec).","netconf-node-topology:confirm-commit-timeout- The timeout for confirming the configuration by \"confirming-commit\" that was configured by \"confirmed-commit\". Configuration will be automatically reverted by device if the \"confirming-commit\" is not issued within the timeout period. This parameter has effect only on NETCONF nodes. 
(default value: 600 sec)."]},{"l":"Capabilities","p":["Parameters related to capabilities are often used when NETCONF device doesn't provide list of YANGs. Both parameters are optional.","netconf-node-topology:yang-module-capabilities- Set a list of capabilities to override capabilities provided in device's hello message. It can be used for devices that do not report any yang modules in their hello message.","netconf-node-topology:non-module-capabilities- Set a list of non-module based capabilities to override or merge non-module capabilities provided in device's hello message. It can be used for devices that do not report or incorrectly report non-module-based capabilities in their hello message.","Instead of defining netconf-node-topology:yang-module-capabilities, we can just define folder with yang schemas netconf-node-topology:schema-cache-directory: folder-name. For more information about using the netconf-node-topology:schema-cache-directory parameter, see RST Other parameters."]},{"l":"UniConfig-native","p":["Parameters related to installation of NETCONF or CLI nodes with uniconfig-native support.","uniconfig-config:uniconfig-native-enabled- Whether uniconfig-native should be used for installation of NETCONF or CLI node. By default, this flag is set to 'false'.","uniconfig-config:install-uniconfig-node-enabled- Whether node should be installed to UniConfig and unified layers. By default, this flag is set to 'true'.","uniconfig-config:sequence-read-active- Forces reading of data sequentially when mounting device. By default, this flag is set to'false'. This parameter has effect only on NETCONF nodes.","uniconfig-config:whitelist- List of root YANG entities that should be read. This parameter has effect only on NETCONF nodes.","uniconfig-config:blacklist- List of root YANG entities that should not be read from NETCONF device due to incompatibility with uniconfig-native or other malfunctions in YANG schemas. This parameter has effect only on NETCONF nodes.","uniconfig-config:validation-enabled- Whether validation RPC should be used before submitting configuration of node. By default, this flag is set to 'true'. This parameter has effect only on NETCONF nodes.","uniconfig-config:confirmed-commit-enabled- Whether confirmed-commit RPC should be used before submitting configuration of node. By default, this flag is set to 'true'. This parameter has effect only on NETCONF nodes.","uniconfig-config:store-failed-installation- Whether the installation should be stored in the database if it fails (e.g. is unreachable). The node will be 'installed' even though it failed and the user has 2 options:","uninstall the device and reinstall it.","call sync-from-network to sync the data from the device."]},{"l":"Flags","p":["Non-mandatory flag parameters that can be added to mount-request.","netconf-node-topology:enabled-strict-parsing- Default value of enabled-strict-parsing parameter is set to 'true'. This may inflicts in throwing exception during parsing of received NETCONF messages in case of unknown elements. If this parameter is set to 'false', then parser should ignore unknown elements and not throw exception during parsing.","netconf-node-topology:enabled-notifications- Default value of enabled-notifications is set to 'true'. If it is set to 'true' and NETCONF device supports notifications, NETCONF mountpoint will expose NETCONF notification and subscription services.","netconf-node-topology:reconnect-on-changed-schema- Default value of reconnect-on-changed-schema is set to 'false'. 
If it is set to 'true', NETCONF notifications are supported by device, and NETCONF notifications are enabled ('enabled-notifications' flag), the connector would auto disconnect/reconnect when schemas are changed in the remote device. The connector subscribes (right after connect) to base netconf notifications and listens for netconf-capability-change notification","netconf-node-topology:streaming-session- Default value of streaming-session parameter is set to 'false'. NETCONF session is created and optimized for receiving of NETCONF notifications from remote server."]},{"l":"Other parameters","p":["Other non-mandatory parameters that can be added to mount-request.","netconf-node-topology:schema-cache-directory- This parameter can be used for two cases:","Explicitly set name of NETCONF cache directory. If it is not set, the name of the schema cache directory is derived from device capabilities during mounting process.","Direct usage of the 'custom' NETCONF cache directory stored in the UniConfig 'cache' directory by name. This 'custom' directory must exist, must not be empty and also can not use the 'netconf-node-topology:yang-module-capabilities' parameter, because capability names will be generated from yang schemas stored in the 'custom' directory.","netconf-node-topology:dry-run-journal-size- Creates dry-run mount-point and defines number of NETCONF RPCs in history for dry-run mount-point. Value 0 disables dry-run functionality (it is default value).","netconf-node-topology:custom-connector-factory- Specification of the custom NETCONF connector factory. For example, if device doesn't support candidate data-store, this parameter should be set to 'netconf-customization-alu-ignore-candidate' string (default value is \"default\").","netconf-node-topology:edit-config-test-option- Specification of the test-option parameter in the netconf edit-config message. Possible values are 'set', 'test-then-set' or 'test-only'. If the edit-config-test-option is not explicitly specified in the mount request, then the default value will be used ('test-then-set'). See RFC-6241 for more information about this feature.","netconf-node-topology:concurrent-rpc-limit- Defines maximum number of concurrent RPCs, where 0 indicates no limit (it is default value).","There are additional install parameters in our OpenAPI, they can all be found here."]},{"l":"Example netconf request"},{"l":"Uninstalling Netconf device"},{"i":"example-request-1","l":"Example request"},{"l":"Installing SNMP agent"},{"l":"Identification of remote agent","p":["List of basic connection parameters that are used for identification of remote agent.","node-id- Name of node that represents device / mount-point in the topology.","snmp-topology:host- IP address or domain-name of target device where SNMP agent is running.","snmp-topology:port- SNMP port on which SNMP agent is listening to incoming connections."]},{"l":"SNMP parameters","p":["snmp-topology:transport-type- UniConfig currently supports UDP for SNMP communication, with plans to add TCP support in the future.","snmp-topology:snmp-version- UniConfig currently supports V1 and V2c version of the SNMP, with plans to add V3 support in the future.","snmp-topology:connection-retries- Sets the number of retries to be performed before a request is timed out. Default value is 0.","snmp-topology:request-timeout- Timeout in milliseconds before a confirmed request is resent or timed out. 
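Pulling the NETCONF installation parameters from the preceding sections together, a hedged install request might look as follows in Python. The RPC path, base URL, credentials and device values are assumptions for illustration only.

```python
import requests

BASE = "http://127.0.0.1:8181/rests"   # assumed UniConfig RESTCONF base URL
AUTH = ("admin", "admin")              # assumed UniConfig credentials

netconf_install_body = {
    "input": {
        "node-id": "xr6",
        "netconf": {
            # identification and authentication (illustrative values)
            "netconf-node-topology:host": "192.168.1.212",
            "netconf-node-topology:port": 830,
            "netconf-node-topology:tcp-only": False,
            "network-topology:username": "cisco",
            "network-topology:password": "cisco",
            # session timers (optional - defaults apply when omitted)
            "netconf-node-topology:keepalive-delay": 120,
            "netconf-node-topology:max-connection-attempts": 1,
            # treat the device as a uniconfig-native NETCONF node
            "uniconfig-config:uniconfig-native-enabled": True,
        },
    }
}

resp = requests.post(f"{BASE}/operations/connection-manager:install-node",
                     json=netconf_install_body, auth=AUTH)
print(resp.status_code, resp.text)
```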
Default value is 3000.","snmp-topology:get-bulk-size- The maximum number of values that can be returned in a single response to the get-bulk operation. Default value is 50."]},{"i":"authentication-parameters-2","l":"Authentication parameters","p":["snmp-topology:community-string- UniConfig currently supports only security string as authentication method that is used with V1 and V2c."]},{"l":"Others","p":["snmp-topology:mib-repository- Name of the MIB repository that contains MIB files."]},{"i":"example-request-2","l":"Example request"},{"l":"Uninstalling SNMP agent"},{"i":"example-request-3","l":"Example request"}],[{"l":"UniConfig CLI"},{"l":"Introduction","p":["The CLI southbound plugin enables the Frinx UniConfig to communicate with CLI devices that do not speak NETCONF or any other programmatic API. The CLI service module uses YANG models and implements a translation logic to send and receive structured data to and from CLI devices. This allows applications to use a service model or unified device model to communicate with a broad range of network platforms and SW revisions from different vendors.","Much like the NETCONF southbound plugin, the CLI southbound plugin enables fully model-driven, transactional device management for internal and external OpenDaylight applications. In fact, the applications are completely unaware of underlying transport and can manage devices over the CLI plugin in the same exact way as over NETCONF.","Once we have installed the device, we can present an abstract, model-based network device and service interface to applications and users. For example, we can parse the output of an IOS command and return structured data.","CLI southbound plugin"]},{"l":"Architecture","p":["This section provides an architectural overview of the plugin, focusing on the main components."]},{"l":"CLI topology","p":["The CLI topology is a dedicated topology instance where users and applications can:","install a CLI device,","uninstall a device,","check the state of connection,","read/write data from/to a device,","execute RPCs on a device.","This topology can be seen as an equivalent of topology-netconf, providing the same features for netconf devices. The topology APIs are YANG APIs based on the ietf-topology model. Similarly to netconf topology, CLI topology augments the model with some basic configuration data and also some state to monitor mountpoints."]},{"l":"CLI mountpoint","p":["The plugin relies on MD-SAL and its concept of mountpoints to expose management of a CLI device. By exposing a mountpoint into MD-SAL, it enables the CLI topology to actually access the device's data in a structured/YANG manner. Components of such a mountpoint can be divided into 3 distinct layers:","Service layer - implementation of MD-SAL APIs delegating execution to transport layer.","Translation layer - a generic and extensible translation layer. The actual translation between YANG and CLI takes place in the extensions. The resulting CLI commands are then delegated to transport layer.","Transport layer - implementation of various transport protocols used for actual communication with network devices.","The following diagram shows the layers of a CLI mountpoint:"]},{"l":"Translation layer","p":["The CLI southbound plugin is as generic as possible. However, the device-specific translation code (from YANG data -\\ CLI commands and vice versa), needs to be encapsulated in a device-specific translation plugin. E.g. 
Cisco IOS specific translation code needs to be implemented by Cisco IOS translation plugin before FRINX UniConfig can manage IOS devices. These translation plugins in conjunction with the generic translation layer allow for a CLI mountpoint to be created."]},{"l":"Device specific translation plugin","p":["Device specific translation plugin is a set of:","YANG models","Data handlers","RPC implementations","that actually","defines the model/structure of the data in FRINX UniConfig","implements the translation between YANG data and device CLI in a set of handlers","(optionally) implements the translation between YANG RPCs and device CLI","The plugin itself is responsible for defining the mapping between YANG and CLI. However, the translation layer into which it plugs in is what handles the heavy lifting for it e.g. transactions, rollback, config data storage etc. Additionally, the SPIs of the translation layer are very simple to implement because the translation plugin only needs to focus on the translations between YANG <-\\ CLI."]},{"l":"Units","p":["In order to enable better extensibility of the translation plugin and also to allow the separation of various aspects of a device's configuration, a plugin can be split into multiple units. Where a unit is actually just a subset of a plugin's models, handlers and RPCs.","A single unit will usually cover a particular aspect of device management e.g. the interface management unit.","Units can be completely independent or they can build on each other, but in the end (in the moment where a device is being installed) they form a single translation plugin.","Each unit has to be registered under a specific device type(s) e.g. an interface management unit could be registered for various versions of the IOS device type. When installing an IOS device, the CLI southbound plugin collects all the units registered for the IOS device type and merges them into a single plugin enabling full management.","The following diagram shows an IOS device translation plugin split into multiple units:","IOS translation plugin"]},{"l":"Transport layer","p":["For now, two transport protocols are supported:","SSH","Telnet","They implement the same APIs, which enables the translation layer of the CLI plugin to be completely independent of the underlying protocol in use. Deciding which transport will be used to manage a particular device is simply a matter of install-request configuration.","The transport layer can be specified using install-request'cli-topology:transport-type' parameter."]},{"l":"Data processing","p":["There are 2 types of data depending on data-store in which data is stored:","Config","Operational","This section details how these data types map to CLI commands.","Just as there are 2 types of data, there are 2 streams of data in the CLI southbound plugin:","It represents user/application intended configuration for the device.","Translation plugins/units need to handle this configuration in data handlers as C(reate), U(pdate) and D(elete) operations. 
R(ead) pulls this config data from the device and updates the cache on its way back.","Config data","It represents actual configuration on the device, optionally statistics from the device.","Translation plugins/units need to pull these data out of the device when R(ead) operation is requested.","Operational data","RPCs stand on their own and can encapsulate any command(s) on the device."]},{"l":"RPCs provided by CLI layer","p":["There are multiple RPCs that can be used to send commands to a CLI session and optionally wait for command output. The CLI layer also provides one additional RPC for computing configuration coverage by cli-units. To use all of these RPCs, it is required to have an installed CLI device in the 'Connected' state."]},{"i":"rpc-execute-and-read","l":"RPC: Execute-and-read"},{"l":"Description","p":["Execution of the sequence of commands specified in the input. These commands must be separated by the new line - then, each of the command is executed separately.","After all commands are executed, it is assumed, that the original command prompt (prompt that was set before execution of this RPC) appears on the remote terminal.","If the input contains only single command, output of this RPC will contain only output of this command. If input contains multiple commands separated by newline, output of this RPC will be built from command prompts (except the prompt of the first command), input commands and outputs returned from remote terminal."]},{"l":"Example","p":["Following RPC demonstrates listing of all interfaces with configured IP addresses plus listing of available routing protocols that can be enabled from global configuration mode. Since the last entered command is placed in configuration mode (for example, starting with'Router(config)#'), it is required to return back to Privileged EXEC mode (for example, starting with 'Router#') using 'end' command and'no' confirmation to not save changes. Also, 'wait-for-output-timer' is configured to 2 seconds - CLI layer waits for command output returned from device up to 2 seconds.","Remember that the last command prompt must equal to original prompt otherwise CLI session fails on timeout and CLI mountpoint must be recreated.","RPC reply with unescaped output string (output can be easily unescaped with 'printf' linux application):","Description of RPC-request input body fields:","command(mandatory) - The list of commands that are sent to device. Commands must be separated by newline character. Every command-line is executed separately.","wait-for-output-timer(optional) - By default (if this parameter is not set or set to 0), outputs from entered commands are collected after caught echo of the next typed command in CLI session (or command prompt, if the command is the last one from input sequence). Then, the collected output contains output of the previous command + echo of the current command that hasn't been executed by sending newline character yet. This process is simplified by setting'wait-for-output-timer' value. In this case,'waiting-for-command-echo' procedure is not applied, rather next command is executed only after specified number of seconds after which the reply from CLI session should already be available (if it won't be available, then command output will be read after execution of the next command - outputs can be messed up).","error-check(optional) - By default, UC does not check for errors in commands. 
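A sketch of invoking execute-and-read on an installed, Connected CLI node from Python. The mount-point RPC path below is an assumption that follows the usual yang-ext:mount pattern; check the OpenAPI documentation for the exact module name of the RPC in your release. The command and the 2-second wait-for-output-timer mirror the example described above.

```python
import requests

BASE = "http://127.0.0.1:8181/rests"   # assumed UniConfig RESTCONF base URL
AUTH = ("admin", "admin")              # assumed UniConfig credentials
NODE = "IOSXR"                          # an installed, Connected CLI node

# Assumed RPC path under the CLI mount-point.
url = (f"{BASE}/operations/network-topology:network-topology/topology=cli"
       f"/node={NODE}/yang-ext:mount/cli-unit-generic:execute-and-read")

body = {
    "input": {
        # multiple command lines separated by '\n' are executed one by one
        "command": "show ip interface brief",
        # collect output 2 seconds after the command instead of waiting
        # for the command echo
        "wait-for-output-timer": 2,
    }
}

resp = requests.post(url, json=body, auth=AUTH)
print(resp.json())
```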
If error-handling is enabled and an error occurs, RPC will fail."]},{"l":"Wait-for-echo behaviour","p":["The comparison between described wait-for-echo approaches can be demonstrated in the steps of processing 2 command-lines:","'wait-for-output-timer' is not set or it set to value 0","write command 1","wait for command 1 echo","hit enter","write command 2","wait for command 2 echo","read until command prompt appears","'wait-for-output-timer' is specified in request","read output until timeout expires","Even if the 'wait-for-output-timer' is configured, the last output must equal to original command-prompt."]},{"i":"rpc-execute-and-expect","l":"RPC: Execute-and-expect"},{"i":"description-1","l":"Description","p":["It is a form of the 'execute-and-read' RPC that additionally may contain 'expect(..)' patterns used for waiting for specific outputs/prompts. It can be used for execution of interactive commands that require multiple subsequent inputs with different preceding prompts.","The body of 'expect(..)' pattern must be specified by Java-based regular expression typed between the brackets (see https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html","documentation about regular expressions used in Java language).","'expect(..)' pattern can only be used for testing of previous command line output including next command prompt. From this reason, it is also a suitable tool for testing of specific command prompts.","'expect(..)' pattern must be specified on the distinct line. If multiple 'expect(..)' patterns are chained on neighboring lines, then all of them must match previous output (patterns are joined using logical AND operation).","Output of this RPC reflects the whole dialogue between Frinx UniConfig client and remote terminal except the initial command-prompt.","'wait-for-output-timer' parameter can also be specified in this RPC","but in this case, it applies only for non-interactive commands - commands that are not followed by 'expect(..)' pattern. It is possible to mix interactive and non-interactive commands in input command snippet.","If 'expect' pattern doesn't match previous output, Execute-and-expect RPC will fail on timeout (fixed 3 seconds) for reading next input and CLI session will drop immediately."]},{"i":"example-1","l":"Example","p":["The following RPC requests shows execution of interactive command for copying of file from TFTP server. The CLI prompt subsequently ask for source filename and destination filename. These prompts are asserted by'expect(..) pattern. The last 'expect(..) pattern just waits for confirmation about number of copied bytes.","RPC reply with unescaped output string (output can be easily unescaped with 'printf' linux application):","Backslash is a special character that must be escaped in JSON body. 
From this reason, in the previous example, there are two backslashes proceeding regular-expression constructs.","If 'execute-and-expect' command field doesn't contain any 'expect(..)' patterns, it will be evaluated in the same way like 'execute-and-read' RPC."]},{"i":"rpc-execute-and-read-until","l":"RPC: Execute-and-read-until"},{"i":"description-2","l":"Description","p":["It is form of the 'execute-and-read' RPC that allows to explicitly specify 'last-output' that CLI expect at the end of commands executions (after the last command has been sent to device).","If explicitly specified 'last' output is not found at the end of the output, again, the session will be dropped and recreated similarly to behaviour of 'execute-and-read' RPC."]},{"i":"example-2","l":"Example","p":["The following request shows sending of the configuration snippet for disabling of automatic network summary (RIP routing protocol). After executing of these commands, command prompt is switched to'RP/0/0/CPU0:XR5(config-rip)#' - it is not the same like initial command prompt 'RP/0/0/CPU0:XR5#'. From this reason it is required to return back to initial command prompt by sending of additional commands or specification of 'last-output' as it is demonstrated in this example.","RPC reply with unescaped output string (output can be easily unescaped with 'printf' linux application):","Set 'last-output' is saved within current CLI session - if you send next 'execute-and-read' RPC, it is assumed that the initial and last output is newly configured 'last-output'."]},{"i":"rpc-execute","l":"RPC: Execute"},{"i":"description-3","l":"Description","p":["Simple execution of single or multiple commands on remote terminal. Multiple commands must be separated by newline in the input. The outputs from commands are not collected - output of this RPC contains only status message.","This RPC can be used in cases where it is not necessary to obtain outputs of entered commands.","After all commands are executed, the last output is not checked against expected output."]},{"i":"example-3","l":"Example","p":["The following example demonstrates 'execute' RPC on creation of simple static route and committing of made change.","RPC reply - output contains just status message:"]},{"i":"rpc-config-coverage","l":"RPC: config-coverage"},{"i":"description-4","l":"Description","p":["RPC reads the entire device configuration, determines the coverage of the configuration by translation units and returns simple or complex output. The user can define a preferred output in RPC input. The default is simple output.","Simple output contains one string that consists of all lines of the device configuration. Each line starts with '+' if it is covered or'-' if not and ends with a '\\n' marker.","Complex output contains a list of commands. Each entry in the list includes the following fields:","'covered', which indicates whether the entire command is covered or not. Can be either 'true' or 'false'.","'non-parsable-parts', which is visible only if the entire command is not covered. Contains a list of those command parts that are not covered. If no parts of the command are covered, only contains the word 'ALL'.","'command', which includes the entire command."]},{"l":"Simple output example","p":["RPC reply:"]},{"l":"Complex output example","p":["RPC reply:"]}],[{"l":"UniConfig NETCONF"},{"l":"Overview","p":["NETCONF is an Internet Engineering Task Force (IETF) protocol used for configuration and monitoring of devices in a network. 
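To make the expect(..) patterns and the escaping discussion concrete, here is a hedged sketch of an execute-and-expect call from Python. The RPC path is an assumption following the same mount-point pattern as above, and the device prompts are illustrative; note that json= serialization takes care of doubling the backslashes in the regular expressions when the request body is sent.

```python
import requests

BASE = "http://127.0.0.1:8181/rests"   # assumed UniConfig RESTCONF base URL
AUTH = ("admin", "admin")              # assumed UniConfig credentials
NODE = "IOSXR"                          # an installed, Connected CLI node

# Assumed RPC path under the CLI mount-point (same pattern as above).
url = (f"{BASE}/operations/network-topology:network-topology/topology=cli"
       f"/node={NODE}/yang-ext:mount/cli-unit-generic:execute-and-expect")

# Interactive copy from a TFTP server: each expect(..) line must match the
# output (including the next prompt) produced by the preceding command line.
command = (
    "copy tftp: disk0:\n"
    "expect(Address or name of remote host \\[.*\\]\\?)\n"
    "192.168.1.50\n"
    "expect(Source filename \\[.*\\]\\?)\n"
    "backup.cfg\n"
    "expect(Destination filename \\[.*\\]\\?)\n"
    "backup.cfg\n"
    "expect(bytes copied)"
)

resp = requests.post(url, json={"input": {"command": command}}, auth=AUTH)
print(resp.json())
```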
It can be used to“create, recover, update, and delete configurations of network devices”. The base NETCONF protocol is described in RFC-6241.","NETCONF operations are overlaid on the Remote Procedure Call (RPC) layer and may be described in either XML or JSON."]},{"l":"NETCONF southbound plugin"},{"l":"Introduction to southbound plugin and netconf-connectors","p":["The NETCONF southbound plugin is capable of connecting to remote NETCONF devices and exposing their configuration/operational datastores, RPCs and notifications as MD-SAL mount points. These mount points allow applications and remote users (over RESTCONF) to interact with the mounted devices.","In terms of RFCs, the southbound plugin supports:","Network Configuration Protocol (NETCONF) - RFC-6241","NETCONF Event Notifications - RFC-5277","YANG Module for NETCONF Monitoring - RFC-6022","YANG Module Library - draft-ietf-netconf-yang-library-06","NETCONF is fully model-driven (utilizing the YANG modelling language) so in addition to the above RFCs, it supports any data/RPC/notifications described by a YANG model that is implemented by the device.","By mounting of NETCONF device a new netconf-connector is created. This connector is responsible for:","keeping state of NETCONF session between NETCONF client that resides on FRINX UniConfig distribution and NETCONF server (remote network device)","sending / receiving of NETCONF RPCs that are used for reading / configuration of network device","interpreting of NETCONF RPCs by mapping of their content using loaded device-specific YANG schemas","There are 2 ways for configuring a new netconf-connector: NETCONF or RESTCONF. This guide focuses on using RESTCONF."]},{"l":"Spawning of netconf-connectors while the controller is running","p":["To configure a new netconf-connector (NETCONF mount-point) you need to create a node in configuration data-store under 'topology-netconf'. Adding of new node under NETCONF topology automatically triggers data-change-event that at the end triggers mounting process of the NETCONF device. The following example shows how to mount device with node name 'example' (make sure that the same node name is specified in URI and request body under 'node-id' leaf).","This spawns a new netconf-connector with name 'example' which tries to connect to the NETCONF device at '192.168.1.100' and port '22'. Both username and password are set to 'test' and SSH is used as channel for transporting of NETCONF RPCs (if 'tcp-only' leaf is set to 'true', NETCONF application protocol is running directly on top of the TCP protocol).","Right after the new netconf-connector is created, NETCONF layer writes some useful metadata into the operational data-store of MD-SAL under the network-topology subtree. This metadata can be found at:","Information about connection status, device capabilities, etc. can be found there.","You can check the configuration of device by accessing of'yang-ext:mount' container that is created under every mounted NETCONF node. The new netconf-connector will now be present there. Just invoke:","The response will contain the whole configuration of NETCONF device. 
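A hedged Python equivalent of the mount request described above: a PUT to the 'example' node under topology-netconf, using the host, port and credentials mentioned in the text. The base URL and authentication are assumptions.

```python
import requests

BASE = "http://127.0.0.1:8181/rests"   # assumed RESTCONF base URL
AUTH = ("admin", "admin")              # assumed UniConfig credentials

# Node name in the URI must match the node-id leaf in the body.
url = (f"{BASE}/data/network-topology:network-topology"
       "/topology=topology-netconf/node=example")

node = {
    "node": [{
        "node-id": "example",
        "netconf-node-topology:host": "192.168.1.100",
        "netconf-node-topology:port": 22,
        "netconf-node-topology:username": "test",
        "netconf-node-topology:password": "test",
        # NETCONF over SSH; set to True to run NETCONF directly over TCP
        "netconf-node-topology:tcp-only": False,
    }]
}

resp = requests.put(url, json=node, auth=AUTH)
print(resp.status_code)   # 201 on first creation, 204 when replacing
```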
You can fetch smaller slice of configuration using more specific URLs under'yang-ext:mount' too."]},{"i":"authentification-with-privatepublic-key","l":"Authentification with private/public key","p":["This type of authentification is used when you want to connect to the NETCONF device via private/public key, it is necessary to save public key into device, then put private key into UniConfig and when trying to configure NETCONF mount-point to connect via ssh key and not password.","To accomplish that, follow these steps :","1. Generate private/public key-pair on your local machine","2. Change .pub format into .bin format","3. Copy public key into device directory. Password of the device will be required.","4.(Optional) Check if the public key is on device","5. Import public key to device","6. Log in with private key to device NETCONF subsystem. Passphrase for key will be required.","7. Start UniConfig and insert keystore with private key into it.","RPC request:","8. Create mount-point with key-id","Delete public key","Login to device, remove rsa public key and after that, it is also possible to delete key from device directory."]},{"l":"PKI Data persistence in NETCONF","p":["PKI data is used for authentication of NETCONF sessions with the provided RSA private key. The corresponding public key must be stored on the device side.","Keys are identified using a unique 'key-id'. This key identifier can be specified in the NETCONF installation request.","Keys can be managed using the 'remove-keystore-entry' and 'add-keystore-entry' operations. These RPC calls are part of the UniConfig transaction. Changes are not applied until they are committed by the user or the immediate commit model is used to invoke the operation.","Keys are stored in the UniConfig database. In a clustered environment, all nodes share the same set of keys."]},{"l":"Registration of the new key","p":["The following request demonstrates how to register a new RSA private key with a key-id of 'key1'. The private key must be specified in the PKCS#8 format. The passphrase is optional and must be specified only if the private key is encrypted.","Multiple keys can be registered at once if the user provides a list of the 'key-credential' in the input."]},{"l":"Removing of the existing key","p":["The following example shows how to remove the existing key 'key1' from UniConfig. It is possible to remove multiple keys at once."]},{"l":"Reading list of the existing keys","p":["The following example shows how to read list of the existing keys from UniConfig.","Note: Both 'passphrase' and 'private-key' are additionally encrypted by the UniConfig encryption system to protect confidential data."]},{"l":"Keepalive settings","p":["If the NETCONF session haven't been created yet, the session is tried to be established only within maximum connection timeout. If this timeout expires before NETCONF session is established, underlay NETCONF channel is closed (reconnection strategy will not be started). After the NETCONF session has been successfully created, there are two techniques how the connection state is kept alive:","TCP acknowledgements- NETCONF is running on top of the TCP protocol that can handle dropped packets by decreasing of window size and resending of lost TCP segments. 
Working TCP connection doesn't imply working state of the application layer (NETCONF session) - keepalive messages are required too.","Explicit NETCONF keepalive messages- Keepalive messages test whether NETCONF server is alive - server responds to keepalive messages within NETCONF RPC timeout.","If TCP connection is dropped or NETCONF server doesn't respond within keepalive timeout, NETCONF launches reconnection strategy. To summarize it all, there are 3 configurable parameters that can be set in mount-request:","Initial connection timeout [seconds]- Specifies timeout in milliseconds after which initial connection to the NETCONF server must be established. By default, the value is set 20 s.","Keepalive delay [seconds]- Delay between sending of keepalive RPC messages to the NETCONF server. Keepalive messages test state of the NETCONF session (application layer) - whether remote side is able to respond to RPC messages. Default keepalive delay is 120 seconds.","Request transaction timeout [seconds]- Timeout for blocking RPC operations within transactions. Southbound plugin stops to wait for RPC reply after this timeout expires. By default, it is set to 60 s.","Example with set keepalive parameters at creation of NETCONF mount-point(connection timeout, keepalive delay and request timeout):"]},{"l":"Reconnection strategy","p":["Reconnection strategies are used for recovering of the lost connection to the NETCONF server. The behaviour of the reconnection can be described by 3 configurable mount-request parameters:","Maximum number of connection attempts [count]- Maximum number of initial connection retries; when it is reached, the NETCONF won't try to connect to device anymore. By default, this value is set to 1.","Maximum number of reconnection attempts [count]- Maximum number of reconnection retries; when it is reached, the NETCONF won't try to reconnect to device anymore. By default, this value is set to 1.","Initial timeout between attempts [seconds]- The first timeout between reconnection attempts in milliseconds. The default timeout value is set to 2000 ms.","Reconnection attempts multiplier [factor]- After each reconnection attempt, the delay between reconnection attempts is multiplied by this factor. By default, it is set to 1.5. This means that the next delay between attempts will be 3 s, then it will be 4,5 s, etc.","Example with set reconnection parameters at creation of NETCONF mount-point - maximum connection attempts, initial delay between attempts and sleep factor:"]},{"l":"Local NETCONF cache repositories","p":["The netconf-connector in OpenDaylight relies on'ietf-netconf-monitoring' support when connecting to remote NETCONF device. The 'ietf-netconf-monitoring' feature allows netconf-connector to list and download all YANG schemas that are used by the device. These YANG schemas are afterwards used by NETCONF southbound plugin for interpretation of RPCs. The following rules apply for maintaining of local NETCONF cache repositories:","By default, for each device type, the separate local repository is prepared.","All NETCONF repositories are backed up by separate sub-directory under 'cache' directory of UniConfig Distribution.","NETCONF device types are distinguished by unique set of YANG source identifiers - module names and revision numbers. 
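A tiny sketch that reproduces how the reconnection multiplier shapes the delays described above (2-second initial timeout, factor 1.5 giving 3 s, then 4.5 s, and so on):

```python
# Delay sequence of the reconnection strategy: initial timeout 2 s,
# multiplied by 1.5 after every failed attempt.
initial_delay = 2.0      # seconds
multiplier = 1.5
attempts = 5

delay = initial_delay
for attempt in range(1, attempts + 1):
    print(f"attempt {attempt}: wait {delay:g} s before reconnecting")
    delay *= multiplier
# -> 2, 3, 4.5, 6.75, 10.125 seconds
```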
For example, if 2 NETCONF devices differ only in revision of one YANG schema, these NETCONF devices are recognized to have different device types.","Format of the name of generated NETCONF cache directory at runtime is 'schema_id', where 'id' represents unique integer computed from hash of all source identifiers. This generation of cache directory name is launched only at mounting of new NETCONF device and only if another directory with the same set of source identifiers haven't been registered yet.","You can still manually provide NETCONF cache directories with another format before starting of UniConfig Distribution or at runtime - such directories don't have to follow 'schema_id' format.","The NETCONF repository can be registered in 3 ways:","Implicitly by mounting of NETCONF device that has NETCONF monitoring capability and another devices with the same type hasn't already been mounted.","At booting of FRINX UniConfig distribution, all existing sub-directories of 'cache' root directory are registered as separate NETCONF repositories.","At runtime, by invocation of 'schema-resources:register-repository' RPC.","Already registered schema repositories can be listed using following request:","It should return list of ODL nodes in cluster with list of all loaded repositories. Each repository have associated list of source identifiers. See the following example of GET request output:"]},{"l":"Local Netconf default cache repository","p":["Before booting of FRINX UniConfig, the user can put the 'default' repository in the ‘cache’ directory. This directory should contain the most frequently missing sources. As mentioned above, if the device supports ‘ietf-netconf-monitoring’ and there is no directory in the'cache' with all sources that the device requires, then NETCONF will generate directory with name ‘schema_id’, where ‘id’ represents unique integer. The generated repository may not contain all required schemas because device may not provide them. In such case, the missing sources will be searched in the 'default' repository and if sources will be located there, generated repository will be supplemented by the missing sources. In general, there are 2 situations that can occur:","Missing imports","The device requires and provides a resource which for its work requires additional resources that are not covered by provided resources.","Source that is not covered by provided sources","The device requires but does not provide a specific source.","note Using the 'default' directory in the 'cache' directory is optional."]},{"l":"Connecting to a device not supporting NETCONF monitoring","p":["NETCONF connector can only communicate with a device if it knows the set of used schemas (or at least a subset). However, some devices use YANG models internally but do not support NETCONF monitoring. Netconf-connector can also communicate with these devices, but you must load required YANG models manually. In general, there are 2 situations you might encounter:","NETCONF device does not support 'ietf-netconf-monitoring' but it does list all its YANG models as capabilities in HELLO message","This could be a device that internally uses, for example,'ietf-inet-types' YANG model with revision '2010-09-24'. 
In the HELLO message, that is sent from this device, there is this capability reported as the following string (other YANG schemas can be reported as capabilities in the similar format):","The format of the capability string is following:","[NAMESPACE] - Namespace that is specified in the YANG schema.","[MODULE_NAME] - Name of the YANG module.","[REVISION] - The newest revision that is specified in the YANG schema (it should be specified as the first one in the file). note Revision number is not mandatory (YANG model doesn't have to contain revision number) - then, the capability is specified without the'&' and revision too. For such devices you have to side load all device YANG models into separate sub-directory under 'cache' directory (you can choose random name for this directory, but directory must contain only YANG files of one device type).","NETCONF device does not support 'ietf-netconf-monitoring' and it does NOT list its YANG models as capabilities in HELLO message","Compared to device that lists its YANG models in HELLO message, in this case there would be no specified capabilities in the HELLO message. This type of device basically provides no information about the YANG schemas it uses so its up to the user of OpenDaylight to properly configure netconf-connector for this device. Netconf-connector has an optional configuration attribute called'yang-module-capabilities' and this attribute can contain a list of'yang-module-based' capabilities. By setting this configuration attribute, it is possible to override the 'yang-module-based' capabilities reported in HELLO message of the device. To do this, we need to mount NETCONF device or modify the configuration of existing netconf-connector by adding the configuration snippet with explicitly specified capabilities (it needs to be added next to the address, port, username etc. configuration elements). The following example shows explicit specification of 6 capabilities:","Remember to also put the YANG schemas into the cache folder like in the case 1."]},{"l":"Registration or refreshing of NETCONF cache repository using RPC","p":["This RPC can be used for registration of new NETCONF cache repository or updating of NETCONF cache repository. This is useful when user wants to add new NETCONF cache repository at runtime of FRINX UniConfig distribution for device that doesn't support 'ietf-netconf-monitoring' feature. It can also be used for refreshing of repository contents (YANG schemas) at runtime.","The following example shows how to register a NETCONF repository with name 'example-repository'. The name of the provided repository must equal to name of the directory which contains YANG schemas.","If the repository registration or refreshing process ends successfully, the output contains just set 'status' leaf with 'success' value:","On the other side, if the directory with input 'repository-name' does not exist, directory doesn't contain any YANG files, or schema context cannot be built using provided YANG sources the response body will contain 'failed' 'status' and set 'error-message'. For example, non-existing directory name produces following response:","Constraints:","Only the single repository can be registered using one RPC request.","Removal of registered repositories is not supported for now."]},{"l":"Reconfiguring netconf-connector while the controller is running","p":["It is possible to change the configuration of an already mounted NETCONF device while the whole controller is running. 
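The schema-resources:register-repository RPC mentioned above could be invoked from Python as shown below. The base URL and credentials are assumptions; the repository name must match a directory under the 'cache' directory that contains the YANG files.

```python
import requests

BASE = "http://127.0.0.1:8181/rests"   # assumed RESTCONF base URL
AUTH = ("admin", "admin")              # assumed UniConfig credentials

body = {"input": {"repository-name": "example-repository"}}

resp = requests.post(
    f"{BASE}/operations/schema-resources:register-repository",
    json=body, auth=AUTH)

result = resp.json()
# Expected shape: {"output": {"status": "success"}} on success, or a
# "failed" status plus an "error-message" leaf when the directory is
# missing, empty, or the schema context cannot be built.
print(result)
```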
This example will continue where the last left off and will change the configuration for the existing netconf-connector after it was spawned. Using one RESTCONF request, we will change both username and password for the netconf-connector.","To update an existing netconf-connector you need to send following request to RESTCONF:","Since a PUT is a replace operation, the whole configuration must be specified along with the new values for username and password. This should result in a '2xx' response and the instance of netconf-connector called 'example' will be reconfigured to use username 'bob' and password'passwd'. New configuration can be verified by executing:","With new configuration, the old connection will be closed and a new one established."]},{"l":"Destroying of netconf-connector","p":["Using RESTCONF one can also destroy an instance of a netconf-connector - NETCONF connection will be dropped and all resources associated with NETCONF mount-point on NETCONF layer will be cleaned (both CONFIGURATION and OPERATIONAL data-store information). To do this, simply issue a request to following URL:","The last element of the URL is the name of the mount-point."]},{"l":"NETCONF TESTTOOL"},{"l":"Testtool overview","p":["NETCONF testtool is the Java application that:","Can be used for simulation of 1 or more NETCONF devices (it is suitable for scale testing).","Uses core implementation of NETCONF NORTHBOUND server.","Provides broad configuration options of simulated devices.","Supports YANG notifications.","NETCONF testtool is available at netconf repository of ODL( into config/ folder of FRINX UniConfig distribution, this file contains xml paths that should be ignored while removing duplicate nodes from the netconf message","Optional:","put file namespaceBlacklist.txt into config/ folder of FRINX UniConfig distribution, this file contains xml namespaces of the nodes that should be removed from the netconf message","Now UniConfig can be started."]},{"l":"Install SROS device","p":["To install the SROS device run:","Where:","sros: is the name of the device","10.19.0.18: is the IP address of the device","830: is the port number of the device","USERNAME: is the username to access the device","PASSWORD: is the respective password","\"uniconfig-config:uniconfig-native-enabled\": allows to enable installing through UniConfig Native","\"uniconfig-config:install-uniconfig-node-enabled\": allows to disable installing to uniconfig and unified layers","\"uniconfig-config:path\": allows to specify a list of root elements from models present on device to be ignored by UniConfig Native","In case of success the return code is 201."]},{"l":"Check if SROS device is connected","p":["To check if the device is properly connected run:","In case of success the return code is 200, and the response body contains something similar to:"]},{"l":"Check if SROS device configuration is available in UniConfig","p":["To check if the SROS device configuration has been properly loaded in the UniConfig config datastore, run:","In case of success the return code is 200 and the response body contains something similar to:"]}],[{"l":"UniConfig SNMP"},{"l":"Introduction","p":["The SNMP (Simple Network Management Protocol) southbound plugin enables Frinx UniConfig to communicate with an SNMP agent, which is a software module installed on network devices. 
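The reconfiguration and removal steps described above might look like this in Python (same assumed base URL as before). PUT replaces the whole node configuration, so the full body is sent with the updated credentials; DELETE drops the mount-point whose name is the last element of the URL.

```python
import requests

BASE = "http://127.0.0.1:8181/rests"   # assumed RESTCONF base URL
AUTH = ("admin", "admin")              # assumed UniConfig credentials
url = (f"{BASE}/data/network-topology:network-topology"
       "/topology=topology-netconf/node=example")

# PUT is a replace operation: the whole configuration must be specified,
# here with the updated username 'bob' and password 'passwd'.
updated = {
    "node": [{
        "node-id": "example",
        "netconf-node-topology:host": "192.168.1.100",
        "netconf-node-topology:port": 22,
        "netconf-node-topology:username": "bob",
        "netconf-node-topology:password": "passwd",
        "netconf-node-topology:tcp-only": False,
    }]
}
print("reconfigure:", requests.put(url, json=updated, auth=AUTH).status_code)

# DELETE destroys the netconf-connector and cleans up its mount-point.
print("destroy:", requests.delete(url, auth=AUTH).status_code)
```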
It collects information about the status, performance, and configuration of these devices.","The SNMP southbound plugin follows a fully model-driven approach, similar to CLI or NETCONF southbound plugins. However, the difference lies in the fact that it uses MIB (Management Information Base) for data modeling instead of YANG."]},{"l":"Architecture","p":["This section provides an architectural overview of the plugin, focusing on the main components."]},{"l":"SNMP topology","p":["The SNMP topology is a dedicated topology instance where users and applications can:","install an SNMP agent,","uninstall an agent,","read device configuration settings or performance metrics"]},{"l":"SNMP mountpoint","p":["The plugin relies on MD-SAL and its concept of mountpoints to expose information about a device. By exposing a mountpoint in MD-SAL, it enables the SNMP topology to access device information in a structured form."]},{"l":"Local SNMP MIB repositories","p":["It is necessary to provide /mibs directory that has to contain:","repository - it is directory that contains mib files. It is possible to use any name.","mib.metadata file - through this file, we inform UniConfig that we have added, removed, or modified some MIB file in the repository. Just insert the repository name and any arbitrary string and UniConfig will update the relevant context for particular repository."]},{"i":"example-of-mibmetadata-file","l":"Example of mib.metadata file"},{"l":"Example of requests","p":["UniConfig currently supports read operation, with plans to add write operation in the future."]},{"l":"GET request"}],[{"l":"Updating installation parameters"},{"l":"Overview","p":["During device installation UniConfig creates a mount-point for this device and stores it in the database. This mount-point contains all parameters set by the user in the installation request. UniConfig supports a feature to update mount-point parameters. It is possible to use it for both NETCONF and CLI nodes."]},{"l":"Show installation parameters","p":["Parameters of the installed devices can be displayed using a GET request on the node. It is necessary to use the right topology. It should return the current node settings. See the following examples:","By default, both NETCONF and CLI topologies have the password parameter encrypted. This can be changed in the corresponding yang schema by adding/removing the extension flag \"frinx-encrypt:encrypt\".","CLI node","Output:","NETCONF node"]},{"l":"Update installation parameters","p":["To update node installation parameters it is possible to use a PUT request with updated request body that is copied from the GET request from the previous section. It is also possible to update single parameter with direct PUT call to specific parameter.","If the password parameter is set to be encrypted, changing it will encrypt the input value.","CLI node","Update multiple parameters. Specifically:","host","dry-run-journal-size","journal-size","Update single parameter:","NETCONF node","keepalive-delay","After these changes, when we use the GET requests from the \"Show installation parameters\" section, then we can see that the parameters have actually been changed. It is also possible to use the GET request for single parameter."]}],[{"l":"UniConfig-native CLI"},{"l":"Introduction","p":["UniConfig-native CLI allows user configuration of CLI-enabled devices using YANG models that describe configuration commands. 
In a UniConfig-native CLI deployment, translation units are defined only by YANG models and device-specific characteristics that are used for parsing and serialization of commands. Afterwards, readers and writers are automatically created and provided to the translation registry - the user doesn't write them individually. YANG models can be constructed by following well-defined rules that are explained in the Developer Guide.","Summarized characteristics of UniConfig-native CLI:","modelling of device configuration using YANG models,","automatic provisioning of readers and writers by a generic translation unit,","simple translation units per device type that must define device characteristics and a set of YANG models."]},{"l":"Installation","p":["A CLI device can be installed as a native-CLI device by adding the 'uniconfig-config:uniconfig-native-enabled' flag with the 'true' value into the mount request (by default, this flag is set to 'false'). It is also required to use the tree parsing engine, which is enabled by default. All other mount request parameters that can be applied for classic CLI mountpoints can also be used in the native-CLI configuration with the same meaning.","The following example shows how to mount a Cisco IOS XR 5.3.4 device as a native-CLI device with enabled dry-run functionality:","After mounting of the CLI node finishes, you can verify the CLI mountpoint by fetching its Operational datastore:","You can see that there are some native models included in the 'available-capabilities' plus basic mandatory capabilities for CLI mountpoints. The number of supported native capabilities depends on the number of written models that are included in the native-CLI translation unit, for IOS XR 5.3.4 in this case. The only common capability for all native-CLI mountpoints is 'http://frinx.io/yang/native/extensions?module=cli-native-extensions'. Sample list of native capabilities:","The synced configuration on the UniConfig layer can be verified in the same way as for all types of devices:","Since the sample device configuration contains both ACL and interface configuration and native-CLI IOS XR 5.* covers this configuration, the synced data looks like the next output:","The previous sample output corresponds to the following parts of the configuration on the device:"]},{"l":"Architecture","p":["The following section describes building blocks and automated processes that take place in UniConfig-native CLI."]},{"l":"Modules","p":["The following UML diagram shows dependencies between modules from which UniConfig native-cli is built. The core of the system is represented by the 'native-cli-unit' module in the CLI layer, which depends on the CLI API for registration of units and on the readers and writers API. On the other side there are CLI-units that extend 'GenericCliNativeUnit'.","Dependencies","Description of modules:","utils-unit and translation-registry-api/spi: CLI layer API which native-cli units depend on. It defines the interface for CLI readers/writers, the translation unit collector that can be used for registration of a native-CLI unit, and the common 'TranslateUnit' interface.","native-cli-unit: It is responsible for automatic provisioning and registration of readers and writers (handlers) based on YANG modules that are defined in specific translation units. Readers and writers are initialized only for root container and list schema nodes defined in YANG models. All specific native-CLI units must be derived from the abstract class 'GenericCliNativeUnit'.","ios-xr-5-native and junos-17-native: Specific native-CLI units derived from 'GenericCliNativeUnit'. 
To make native-CLI unit working, it must implement methods that provides list of YANG modules, list of root data object interface, supported device versions, unit name, and CLI flavour."]},{"l":"Registration of handlers","p":["Registration of native-CLI handlers is described by following sequence diagram in detail.","Handlers","Description of the process:","Searching for root schema node: Extraction of the root list and container schema nodes from nodes that are augmented to UniConfig topology.","Building of device template information: Extraction of device template information from imported template YANG modules. This template contains command used for displaying of whole device configuration, format of configuration command, and format of delete command.","Initialization of handlers: Creation of native-CLI config readers and writers or native-CLI list readers and writers in case of list schema nodes.","Registration of handlers: Registration of readers and writers in reader and writer registries. Readers are registered as generic config readers, whereas writers are registered as wildcarded subtree writers.","Since native-CLI readers are not registered as subtree readers, it is possible to directly read only root elements from CLI mountpoint. This constraint is caused by unsupported wildcarded subtree readers in Honeycomb framework."]},{"l":"Functionality of readers","p":["Config readers and config list readers in UniConfig-native CLI are implemented as generic readers that parse device configuration into structuralized format based on registered native-CLI YANG models. These readers are initialized and registered per root data schema node that is supported in native-CLI. The next sequence diagram shows process taken by generic reader on calling 'readCurrentAttributes(..)' method.","Readers","Description of the process:","Creation of the configuration tree: It represents current device configuration by sending of 'show' command which is responsible for displaying of whole device configuration.","Transformation of configuration tree: It is transformed into binding-independent NormalizedNode using 'ConfigTreeStreamReader' component.","Conversion into binding-aware format: Conversion of binding-independent NormalizedNode into binding-aware DataObject and population of DataObject builder by fields from built DataObject.","Configuration is parsed into structuralized form before it is actually transformed into NormalizedNodes (step 1) because of more modular and easier approach. Configuration tree consists of 3 types of nodes:","Command nodes: They are represented by the last identifiers of the commands (command word). These nodes don't have any children nodes.","Section nodes: These nodes are represented by the command word / identifier that opens a new configuration section. Section nodes can have multiple children nodes.","Connector nodes: Connector nodes are similar to section nodes with identifier and multiple possible children nodes. 
However, they don't open a new configuration section; they represent just one intermediary word in command line.","Example - parsing of interface commands into the tree structure:","Parsing","Detailed description of algorithm for transformation of configuration tree into DOM objects:","Transformation","If some commands are not covered by native-CLI YANG models, the parsing of configuration in readers will not fail - unsupported nodes will be skipped."]},{"l":"Functionality of writers","p":["Config writers and config list writers are responsible for serialization of structuralized data from datastore into series of configuration or delete command lines that are compatible with target device. Native CLI writers are also registered only for root schema nodes on the same paths as readers. The next sequence diagram shows process taken by generic writer on calling 'writeCurrentAttributes(..)' or'deleteCurrentAttributes(..)' method.","Writers","Description of the process:","Conversion into binding-independent format: Conversion of binding-aware DataObject into binding-independent NormalizedNode format. Binding-independent format is more suited for automated traversal and building when the target class types of nodes are not known before compilation of YANG schemas is done.","Generation of command lines: NormalizedNode is serialized using stream writer into configuration buckets that are afterwards serialized into separated command lines. Conversion of configuration buckets into command lines can be customized by different strategies. Currently only the primitive strategy is used - it creates for each leaf command argument the full command line from top root - nesting into configuration modes is not supported. This step is described in detail by next activity diagram.","Generation of configuration or delete command lines: It is done by application of configuration or delete template on command line - for example, JUNOS devices use prefix 'set' for applying of the configuration and prefix 'delete' for removal of configuration from device.","Squashing of command lines into single snippet: This is only optimization step - all command lines are joined together with newline separator.","Sending of command to the device(blocking operation).","Configuration buckets are created as intermediary step because of the modularity and flexibility for application of different serialization strategies in future. There are 3 types of created buckets that are wired with respective schema nodes:","Leaf bucket: Bucket that doesn't have any children but it has a value in addition to the identifier. It is created from LeafNode.","Composite bucket: Bucket with identifier and possibly multiple children buckets. It can be used for following types of DOM nodes: ContainerNode or MapEntryNode.","Delegating bucket: Bucket that doesn't have any identifier, it just delegates configuration to its children buckets. It can be used for nodes that are described by ChoiceNode or MapNode.","Command serialization","The current implementation processes updates in default way - the whole actual configuration is removed and then the whole updated configuration is written back to device. 
This strategy can cause slow down of the commit operation in case of longer configuration and because of this reason it is addressed as one of the future improvements."]}],[{"l":"UniConfig Operations"},{"i":"sending-and-receiving-data-restconf","l":"Sending and receiving data (RESTCONF)","p":["RESTCONF represents REST API to access datastores and UniConfig operations."]},{"l":"UniConfig Node Manager API","p":["The responsibility of this component is to maintain configuration on devices based on intended configuration. Each device and its configuration is represented as a node in the uniconfig topology and the configuration of this node is described by using OpenConfig YANG models. The Northbound API of Uniconfig Manager (UNM) is RPC driven and provides functionality for commit with automatic rollback and synchronization of configuration from the network."]},{"l":"Device discovery","p":["This component is used to check reachable devices in a network. The manager checks the reachability via the ICMP protocol. Afterwards, the manager is able to check whether various TCP/UDP ports are open or not."]},{"l":"Dry-run Manager API","p":["The manager provides functionality showing CLI commands which would be sent to network element."]},{"l":"Snapshot Manager API","p":["The snapshot manager creates and deletes uniconfig snapshots of actual uniconfig topology. Multiple snapshots can be created in the system."]},{"l":"Subtree Manager API","p":["The subtree manager copies (merge/replace) subtrees between source and target paths."]},{"l":"Templates Manager API","p":["This component is responsible for application of templates into UniConfig nodes."]},{"l":"Transaction Log API","p":["This component is responsible for tracking transactions."]},{"l":"UniConfig Queries","p":["Using this component it is possible to invoke JSONB-path queries on top of the stored configuration."]},{"i":"dedicated-transaction-immediate-commit-model","l":"Dedicated transaction (Immediate Commit Model)","p":["The immediate commit creates new transactions for every call of an RPC. The transaction is then closed so no lingering data will occur."]},{"l":"Utilities","p":["This sub-directory contains UniConfig utilities."]}],[{"l":"JSONB Filtering","p":["Jsonb-filter is a query parameter that is used for filtering data based on one or more parameters. This filter is an effective mechanism for filtering a list of items. Using the jsonb-filter we can retrieve only those list items that meet the defined conditions.","Currently, we have two options of how to use the JSONB filtering functionality."]},{"l":"Database JSONB Filtering","p":["The query parameter is located in the URI. This option is faster because filtering is happening on the database side but this filtering has fewer features."]},{"l":"Application JSONB Filtering","p":["A new Content-Type is added. The query parameter is added in the body. Additional query parameters can be chained (sort by, limit, fields). This request is sent as a POST request. This filtering adds more features, but it is happening on the UniConfig application side which will be slower than the database filtering."]}],[{"l":"Application JSONB Filtering","p":["Application JSONB filtering supports either the dot notation:","or the bracket–notation:"]},{"l":"Jsonb-filter expression","p":["Every filter operation is sent using a POST request. Additionally, a new Content-Type header has been made for application JSONB Filtering. 
An example can be seen below:","The filter is located in the body of the request, not in the URI. Since it is located in the body, there is no need to escape characters. The body structure looks like this:","If the user wants to filter the list elements based on name, the query filter would look like this:","By default, the filter returns the same output structure as when calling a GET request. There is an option to add the whole parent structure, where the body will look like this:","This will filter out all the elements in the list whose name is foo."]},{"l":"Operators","p":["..",".","[?()]","['' (, '')]","[ (, )]","[start:end]","@","*","$","Array index or indexes.","Array slice operator.","Bracket-notated child or children.","Deep scan. Available anywhere a name is required.","Description","Dot-notated child.","Filter expression. Expression must evaluate to a boolean value.","Operator","Operators mentioned in the table below are used to construct a path.","The current node being processed by a filter predicate.","The root element to query. This starts all path expressions.","Wildcard. Available anywhere a name or numeric are required."]},{"l":"Functions","p":["add an item to the json path output array","append(X)","avg()","concat(X)","Description","Double","Functions can be called at the end of the query path. The input to the function is the output of the path expression. The function output is dictated by the function itself.","Integer","keys()","length()","like input","max()","min()","Operator","Output Type","Provides a concatinated version of the path output with a new item","Provides the average value of an array of numbers","Provides the length of an array","Provides the max value of an array of numbers","Provides the min value of an array of numbers","Provides the property keys (An alternative for terminal tilde ~)","Provides the standard deviation value of an array of numbers","Provides the sum value of an array of numbers","Set","stddev()","sum()"]},{"l":"Filter Operators","p":["!=","<","<=","==","=~",">",">=","A double quote: [?(@.name == \"foo\")]","A single quote: [?(@.name == 'foo')]","anyof","Description","empty","Filters are logical expressions used to filter arrays. A typical filter would be [?(@.age > 18)] where @ represents the current element being processed. More complex filters can be created with logical operators && and ||. 
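For example, a combined filter such as [?(@.enabled == true && @.speed >= 10)] (shown here purely as an illustration, reusing the enabled and speed fields from the interface examples in this chapter) selects array elements that are enabled and have a speed of at least 10. 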
String literals must be enclosed by:","in","left (array or string) should be empty","left does not exists in right","left exists in right [?(@.size in ['S', 'M'])]","left has an intersection with right [?(@.sizes anyof ['M', 'L'])]","left has no intersection with right [?(@.sizes noneof ['M', 'L'])]","left is a subset of right [?(@.sizes subsetof ['S', 'M', 'L'])]","left is equal to right (note that 1 is not equal to '1')","left is greater than or equal to right","left is greater than right","left is less or equal to right","left is less than right","left is not equal to right","left matches regular expression [?(@.name =~ /foo.*?/i)]","nin","noneof","Operator","size","size of left (array or string) should match right","subsetof"]},{"l":"Jsonb-filter examples","p":["$..interface[?(@.speed <= $['fast'])]","$..interface[?(@.type =~/.* Csmacd/i)]","$..name","$.ietf-interfaces:interfaces..type","$.ietf-interfaces:interfaces.*","$.ietf-interfaces:interfaces.interface.length()","$.ietf-interfaces:interfaces.interface[-2:]","$.ietf-interfaces:interfaces.interface[-2]","$.ietf-interfaces:interfaces.interface[:2]","$.ietf-interfaces:interfaces.interface[?(@.enabled)]","$.ietf-interfaces:interfaces.interface[?(@.speed >= 10)]","$.ietf-interfaces:interfaces.interface[*].name","$.ietf-interfaces:interfaces.interface[0,1]","$.ietf-interfaces:interfaces.interface[1:2]","$.ietf-interfaces:interfaces.interface[2:]","$.ietf-interfaces:interfaces.interface[2]","All interfaces from index 0 (inclusive) until index 2 (exclusive)","All interfaces from index 1 (inclusive) until index 2 (exclusive)","All interfaces matching regex (ignore case)","All interfaces that are not 'fast'","All interfaces that have the enabled element","All interfaces whose speed is greater or equal than 10","All names","All things under interfaces","Description","Interface number two from tail","JsonPath","Suppose we have the following data, and we want to do some filtering on them.","The first two books","The last two interfaces","The names of all interfaces","The number of interfaces","The second to last book","The third interface","The type of everything"]}],[{"l":"Database JSONB Filtering","p":["The example of using the jsonb-filter query parameter: parent-path?jsonb-filter=expression","PostgreSQL documentation: JSON Functions and Operators"]},{"l":"Jsonb-filter expression","p":["!","!=","{$/Cisco-IOS-XR-ifmgr-cfg:interface-configurations/interface-configuration=%28%23act,GigabitEthernet0/0/0/2%29}","{$/frinx-openconfig-interfaces:interfaces/interface=%28%23MgmtEth0/RP0/CPU0/0%29}","&&","<","<=","<>","==",">",">=","||","Absolute path","Boolean AND","Boolean NOT","Boolean OR","Composite key:","Description","Equality operator","exists","false","Greater-than operator","Greater-than-or-equal-to operator","In this case, a path must be prefixed with $. This path must start with a top-level parent container","In this case, the path must be prefixed with <@>. This path is relative to the parent-path","is unknown","Less-than operator","Less-than-or-equal-to operator","like_regex","Non-equality operator","Non-equality operator (same as !=)","null","Operator","Path","Relative path","Single key:","Sometimes especially absolute paths can contain a key of some item with special characters. In this case it is necessary wrap this key in a special syntax (#example-key-name) and also encode these wrapping symbols - %28%23example-key-name%29. If the key is a composite key, it is necessary to wrap the whole key with these symbols. 
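For instance, a composite key such as act,GigabitEthernet0/0/0/2 would be written as %28%23act,GigabitEthernet0/0/0/2%29 in the path, mirroring the composite-key example shown earlier in this section. 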
If the user is not sure if the path contains special characters, it is always recommended to use this special syntax.","starts with","Tests whether the first operand matches the regular expression given by the second operand","The base expression must contain a path, an operator and a value. The jsonb-filter can contain one or more expressions joined with the AND (&&) or OR (||) operator. If the && operator is used, it must be encoded.","The last element of the jsonb-filter expression is a value based on which the user wants to filter the data.","The path to the data that the user wants to filter. The path can be:","true","Value","Value used to perform a comparison with JSON false literal","Value used to perform a comparison with JSON null value","Value used to perform a comparison with JSON true literal","Value/Predicate Description","When the path is constructed, the user can use one of the operators in the table below"]},{"l":"Jsonb-filter examples","p":["1. Examples of using the relative paths in the jsonb-filter","Example of filtering the list of interfaces based on the enabled parameter where the equality operator is used as the operator","Example of filtering the list of interfaces based on the mtu parameter where the less-than is used as the operator","Example of filtering the list of interfaces based on the name parameter where the like_regex is used as the operator","Example of filtering the list of interfaces where a combination of expressions is used","Example of filtering the list of interfaces where the exists operator is used","2. Example of using the absolute path in the jsonb-filter","Example of filtering the list of interfaces based on the name parameter where the equality operator is used as the operator. The interface name \"GigabitEthernet0/0/0/2\" is a key value that contains slashes. For this reason, it is necessary to wrap this key into wrapping symbols (#GigabitEthernet0/0/0/2) and also encode these symbols - %28%23GigabitEthernet0/0/0/2%29."]}],[{"l":"Snapshot Manager","p":["The snapshot manager creates and deletes UniConfig snapshots of the actual UniConfig topology. Multiple snapshots can be created in the system.","Snapshots may be used for manual rollback. Manual rollback enables simple reconfiguration of the entire network using one of the previous states saved in snapshots. That means that UniConfig nodes in the config datastore are replaced with UniConfig snapshot nodes."]},{"l":"Create snapshot"},{"l":"Delete snapshot"},{"l":"Replace config with snapshot"},{"l":"Obtain snapshot metadata"}],[{"l":"Obtaining snapshots-metadata","p":["Snapshots metadata contains a list of created snapshots with the date of creation and a list of nodes."]}],[{"l":"RPC create-snapshot","p":["The RPC creates a snapshot from the nodes in the UniConfig topology. Later, this snapshot can be used for manual rollback. RPC input contains the name of the snapshot topology and the nodes that the snapshot will contain. Output of the RPC describes the result of the operation and matches all input nodes. You cannot call the RPC with empty target-nodes. If one node fails for any reason, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the name for the topology snapshot and the nodes that the snapshot contains. RPC output contains the result of the operation."]},{"l":"Failed Example","p":["The RPC input includes nodes that will be contained in the snapshot, but a snapshot name is missing. 
RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains a name for the topology snapshot and a node that will be contained in the snapshot. The node has not been mounted yet. RPC output contains the result of the operation."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input does not contain the target nodes, so the RPC cannot be executed."]}],[{"l":"RPC delete-snapshot","p":["The RPC removes the snapshot from the CONFIG datastore of the UniConfig transaction. RPC input contains the name of the snapshot topology which should be removed. RPC output contains the result of the operation."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the name of the snapshot topology which should be removed. RPC output contains the results of the operation."]},{"l":"Failed example","p":["RPC input contains the name of the snapshot topology which should be removed. The input snapshot name does not exist. RPC output contains the results of the operation."]}],[{"l":"RPC replace-config-with-snapshot","p":["The RPC replaces the nodes in the UniConfig topology in the CONFIG datastore with selected nodes from the specified snapshot. The RPC input contains the name of the snapshot topology and the target nodes which should replace the UniConfig nodes in the CONFIG datastore. Output of the RPC describes the result of the operation and matches all input nodes. You cannot call the RPC with empty target-nodes. If one node fails for any reason, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from the UniConfig topology in the CONFIG datastore and a list of nodes from that snapshot. RPC output contains the result of the operation."]},{"l":"Failed Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from the UniConfig topology in the CONFIG datastore and a list of nodes from that snapshot. The snapshot with the name (snapshot2) has not been created yet. RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from the UniConfig topology in the CONFIG datastore and a list of nodes from that snapshot. The snapshot name is missing in the RPC input. The RPC output contains the result of the operation."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from the UniConfig topology in the CONFIG datastore and a list of nodes from that snapshot. One node is missing in snapshot1 (IOSXRN). RPC output contains the result of the operation."]},{"i":"failed-example-3","l":"Failed Example","p":["RPC input does not contain the target nodes, so the RPC cannot be executed."]}],[{"l":"Subtree Manager","p":["The subtree manager copies (merge/replace) subtrees between source and target paths in the Configuration or Operational datastore of UniConfig. When one of these RPCs is called, the Subtree Manager (SM) reads the configuration from the source path and, according to the type of operation (merge / replace), copies the subtree data to the target path. The target path is a parent path UNDER which data is copied. SM also distinguishes the type of source / target datastore.","All RPCs support merging/replacing of configuration between two different schemas ('version drop' feature). 
This feature is handy, when it is necessary to copy some configuration between two mounted nodes that are described by slightly different YANG schemas. The following changes between schemas are tolerated:","Skipping non-existing composite nodes and leaves,","Adjusting namespace and revision in node identifiers, only name of nodes must match with target schema,","Moving nodes between choice and augmentation schema nodes,","Adjusting value format to target type definition of leaf or leaf-list schema node."]},{"l":"RPC copy-one-to-one","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC copy-one-to-many","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC copy-many-to-one","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC calculate-subtree-diff","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC calculate-subtree-git-like-diff","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC bulk-edit","p":["Applies multiple modifications to a list of target nodes. RPC bulk-edit"]}],[{"l":"RPC bulk-edit","p":["The bulk-edit operation can be used to modify multiple configuration subtrees under multiple target nodes from the 'uniconfig', 'templates' or 'unistore' topology (the same list of modifications are applied to all listed target nodes). The bulk-edit operation is executed atomically - either all modifications are applied on all target nodes successfully, or the operation fails and the configuration is not touched in the UniConfig transaction. This RPC also benefits from parallel processing of changes per target node."]},{"l":"RPC input","p":["RPC input specifies a list of target nodes and a list of modifications that must be applied under target nodes:","Description of input fields:","topology-id(mandatory): Identifier for the topology which contains all target nodes. Currently supported topologies: uniconfig, templates, unistore.","node-id(optional): List of target nodes identifiers residing in the specified topology. If this field is not specified or is empty, RPC is executed on all available nodes in the specified topology.","edit(mandatory with at least 1 entry): List of modifications. Each modification is uniquely identified by the 'path' key. Modifications are applied in the preserved user-defined order.","Description of fields in the edit entry:","path(mandatory): Path encoded using the RFC-8040 format. Specified as relative path to root'configuration' container. If this leaf contains a single character '/', the path points to the whole configuration. If this path contains a list node without key, the operation is applied to all list node elements.","operation(mandatory): Operation that must be executed on the specified path. Supported operations are 'merge', 'replace', and 'remove'. 
Operations 'merge' and 'replace' requires to also specify input 'data'.","data(optional): Content of the replaced or merged data without wrapping parent element(the last element of the path is not declared in the 'data', see examples on how to correctly specify content of this leaf in different use-cases).","Supported operations:","merge: Supplied value is merged with the target data node.","replace: Supplied value is used to replace the target data node.","remove: Delete target node if it exists."]},{"l":"RPC output","p":["RPC output contains the global status of the executed operation and per-node status.","Description of output fields:","overall-status: Status of operation. If RPC execution fails on at least one of the target nodes, the overall status is set to 'fail'. Otherwise, status is set to 'complete'.","error-message: \"Reason for the failure. Used if there is a structural error in the RPC input that does not relate to one specific target node.\"","node-result: Results of RPC execution divided per target node ('node-id' is the key of the list).","Description of fields in the node-result entry:","node-id: Identifier for the target node.","status: Status of bulk-edit operation on this node. This value is set to 'complete' only if all modifications have been successfully written into UniConfig transaction (including other nodes). Otherwise, the value is set to 'fail'.","error-message: Reason for the failure. This field appears in the output only if RPC execution failed on this target node.","error-type: Categorized error type."]},{"l":"RPC examples"},{"l":"Successful example","p":["The following request demonstrates the application of six (6) modifications to four (4) templates:","Replace the value of the 'description' leaf.","Remove the 'snmp' container.","Replace the whole 'ssh' container.","Merge the configuration of the 'routing-protocol' list entry.","Merge the whole 'tree' list with the specified multiple list entries.","Replace the leaf-list 'services' with the provided array of strings.","The response contains the overall status 'complete' and per-node status 'complete' - all modifications have been successfully written into the UniConfig transaction."]},{"l":"Failed example","p":["The following example demonstrates the execution of a bulk-edit operation that fails on parsing one of the paths using YANG schemas of the device 'dev02'.","The RPC response contains the overall status 'fail'. There is one error message in the result of 'dev02'. Note that the 'dev01' result also contains the 'fail' status, as modifications have not been written to this node since another node ('dev02') failed during execution of the operation."]}],[{"l":"RPC calculate-subtree-diff","p":["This RPC creates a diff between the source topology subtrees and target topology subtrees. Supported features:","Comparison of subtrees under same network-topology node.","Comparison of subtrees between different network-topology nodes that use same YANG schemas.","Comparison of subtrees with different revisions of YANGs schema that are syntactically compatible(for example, different software versions of devices).","RPC input contains data-tree paths ('source-path' and 'target-path') and data locations('source-datastore' and 'target-datastore'). Data location is the enumeration of two possible values, 'OPERATIONAL' and 'CONFIGURATION'. 
The default value of 'source-datastore' is 'OPERATIONAL' and the default value of 'target-datastore' is 'CONFIGURATION'.","RPC output contains a list of differences between source and target subtrees.","RPC calculate-subtree-diff"]},{"l":"RPC Examples"},{"i":"successful-example-computed-difference","l":"Successful example: Computed difference","p":["RPC calculate-subtree-diff input has a path to two different testtool devices with different YANG schemas. Output contains a list of statements representing the diff."]},{"i":"successful-example-no-difference","l":"Successful example: No difference","p":["The following output demonstrates a situation with no changes between specified subtrees."]},{"i":"failed-example-invalid-value-in-input-field","l":"Failed example: Invalid value in input field","p":["RPC calculate-subtree-diff has an improperly defined datastore (AAA) within the input. Output describes the allowed values [CONFIGURATION, OPERATIONAL]."]},{"i":"failed-example-missing-mandatory-field","l":"Failed example: Missing mandatory field","p":["RPC input does not contain the mandatory source path."]}],[{"l":"RPC calculate-subtree-git-like-diff","p":["This RPC creates a diff between the source topology subtrees and target topology subtrees. Supported features:","Comparison of subtrees under the same network-topology node.","Comparison of subtrees between different network-topology nodes that use the same YANG schemas.","Comparison of subtrees with different revisions of YANG schemas that are syntactically compatible (for example, different software versions of devices).","RPC input contains data-tree paths ('source-path' and 'target-path') and data locations ('source-datastore' and 'target-datastore'). Data location is the enumeration of two possible values, 'OPERATIONAL' and 'CONFIGURATION'. The default value of 'source-datastore' is 'OPERATIONAL' and the default value of 'target-datastore' is 'CONFIGURATION'.","RPC output contains differences between source and target subtrees formatted in a git-like style. The changes are grouped by root entities in the configuration."]},{"l":"RPC Examples"},{"i":"successful-example-computed-difference","l":"Successful example: Computed difference","p":["RPC calculate-subtree-git-like-diff input includes the path to two interfaces on different nodes. Both data locations are placed in the CONFIGURATION datastore. Output contains a list of all the changes. Multiple changes that occur under the same root element are merged together."]},{"i":"successful-example-no-difference","l":"Successful example: No difference","p":["The following output demonstrates a situation with no changes between specified subtrees."]},{"i":"failed-example-missing-mandatory-field","l":"Failed example: Missing mandatory field","p":["RPC input does not contain the mandatory target path."]}],[{"l":"RPC copy-many-to-one","p":["RPC input contains:","type of operation - 'merge' or 'replace',","type of source datastore - CONFIGURATION / OPERATIONAL,","type of target datastore - CONFIGURATION / OPERATIONAL,","list of source paths in RFC-8040 URI formatting,","target path in RFC-8040 URI formatting (target path denotes parent entities under which configuration is copied).","Target datastore is an optional input field. By default, it is the same as the source datastore. Other input fields are mandatory, so it is forbidden to call the RPC with a missing mandatory field. Output of the RPC describes the result of the copy to target path RPC. 
If one path fails for any reason, the RPC fails overall and no modification is done to the datastore - all modifications are done in a single atomic transaction.","Description of RPC copy-many-to-one is shown in the figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates execution of the copy-many-to-one RPC with 3 source paths. Data that is described by these source paths ('snmp', 'access', and 'ntp' containers under three different nodes) will be copied under the root 'system:system' container ('dev04' node)."]},{"l":"Failed example","p":["The following example shows a failed copy-many-to-one RPC. One of the source paths points to a non-existing schema node ('invalid:invalid')."]}],[{"l":"RPC copy-one-to-many","p":["RPC input contains:","type of operation - 'merge' or 'replace',","type of source datastore - CONFIGURATION / OPERATIONAL,","type of target datastore - CONFIGURATION / OPERATIONAL,","source path in RFC-8040 URI formatting, list of target paths in RFC-8040 URI formatting (target paths denote parent entities under which configuration is copied).","Target datastore is an optional input field. By default, it is the same as the source datastore. Other input fields are mandatory, so it is forbidden to call the RPC with a missing mandatory field. Output of the RPC describes the result of the copy to target paths RPC. If one path fails for any reason, the RPC fails overall and no modification is done to the datastore - all modifications are done in a single atomic transaction.","Description of RPC copy-one-to-many is shown in the figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates merging of ethernet interface configuration from a single source into interfaces 'eth-0/2' (node 'dev02'), 'eth-0/3' (node 'dev02'), 'eth-0/100' (node 'dev03'), and 'eth-0/200' (node 'dev03')."]},{"l":"Failed example","p":["The next example shows a failed copy-one-to-many RPC - both target paths are invalid since the 'ext' list schema nodes don't contain the 'interfaces:interfaces' child container."]}],[{"l":"RPC copy-one-to-one","p":["RPC input contains:","type of operation - 'merge' or 'replace',","type of source datastore - CONFIGURATION / OPERATIONAL,","type of target datastore - CONFIGURATION / OPERATIONAL,","source path in RFC-8040 URI formatting,","target path in RFC-8040 URI formatting (target path denotes parent entities under which configuration is copied).","Target datastore is an optional input field. By default, it is the same as the source datastore. Other input fields are mandatory, so it is forbidden to call the RPC with a missing mandatory field. Output of the RPC describes the result of the copy to target path operation. If the RPC fails for some reason, no modification is done to the datastore.","Description of RPC copy-one-to-one is shown in the figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates copying of the whole 'org:orgs' container from the 'dev01' node to the 'dev02' node under the 'uniconfig' topology. The replace operation is used."]},{"l":"Failed example","p":["The following example shows a failed copy-one-to-one RPC. Input contains the specified source datastore (target datastore is the same), merge operation, source path, and target path. In this example the target path is invalid because it doesn't contain the 'org:orgs' container in the schema tree."]}],[{"l":"Transaction Log","p":["The transaction log consists of a transaction tracker and a revert-changes RPC. 
The transaction tracker stores information called transaction-metadata about performed transactions into the operational snapshot, whereas the revert-changes RPC can be used to revert changes that have been made in a specific transaction. A user only needs the ID of the transaction for that. One or more transactions can be reverted using one revert-changes RPC."]},{"l":"RPC revert-changes"},{"l":"Transaction tracker"}],[{"l":"RPC revert-changes","p":["This RPC reverts changes that were configured within one transaction. If a user wants to revert a single transaction or multiple transactions, they must find the transaction-ids and paste them into the body of the RPC. The transaction-id is part of the transaction-metadata that is created by the transaction tracker after a commit/checked-commit RPC.","RPC revert-changes updates data only in the CONFIGURATION Snapshot. If we want to write reverted data to the device, we must use RPC commit after RPC revert-changes."]},{"l":"Ignore non-existent nodes","p":["If a user wants to revert multiple transactions, some transaction metadata may contain nodes that do not currently exist in UniConfig. In this case, the RPC fails. The user has a choice of two options:","remove transactions that contain non-existent nodes from the request body","add the 'ignore-non-existing-nodes' parameter to the RPC request body with a value of 'true' (default: 'false')","If the user does not use the 'ignore-non-existing-nodes' parameter, the default value 'false' is automatically used."]},{"l":"RPC Examples"},{"l":"Successful examples","p":["Before reverting a transaction we need to know its ID. We will use the GET request to display all stored transaction-metadata.","Reverting changes of a single transaction.","Reverting changes of multiple transactions.","Reverting changes of multiple transactions, where the transaction with id '2c4c1eb5-185a-4204-8021-2ea05ba2c2c1' contains the non-existent node 'R1'. In this case 'ignore-non-existing-nodes' with a value of 'true' is used, and therefore the RPC will be successful."]},{"l":"Failed example","p":["This is a case where the revert-changes request contains a non-existent transaction in the request body.","Reverting changes of multiple transactions, where the transaction metadata with id '2c4c1eb5-185a-4204-8021-2ea05ba2c2c1' contains a non-existent node. In this case 'ignore-non-existing-nodes' with a value of 'false' is used, and therefore the RPC fails."]}],[{"l":"Transaction tracker"},{"l":"Introduction","p":["The transaction tracker is responsible for saving transaction-metadata to the operational snapshot after a successfully executed commit/checked-commit RPC. The transaction-metadata contains information about performed transactions, such as:","transaction-id - Identifier of the transaction.","type-of-commit-time - Timestamp of either 'last-commit-time', when the transaction was successful, or 'failed-commit-time', when the transaction failed. If multiple devices are configured, then the 'last-commit-time' will contain the timestamp of the last update on the last device.","metadata - Items in this field represent nodes that have been configured in one transaction. Each item contains a diff item with additional information.","diff - Items in this field are the specific changes. Each item contains the path to the changes, the data before the change and the data after the change. In case of a failed transaction this information is not present.","topology - The topology on which a node is installed. 
Can be 'uniconfig' or 'unistore'.","Data-before is visible only if data was updated or deleted. Data-after is visible only if data was updated or created.","transaction-tracker"]},{"l":"Configuration","p":["UniConfig stores transaction metadata only if the 'lighty-uniconfig-config.json' file contains a \"maxStoredTransactions\" parameter in the \"transactions\" container and its value is greater than 0. It is necessary to make this setting before running UniConfig, otherwise the parameter \"maxStoredTransactions\" will be '0' (the default value) and the transaction log will be disabled."]},{"l":"Show transaction-metadata","p":["The response to this GET request contains all stored transaction-metadata, their IDs and other items such as node-id, the updated data before and after the update, etc."]}],[{"l":"UniConfig Node Manager","p":["An additional git-like diff RPC was created so that it shows all the changes grouped under root elements in a git-like style.","In the case where the configuration of one device fails, the UNM executes automatic rollback where the previous configuration is restored on all modified devices.","RPC calculate-diff","RPC calculate-git-like-diff","RPC check-installed-nodes","RPC checked-commit","RPC commit","RPC compare-config","RPC get-installed-nodes","RPC health","RPC install-multiple-nodes","RPC is-in-sync","RPC replace-config-with-operational","RPC sync-from-network","RPC uninstall-multiple-nodes","RPC validate","Synchronization from the network reads configuration from devices and stores it as the actual state in the OPER DS.","The responsibility of this component is to maintain configuration on devices based on intended configuration. Each device and its configuration is represented as a node in the uniconfig topology and the configuration of this node is described by using OpenConfig YANG models. The Northbound API of Uniconfig Manager (UNM) is RPC driven and provides functionality for commit with automatic rollback and synchronization of configuration from the network.","When a commit is called, the UNM creates a diff based on the intended state from the CONFIG DS and the actual state from the OPER DS. This diff is used as the basis for device configuration. UNM prepares a network-wide transaction which uses Unified mountpoints for communication with different types of devices."]}],[{"l":"RPC calculate-diff","p":["This RPC creates a diff between the actual UniConfig topology nodes and the intended UniConfig topology nodes. The RPC input contains a list of UniConfig nodes to calculate the diff. Output of the RPC contains a list of statements representing the diff. It also matches all input nodes. If the RPC is called with an empty list of target nodes, the diff is calculated for each modified node in the UniConfig transaction. If some node fails for any reason, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC calculate-diff input has two target nodes and the output contains a list of statements representing the diff."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC calculate-diff input does not contain the target nodes, calculate-diff will be invoked on top of all touched nodes in the transaction.","or"]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC calculate-diff input has a target node and there is no diff."]},{"l":"Failed Example","p":["The RPC calculate-diff input has a target node. Node 'R2' has not been installed yet. 
The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC calculate-diff input has two target nodes. One of the nodes,'R2', has not been installed yet. The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error.","or"]}],[{"l":"RPC calculate-git-like-diff","p":["This RPC creates a diff between the actual UniConfig topology nodes and the intended UniConfig topology nodes. The RPC input contains a list of UniConfig nodes to calculate the diff. Output of the RPC contains a list of statements representing the diff in a git-like style. It checks for every touched node in the transaction if target nodes are not specified in the input. If some node fails, the RPC will fail entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC calculate-git-like-diff input has two target nodes and the output contains a list of statements representing the diff."]},{"i":"successful-example-1","l":"Successful Example","p":["The RPC calculate-git-like-diff input has no target nodes specified, so it will look for all touched nodes in the transaction, and the output will contain a list of all changes on different paths. Multiple changes that occur under the same path are merged together."]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC calculate-git-like-diff input has target node and there is no diff."]},{"l":"Failed Example","p":["The RPC calculate-git-like-diff input has target node. Nodes 'R2' has not been installed yet. The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC calculate-git-like-diff input has two target nodes. One of the nodes,'R1', has not been installed yet. The output describes the result of the calculate-git-like-diff RPC."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC check-installed-nodes","p":["This RPC checks if devices included in the input are installed by looking for the database content of each device. If content is found, the device is installed."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a device while no devices are installed."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and device R1 is installed."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains devices (R1 and R2) and both devices are installed."]},{"l":"Failed Example","p":["RPC input does not specify any nodes."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input is missing the target-nodes container."]}],[{"l":"RPC checked-commit","p":["The trigger for execution of the checked configuration is RPC checked-commit. A checked commit is similar to an RPC commit, but it also checks if nodes are in sync with the network before it starts configuration. RPC fails if any node is out of sync. Output of the RPC describes the result of the commit and matches all modified nodes in the UniConfig transaction. 
If one node failed for any reason, RPC will fail entirely.","In comparison to commit RPC, there is one additional phase between 'lock and validate configured nodes' and 'write configuration into device' phases:","Lock and validate configured nodes","Check if nodes are in-sync with state on devices","Write configuration into device","Validate configuration","Confirmed commit","Confirming commit (submit configuration)","Following diagram captures check if configuration fingerprints in the transaction datastore and device are equal.","There is a difference between fingerprint-based validation in the phases 1 and 2. The goal of the first phase is validation if other transaction has already changed the same node by comparison of fingerprint in the UniConfig transaction and in the database. On the other side, the second phase validates if fingerprint in the transaction equals to fingerprint on the device - if another system / directly user via CLI has updated device configuration since the beginning of the transaction."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["Configuration of nodes 'R1' and 'R2' has been changed in the transaction. Both 'R1' and 'R2' are in-sync with actual state on the device. RPC checked-commit input invoke all touched nodes."]},{"l":"Failed Example","p":["Configuration of nodes 'R1' and 'R2' has been changed in the transaction. Both 'R1' and 'R2' are in-sync with actual state on the device. Node 'R1' has failed due to improper configuration. The output describes the result of the checked-commit RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["Configuration of nodes 'R1' has been changed in the transaction. Node 'R1' is in-sync with actual state on the device. Node 'R1' has failed on the changed fingerprint. The output describes the result of the checked-commit."]},{"i":"failed-example-2","l":"Failed Example","p":["Node 'R2' has lost connection."]},{"i":"failed-example-3","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC commit","p":["1. Lock and validate configured nodes","2. Write configuration into device","3. Validate configuration","4. Confirmed commit","5. Confirming commit (submit configuration)","Configuration phase","Confirmed commit","Confirmed commit - It is used for locking of device configuration, so no other transaction can touch this device. This phase can be skipped with \"do-confirmed-commit\" flag.","Confirming commit","Confirming commit (submit configuration) - Persisting all changes on devices and in the PostgreSQL database. UniConfig transaction is closed.","If one of the nodes uses a confirmed commit (phase 4), which does not fail, then it is necessary to issue the submitted configuration (phase 5) within the timeout period. Otherwise, the node configuration issued by the confirmed commit will be reverted to its state before the confirmed commit (i.e. confirmed commit makes only temporary configuration changes). 
The timeout period is 600 seconds (10 minutes) by default, but the user can change it in the installation request.","Lock and validate configured nodes - Locking all modified nodes using PostgreSQL advisory locks and validation of fingerprints - if another transaction tries to commit overlapping nodes or different transaction has already changed one of the nodes, then commit will fail at this step.","Locking nodes","Next diagram describe the first phase of commit RPC - locking of changes nodes in the PostgreSQL database and verification if other transaction has already committed overlapping nodes.","Next diagrams describe all 5 commit phases in detail:","Rollback - It is used for restoring of configuration to previous state, if the configuration process fails. When configuring more devices in a single transaction and the process fails on one particular device, the rollback procedure will be applied to all touched devices. This is done by auto rollback procedure, which is by default turned on. It can be switched off by setting up'do-rollback' flag in input of Commit RPC request. Then only failed devices will be rollbacked.","Rollback operation","RPC commit Commit invoke all touched nodes in transaction. There are no target nodes in the RPC input.","The 'skip-unreachable-nodes' flag controls whether unreachable nodes are skipped when the RPC commit is sent. If set to 'true', nodes that are not reachable are skipped and others are configured. The default value is 'false'.","The configuration of nodes consists of the following phases:","The external application stores the intended configuration under nodes in the UniConfig topology. The trigger for execution of configuration is an RPC commit. Output of the RPC describes the result of the commit.","The last diagram shows rollback procedure that must be executed after failed commit on nodes that have already been configured and don't support 'candidate' datastore.","The third and fourth phases take place only on the nodes that support these operations. If one node failed in the random phase for any reason the RPC will fail entirely. After commit RPC, UniConfig transaction is closed regardless of the commit result.","Validate configuration - Validation of written configuration from the view of constraints and consistency. This phase can be skipped with \"do-validate\" flag.","Validation phase","Write configuration into device - Pushing calculated changes into device without committing of these changes."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["UniConfig commits nodes 'R1' and 'R2' that has been changed in the actual transaction."]},{"i":"successful-example-1","l":"Successful Example","p":["Nodes 'R1' and 'R2' has been changed. RPC commit input has the flag to disable confirmed-commit phase. UniConfig commits all touched nodes."]},{"i":"successful-example-2","l":"Successful Example","p":["If there are not any touched nodes, the request will finish successfully."]},{"l":"Failed Example","p":["Node 'R1' has failed because of failed validation phase."]},{"i":"failed-example-1","l":"Failed Example","p":["Node 'R1' has failed because the confirmed commit failed. 
The validation phase was skipped because the \"do-validate\" flag was set to false."]},{"i":"failed-example-2","l":"Failed Example","p":["Node 'R1' has failed because of the time delay between the confirmed commit and the submitted configuration."]},{"i":"failed-example-3","l":"Failed Example","p":["Node 'R1' has failed due to improper configuration."]},{"i":"failed-example-4","l":"Failed Example","p":["Node 'R1' has lost connection."]},{"i":"failed-example-5","l":"Failed Example","p":["Node 'R1' has failed because of a wrong configuration. In this case, validation, confirmed-commit and auto-rollback were switched off. Because auto-rollback is switched off, the configuration of the 'R1' device was successful. However, this can only happen if the validation and confirmed-commit phases were successful or skipped; otherwise the configuration of the 'R1' device would also fail."]},{"i":"failed-example-6","l":"Failed Example","p":["The configuration of nodes 'R1' and 'R2' has been changed in the transaction and both are in-sync with the actual state on the device. Then the connection to node 'R2' has been lost. The RPC commit input has the flag to skip unreachable nodes set to true. The result of the commit RPC describes the success of node 'R1' and shows the list of unreachable nodes."]}],[{"l":"RPC compare-config","p":["This RPC is a combination of the sync-from-network and calculate-diff RPCs. If one of those RPCs fails, this one also fails with no changes made.","The purpose of this RPC is to synchronize configurations from network devices to UniConfig nodes in the Configuration datastore of the UniConfig transaction.","The RPC input contains a list of UniConfig nodes whose configuration should be compared to the actual configuration in the transaction. The output of the RPC describes the result of compare-config and matches all input nodes with a list of statements representing the diff."]},{"l":"RPC Examples"},{"l":"Successful Example"},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, the configuration of all touched nodes in the transaction is compared to the synced device configuration."]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC compare-config input has a target node and there is no diff."]},{"l":"Failed Example","p":["The RPC compare-config input has two target nodes. One of the nodes, 'R2', has not been installed yet. The output describes the result of the sync-from-network.","If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]},{"i":"failed-example-1","l":"Failed Example"}],[{"l":"RPC get-installed-nodes","p":["This RPC returns all installed devices from a specified topology.","If no topology is specified, the output may contain devices from multiple topologies (CLI, NETCONF, gNMI). In this case, devices must be installed with the install request parameter \"uniconfig-config:install-uniconfig-node-enabled\" set to \"true\". The RPC with no topology looks for nodes installed under the UNICONFIG topology by default."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The RPC input contains no topology and a device called 'R1' is installed in the NETCONF topology. With the parameter \"uniconfig-config:install-uniconfig-node-enabled\":\"true\" in the install request, it is also installed under the UNICONFIG topology."]},{"i":"successful-example-1","l":"Successful example","p":["The RPC input contains no topology and a device called 'R1' is installed in the NETCONF topology. 
With the parameter \"uniconfig-config:install-uniconfig-node-enabled\":\"false\" in the install request, it is not installed under the UNICONFIG topology."]},{"i":"successful-example-2","l":"Successful example","p":["The RPC input contains the GNMI topology and a device called 'R1' is installed in the topology."]},{"i":"successful-example-3","l":"Successful example","p":["The RPC input contains the CLI topology, but no devices are installed in the topology."]}],[{"l":"RPC health","p":["This RPC checks if UniConfig is running. If database persistence is enabled, it also checks the database connection."]},{"l":"RPC Examples","p":["The RPC health input is empty and the RPC output contains the result of the operation.","Response when database persistence is disabled:","Response when database persistence is enabled and the database connection is valid:","Response when database persistence is enabled and the database connection is not valid:"]}],[{"l":"RPC install-multiple-nodes","p":["This RPC installs multiple devices at once. It uses the default install-node RPC. Devices are installed in parallel."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains two devices (R1 and R2)."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and R2 uses two different protocols."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains two devices (R1 and R2) and R2 is already installed using the CLI protocol."]},{"l":"Failed Example","p":["RPC input does not specify node-id."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains two devices using the same node-id."]}],[{"l":"RPC is-in-sync","p":["This RPC can be used to verify whether the specified nodes are in-sync with the current state in the Operational datastore of the UniConfig transaction. This verification is done by comparison of configuration fingerprints. The configuration fingerprint on the device is compared with the last configuration fingerprint saved in the Operational datastore. A fingerprint is usually represented by a configuration timestamp or the last transaction ID. The is-in-sync feature is supported only for device types that have implemented translation units for the 'frinx-configuration-metadata' OpenConfig module (using cli units, netconf units, or uniconfig-native metadata units).","The RPC input contains a list of UniConfig nodes for which the verification should be completed ('target-nodes' field). The response comprises the operation status for each of the nodes specified in the RPC input. If the operation fails, it is because the specified node has not been successfully installed, the connection has been lost, or UniConfig does not support reading the configuration fingerprint from the specific device type. 
Calling the RPC with an empty list of target nodes results in invocation of the RPC for each node that has been modified in the UniConfig transaction.","Possible RPC outputs per target node:","'status' field with value 'complete' and the 'is-in-sync' boolean flag set; the is-in-sync feature is supported and the configuration fingerprints have been successfully compared.","'status' field with value 'fail' with 'error-type' set to 'no-connection' and a corresponding 'error-message'; the Unified mountpoint doesn't exist because the connection has been lost or the node has not been mounted yet.","'status' field with value 'fail' with 'error-type' set to 'uniconfig-error' and a corresponding 'error-message'; reading of the fingerprint from the Operational datastore or Unified mountpoint has failed, or the configuration metadata parsing is not supported for the device type.","Execution of the 'is-in-sync' RPC doesn't modify the Operational datastore. The configuration fingerprint that is stored in the Operational datastore is not updated. The 'sync-from-network' RPC must be used for updating the last configuration fingerprint and the actual configuration state."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC input contains valid nodes for which the synchronization status must be checked ('R1' is synced while 'R2' is not synced):"]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, all touched nodes will be invoked."]},{"l":"Failed Example","p":["RPC input contains an invalid node: 'R1' doesn't support comparison of fingerprints (the metadata translation unit has not been implemented for this device)."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains 2 nodes; the first one, 'R1', is valid and synced, while the second one ('R2') has not been installed yet. If there is one invalid node, the UniConfig operation will fail with 1 error entry in the response."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC replace-config-with-operational","p":["This RPC replaces the UniConfig topology nodes in the Config datastore with UniConfig topology nodes from the Operational datastore. The RPC input contains a list of the UniConfig nodes to replace from the Operational to the Config datastore of the UniConfig transaction. The output of the RPC describes the result of the operation and matches all input nodes. If the RPC is invoked with an empty list of target nodes, the operation is invoked for all nodes modified in the UniConfig transaction. If one node fails for any reason, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC replace-config-with-operational input has 2 target nodes and the RPC output contains the result of the operation."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, the configuration of all touched nodes will be replaced by the operational state."]},{"l":"Failed Example","p":["RPC input contains a list of the target nodes. Node 'R1' has not been installed yet. 
The RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC sync-from-network","p":["The purpose of this RPC is to synchronize configuration from network devices to the UniConfig nodes in the Operational datastore of UniConfig transaction. The RPC input contains a list of the UniConfig nodes where the configuration should be refreshed within the network. Output of the RPC describes the result of sync-from-network and matches all input nodes. Calling RPC with empty list of target nodes results in syncing configuration of all nodes that have been modified in the UniConfig transaction. If one node failed for any reason, the RPC will fail entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains nodes where configuration should be refreshed.","If RPC input does not contain the target nodes, all touched nodes in the transaction will be synced."]},{"l":"Failed Example","p":["RPC input contains a list of nodes where the configuration should be refreshed. Node 'R2' has not been installed yet."]},{"i":"failed-example-1","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC sync-to-network","p":["This RPC is a combination of sync-from-network and commit RPCs. If one of these RPCs fails the RPC will fail without any changes made.","The purpose of this RPC is to synchronize configuration from the UniConfig nodes in the Configuration datastore of UniConfig transaction to network devices. The RPC input contains a list of the UniConfig nodes which are to be updated on a network device. Output of the RPC describes the result of sync-to-network and matches all input nodes. Calling RPC with empty list of target nodes results in syncing configuration of all nodes that have been modified in the UniConfig transaction. If some node fails, the RPC fails entirely.","It is necessary for admin-state of UniConfig nodes, specified in the input, to be set to \"unlocked\"."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains nodes which are to be updated on the corresponding network device."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, operation will be invoked on top of all touched nodes in the transaction."]},{"l":"Failed Example","p":["If one or more input nodes are not set to admin-state 'unlocked' the request will result in an error pointing out nodes with the wrong admin-state."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains only one node with bad admin-state."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains 2 nodes, the first one 'R1' is valid, the second one 'R2' has not been installed yet. If there is at least one invalid node, operation will fail."]},{"i":"failed-example-3","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC uninstall-multiple-nodes","p":["This RPC uninstalls multiple devices at once. It uses the default uninstall-node RPC. 
Devices are uninstalled in parallel."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains two devices (R1 and R2)."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and R2 is installed using two different protocols."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains two devices (R1 and R2) and R2 is already uninstalled using the CLI protocol."]},{"l":"Failed Example","p":["RPC input does not specify node-id."]}],[{"l":"RPC validate","p":["The external application stores the intended configuration under nodes in the UniConfig topology. The configuration can be checked to determine whether or not it is valid. The trigger for executing configuration validation is the validate RPC. The RPC input contains a list of UniConfig nodes whose configuration should be validated. The output of the RPC describes the result of the validation and matches all input nodes. It is valid to call this RPC with an empty list of target nodes - in this case, all nodes that have been modified in the UniConfig transaction will be validated.","The configuration of nodes consists of the following phases:","Open transaction to device","Write configuration","Validate configuration","Close transaction","If one node fails in the second (validation) phase for any reason, the RPC fails entirely.","The validation (second phase) takes place only on nodes that support this operation.","The validate RPC is shown in the figure below."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC validate input has 2 target nodes and the output describes the result of the successful validation."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, all touched nodes in the transaction will be validated."]},{"l":"Failed Example","p":["The RPC validate input has 1 target node and the output describes the result of the validation. The node has failed because validation failed."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains 2 nodes; the first one, 'R1', is valid, while the second one, 'R2', has not been installed yet. If there is one invalid node, UniConfig evaluates the nodes as failed."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"UniConfig properties","p":["UniConfig properties are application properties used to configure the application. They can be separated into three groups:","Runtime mutable properties can be modified at runtime (using the update-properties RPC); their changes take effect at runtime and the properties are persisted in the database.","Database persisted properties include all runtime mutable properties and some additional properties. These properties are stored in the database, which is always their primary source. With UniConfig Cloud Config, they remain constant across UniConfig instances in the same cluster and cannot be overridden via the application properties file.","Regular UniConfig properties comprise all the remaining properties. 
These properties can always be changed using the application.properties file and can differ between UniConfig instances.","Database persisted properties can be changed or read in application runtime without restarting UniConfig by using UniConfig Cloud Config and the following RPCs:","RPC read-properties","RPC update-properties"]}],[{"l":"RPC read-properties","p":["The read-properties RPC reads default properties from the database. If a specified property key does not exist in the database, the key is returned in the ignored keys section. The RPC works the same whether UniConfig Cloud Config is enabled or disabled.","read","If UniConfig Cloud Config is disabled, the read-properties RPC reads property values from the database. These values may differ from values in the application instance."]},{"l":"RPC examples"},{"l":"Successful example","p":["RPC input contains default property keys."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains properties that are not default properties or are private (crypto keys and crypto types)."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input consists of properties that do not exist in the database."]}],[{"l":"RPC update-properties","p":["The update-properties RPC is used to update property values. If UniConfig Cloud Config is enabled, it also calls the Refresh Bus Endpoint to update properties at runtime for all connected UniConfig instances.","The RPC only updates default properties, except for crypto properties for which there are separate RPCs (change-encryption-status and change-encryption-keys).","RPC sequence diagram with UniConfig Cloud Config enabled:","update-with-ucc","If UniConfig Cloud Config is disabled, the RPC only updates property values in the database. The application instance continues to use the old property values, which can cause confusion.","Additionally, if a new UniConfig instance is started after properties have been updated, that instance will use the updated property values from the database. UniConfig instances will therefore use different values for the same property, as described in the diagram below.","We recommend that you use this RPC with UniConfig Cloud Config. The exception is callbacks.access-token, which is always up to date.","RPC sequence diagram with UniConfig Cloud Config disabled:","update-without-ucc"]},{"l":"RPC examples"},{"l":"Successful example","p":["RPC input contains the default properties with correct values."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains the crypto default property."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains an incorrect property key."]},{"l":"Failed example","p":["RPC input contains default properties with incorrect values."]},{"i":"failed-example-1","l":"Failed example","p":["RPC input contains default properties with incorrect values."]}],[{"l":"Utilities","p":["Utilities are simple programs that are part of the UniConfig distribution. After unpacking and building the distribution, utilities can be found in the 'utils' subdirectory."]},{"l":"YANG Packager"},{"l":"Difference between OpenAPI specifications"}],[{"l":"Difference between OpenAPI specifications"},{"l":"Introduction","p":["The UniConfig distribution includes a program for checking the difference between OpenAPI specifications. 
After building and unpacking the distribution, you can find the program in the 'utils' directory as a shell script called 'show_swagger_diff.sh'.","The program uses OpenAPI-diff to generate OpenAPI differences."]},{"l":"Usage","p":["The ./show_swagger_diff.sh script accepts four arguments. Each one has its own identifier, so you can give the arguments in any order. All arguments are optional, as default values are included for each argument.","--former, -f /path/to/former/yaml/files - optional argument. Path to previous OpenAPI specifications (.yaml files). The default path is 'openapi_diff/old'.","--new, -n /path/to/new/yaml/files - optional argument. Path to new OpenAPI specifications (.yaml files). The default path is 'openapi_diff/new'.","--output, -o /path/to/output - optional argument. Path for the HTML output file with differences. The default path is 'openapi_diff'.","-s - optional argument. Silent printing, includes less information.","The Bash script ./show_swagger_diff.sh also includes a simple help facility. There are two options for showing the help text:","./show_swagger_diff.sh -h","./show_swagger_diff.sh --help","The script only accepts YAML files."]},{"l":"Example use case"},{"l":"Default usage","p":["This example shows basic usage of the script with and without optional arguments. Open a terminal in the '../utils' directory and run the following command:","OR"]},{"l":"Usage with non-existent input path","p":["This example shows basic usage of the script where some specified input directories do not exist. Open a terminal in the '../utils' directory and run the following command:"]}],[{"l":"YANG packager"},{"l":"Introduction","p":["YANG packager is a simple program that is part of the UniConfig distribution. Users can find it in the utils/ directory after building and unpacking the UniConfig distribution. It is used via a simple shell script called 'convertYangsToUniconfigSchema.sh'. YANG packager is responsible for:","validation of user-provided YANG files","copying valid YANG files to the user-defined directory","informing the user about the conversion process"]},{"l":"Usage","p":["-d /path/to/default - optional argument. Sometimes YANG files need additional dependencies that are not provided in the source directories. In this case, it is possible to provide a path to the 'default' directory, which contains additional YANG files. If a missing YANG file is found there, YANG packager will use it.","-enableSwagger - optional argument. Path to the file that enables OpenAPI generation.","-g - optional argument. Path to the directory where generated Java sources with constants from YANG elements are saved. By default, generation of Java files is disabled.","-i /path/to/sources - required argument. The user has two options for where the path can point:","-jd - optional argument. Flag that enables generation of Java documentation for data elements.","-o /path/to/output-directory - required argument. The user can define the path where valid YANG files are saved. If the output directory exists, it will be replaced by a new one.","-pn - optional argument. Custom package name for generated classes.","-px - optional argument. Flag that enables a prefix for generated constant names inside generated classes.","-r - optional argument. Selection of repositories inside the source directory with files, or a file with defined names of directories which contain the files from which constants will be generated.","-s /path/to/skip-list - optional argument. 
The user can define, in a text file, the names of YANG files that should not be included in the conversion process. This file must only contain module names, without revision and the .yang suffix.","-to-file - optional argument. When this flag is used, YANG packager also saves the debug output to a file. This file can be found on the same path as the output directory and contains the suffix '-info' in its name. If the output directory is called 'output-directory', then the file will be called 'output-directory-info'.","./convertYangsToUniconfigSchema --help","./convertYangsToUniconfigSchema -h","The Bash script ./convertYangsToUniconfigSchema also includes a simple help facility. There are two options for showing the help text:","If the compilation process detects invalid YANG files, the output directory will not be created. In this case, the user has to fix the invalid YANG files or use a combination of the \"-d\" and \"-s\" arguments.","The script ./convertYangsToUniconfigSchema accepts several arguments. Each one has its own identifier, so the arguments can be given in any order. Two arguments are required, namely the path to the resources that contain YANG files and the path to the output directory where the user wants to copy all valid YANG files. The other arguments are optional, for example the path to the \"default\" directory which contains default YANG files, the path to the \"skip-list\", and the \"-to-file\" flag, which can be used to write debug output to a file.","The user is responsible for the validity of YANG files in the default directory. These files are not checked by YANG packager.","to the directory that contains YANG files and other sub-directories with YANG files","to a text file that contains the names of directories. These directories have to be stored on the same path as the text file."]},{"l":"Example use-case"},{"l":"Basic usage 1","p":["This is basic usage of the script where only mandatory arguments are used. In this case, a directory with YANG files is used as the source. All files in the source directory are valid YANG files. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Basic usage 2","p":["This is basic usage of the script where only mandatory arguments are used. In this case, a directory with YANG files is used as the source. The source directory also contains one invalid YANG file with a missing import. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Basic usage 3","p":["This is basic usage of the script where only mandatory arguments are used. In this case, a directory with YANG files is used as the source. The source directory also contains one non-YANG file. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Usage with default directory","p":["This is usage with a path to a default directory that contains one YANG file, openconfig-mpls. The source directory also contains one invalid YANG file, 'cisco-xr-openconfig-mpls-deviations.yang', with the missing import 'openconfig-mpls'. This missing import is loaded from the default directory. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Usage with skip-list","p":["This is usage with a path to a skip-list text file that contains one YANG file name, cisco-xr-openconfig-mpls-deviations. This YANG file will not be included in the conversion process. 
Open a terminal, go to the ../utils directory and run command:"]},{"l":"Usage with text-file as a source","p":["In this example a path to text-file with defined names of source directories is used.","Open a terminal, go to the ../utils directory and run command:"]},{"i":"usage-with--to-file-flag","l":"Usage with -to-file flag","p":["This is usage where output is also printed to file. User can find output information file on the path /path/to/output-info.","Open a terminal, go to the ../utils directory and run command:"]},{"i":"usage-with-text-file-as-a-source-and--to-file-flag","l":"Usage with text-file as a source and -to-file flag","p":["In this example a path to text-file with defined names of source directories is used and also flag for print outputs to files. User can find output information files on paths /path/to/output/directory-1-info and /path/to/output/directory-2-info","Open a terminal, go to the ../utils and run command:","Content of text-file"]},{"i":"usage-with--enableswagger-flag","l":"Usage with '-enableSwagger' flag","p":["In this example a path to a text-file with defined names of source directories is used. A flag to print outputs to files and a flag to enable swagger for OpenAPI files generation. The swagger configuration file is located at ../utils/config/swagger-config.json. Swagger output file / files are generated per directory, and they are located in the output directory. The user can find output information files on paths /path/to/output/directory-1-info and /path/to/output/directory-2-info.","Open a terminal, go to the ../utils directory. Run the command:","Additional parameters are available for swagger generation that further customise the OpenAPI file / files. These parameters are located at the beginning of the page.","The output then looks like this:"]},{"i":"error---source-directory-does-not-exist","l":"Error - source directory does not exist","p":["User-defined source directory does not exist.","Open a terminal, go to the ../utils directory and run command:"]},{"i":"error---source-directory-is-empty","l":"Error - source directory is empty","p":["User-defined source directory is empty. Open a terminal, go to the ../utils directory and run command:"]},{"i":"error---sources-defined-in-text-file","l":"Error - sources defined in text-file","p":["One directory defined in the text-file is empty and other one does not exist.","Open a terminal, go to the ../utils and run command:","Content of text-file"]}],[{"l":"Admin State","p":["Admin state is used to lock, unlock or southbound-lock devices. Modification of data on those devices is then allowed or forbidden accordingly. Currently, there are three states that are supported:","LOCKED - When a device is administratively locked, it is not possible to modify its configuration, and no changes are ever pushed to the device.","UNLOCKED - Device is assumed to be operational. All changes are attempted to be sent southbound. This is the default when a new device is created.","SOUTHBOUND_LOCKED - It is possible to configure the device, but no changes are sent to the device. Admin mode is useful when pre provisioning devices.","This state is automatically added to the device during installation. 
The user can further specify what state the device should be in, via:","\"uniconfig-config:admin-state\": \"unlocked\"","The state variable should be one of the above-mentioned options.","If the user wants to change the state after the installation, an RPC for changing that state is available."]},{"l":"RPC Example","p":["RPC input contains the device name and the state that it should be changed to."]},{"i":"rpc-example-1","l":"RPC Example","p":["GET request to get the actual state of the device."]},{"l":"RPC Failed Example","p":["The device is in the locked admin-state and the user tries to modify data on the device."]}],[{"l":"Build-and-Commit Model"},{"l":"Introduction","p":["The build-and-commit model is based on explicit creation of a transaction, invoking operations in the scope of this transaction, and finally committing or closing the transaction. The transaction represents a session between the client and the UniConfig instance.","Using explicitly created transactions has multiple advantages in comparison to the Immediate Commit Model:","Multiple operations and modifications can be invoked in a single transaction while keeping transactions isolated.","Most UniConfig operations, such as calculate-diff and commit, don't have any use in the Immediate Commit Model - they are valuable only if the Build-and-Commit Model is used.","The transaction allows a client to identify whether it still communicates with the same UniConfig instance (this property is usable in a clustered deployment). If the UniConfig instance does not know about the transaction, the request fails because the transaction expired, is closed, or has never been created."]},{"l":"Configuration","p":["Configuration related to UniConfig transactions is placed in the 'config/lighty-uniconfig-config.json' file under the 'transactions' container. Note that the build-and-commit model is enabled if 'uniconfigTransactionEnabled' is set to 'true' (the default value)."]},{"l":"Optimistic locking mechanism","p":["The race condition between transactions that are committed in parallel and contain changes to the same nodes (uniconfig, unistore, snapshot, or template nodes) is solved using an optimistic locking mechanism. The configuration of the same node can be modified in parallel from 2 transactions; however, only the first committed transaction will succeed. Commit of the second transaction will fail.","UniConfig uses 2 different techniques for detecting conflicts during the commit or checked-commit operation:","Comparison of configuration fingerprints - The fingerprint value is updated for an altered node at the end of the commit operation. At the beginning of the commit operation, UniConfig compares the actual fingerprint in the database with the fingerprint read before the first CRUD operation done in the transaction and with the last synced fingerprint (updated after execution of the sync-from-network RPC). If the actual fingerprint from the database equals the fingerprint read before the first CRUD operation or the last synced fingerprint, the commit operation can continue. Otherwise, an error is returned without touching any devices on the network.","Per-node advisory locks - Comparison of configuration fingerprints is reliable if transactions are committed one after another. However, such serialization cannot be achieved in a clustered environment because UniConfig instances are not coordinated. 
If 2 transactions are committed at the same time and both assume that configuration fingerprints haven't been updated by the other transaction, both transactions may start to push changes to network devices at the same time. To prevent occurrences of this scenario, UniConfig locks the node in the PostgreSQL database using transaction-level advisory locks at the beginning of the commit operation. If another transaction tries to lock the same node, this attempt fails and the second transaction does not enter the critical section - instead it fails. Locks are automatically released at the end of the transaction (the commit RPC closes the transaction).","All possible scenarios are captured in the following diagrams.","Optimistic locking"]},{"l":"Dynamic mountpoints","p":["Mountpoints are created only when UniConfig needs to read / write some data from / to a device, and the lifecycle of a mountpoint is bounded by the lifecycles of the transactions that use it. If a mountpoint is not used by any transaction, UniConfig automatically closes this mountpoint - the associated operational data on the southbound layer and the connection to the device are removed.","The first diagram demonstrates mounting of 2 devices which are used by 1 transaction - after this transaction is closed, both mountpoints are closed. The second diagram shows a scenario in which 2 transactions share 1 of 2 mountpoints - after the first transaction is closed, 1 of the mountpoints is not closed, since the second transaction may still communicate with the corresponding device."]},{"l":"Creation of transaction","p":["A transaction can be created using the create-transaction RPC. The RPC doesn't specify an input body and also returns a response without a body. The response additionally contains a Set-Cookie header with the UNICONFIGTXID key and a corresponding value - a transaction identifier that conforms to the RFC 4122 Universally Unique IDentifier (UUID) format.","The process of transaction creation is depicted by the following sequence diagram.","create-transaction RPC","UniConfig performs the following steps after the create-transaction RPC is called:","Creation of a connection to the database system - The connection is created with auto-commit disabled, enabling transactional features. UniConfig uses the 'read committed' isolation level.","Creation of a database transaction - It provides access to the remote PostgreSQL database. Using the database transaction, it is possible to read committed data, read uncommitted changes created by this transaction, and write modifications to the database. Data read at the first access to some resource is cached in the datastore transaction - when some component tries to access the same resource again, it is read only from the datastore transaction. Data is written to the database transaction at invocation of the commit/checked-commit RPC.","Creation of a datastore read-write transaction - It provides access to the OPER and CONFIG datastores bound to this transaction. The datastore is used only as a cache between the application and the PostgreSQL database, and it resides only in the memory allocated to the UniConfig process. The datastore transaction is never committed - the cache is discarded at the end of the transaction's life.","Registration of the transaction - A transaction is always bound to 1 specific UniConfig instance."]},{"l":"Successful example","p":["The following request shows successful creation of a UniConfig transaction. 
The response contains a Set-Cookie header with the UNICONFIGTXID key and value."]},{"l":"Failed example","p":["The most common reason for failed creation of a UniConfig transaction is reaching the maximum number of open transactions, which is limited by the ('maxDbPoolSize' - 'maxInternalDbConnections') database connection pool setting. In that case, UniConfig returns a response with the 500 status code."]},{"l":"Transaction idle-timeout","p":["The create-transaction RPC can be used with an optional query parameter called timeout. This parameter is used to override the global idle timeout for the transaction created by this RPC call. After the transaction is inactive for the specified time, it is automatically cleaned. The value of this parameter is a whole number and defines the time in seconds."]},{"l":"Dedicated session to device","p":["By default, UniConfig shares a southbound session to a network device if multiple UniConfig transactions use the same device via the same management protocol. This behaviour can be disabled using the 'dedicatedDeviceSession' query parameter, which accepts a boolean value. Afterwards, the UniConfig transaction will create a dedicated session to the device, which is used only by one transaction and closed immediately after committing or closing the transaction.","Dedicated sessions to a device are useful when:","The device is not able to process requests in parallel via the same session.","The device is able to process requests in parallel via the same session, but it doesn't actually process them in parallel","decreasing processing performance."]},{"l":"Invocation of CRUD operation in transaction","p":["CRUD operations for modifying or reading node configuration can be invoked in a specific transaction by appending UNICONFIGTXID (key) with the UUID of the transaction (value) to the Cookie headers. In that case, the operation is invoked only in the scope of a single transaction - changes are not visible to other transactions until this transaction is successfully committed.","The next diagram describes execution of a CRUD operation from the RESTCONF API. It also shows the difference between the datastore and database transaction - data is read from the database only at the first access to some data (for example, node configuration). After that, this configuration is cached inside the temporary datastore transaction - the goal is to improve performance by limiting data transfers between UniConfig and PostgreSQL. The next access to the same configuration can be evaluated in the in-memory datastore.","Invocation of CRUD"]},{"i":"successful-example-1","l":"Successful example","p":["The following request demonstrates reading of some configuration of the junos node from the uniconfig topology in the transaction with ID 'd7ff736e-8efa-4cc5-9d27-b7f560a76ff3'."]},{"i":"failed-example-1","l":"Failed example","p":["Trying to use a non-existing UniConfig transaction results in the 422 status code (Unprocessable Entity)."]},{"l":"Invocation of RPC operation in transaction","p":["An RPC operation can be invoked in a specific transaction the same way as a CRUD operation - by specifying UNICONFIGTXID in the Cookie header.","There are a few differences between CRUD and RPC operations from the view of transactions:","Commit, checked-commit, and close-transaction RPCs can change the state of the transaction. 
The create-transaction RPC is reserved for creation of a transaction.","Not all RPC operations exposed by UniConfig use dedicated transactions - in that case, these RPCs simply ignore an explicitly specified transaction and either don't work with transactions at all or create a transaction internally (examples: install-node, uninstall-node RPC).","There are also transaction-aware operations that directly leverage properties of transactions. For example, if a UniConfig RPC is invoked with an empty list of target nodes, the operation is automatically applied to all modified nodes in the transaction (the calculate-diff RPC with empty target nodes computes the diff for all modified nodes in the transaction).","The following diagram shows execution of an arbitrary RPC in the specified transaction.","Invocation of RPC"]},{"i":"successful-example-2","l":"Successful example","p":["Invocation of the calculate-diff RPC in the transaction that contains modifications done on the 'junos' node."]},{"i":"failed-example-2","l":"Failed example","p":["Invocation of the calculate-diff RPC with a transaction ID that has the wrong format."]},{"l":"Closing transaction","p":["There are 2 ways a transaction can be closed:","close-transaction RPC - Explicit closing of the transaction that results in dropping all changes done in the transaction.","commit/checked-commit RPC - After execution of the commit operation, the transaction is automatically closed (regardless of the commit result). The behaviour of the commit and checked-commit RPCs is described in more detail under the 'UniConfig Node Manager' section.","The close-transaction RPC doesn't contain a body, only a Cookie header with the UNICONFIGTXID property pointing to the transaction that the user would like to close. The response contains information on whether the transaction has been successfully closed.","The following sequence diagrams describe the close-transaction procedure. It is split into 2 diagrams to improve readability and to reuse some parts from other diagrams.","close-transaction RPC","Clean orphaned mountpoints","The most important actions, briefly:","Loading the UniConfig transaction from the registry by the provided transaction ID that is extracted from the Cookie header.","Closing the connection to the database.","Cancellation of the database transaction.","Cancellation of the datastore read-write transaction.","Unregistration of the transaction from the local registry.","Unmounting nodes that are not referenced by any UniConfig transaction - the connection to the device is closed and the corresponding southbound / Unified mountpoints are removed together with state data.","After a transaction is closed, it cannot be used by any other operation - the user must create a new transaction in order to use the build-and-commit model."]},{"i":"successful-example-3","l":"Successful example","p":["Closing an existing transaction using the close-transaction RPC. The response doesn't contain a body, only status code 200."]},{"i":"failed-example-3","l":"Failed example","p":["If the transaction has already been closed, the user receives a response with a JSON body containing an error message."]},{"l":"Transaction cleaner","p":["The transaction cleaner is used for automatically closing transactions that are open longer than the specified timeout value (the 'transactionIdleTimeOut' or 'maxTransactionAge' setting in the configuration). A transaction resets its 'transactionIdleTimeOut' timer after invoking a CRUD or RPC operation and remains valid for the time specified by the setting. 
This mechanism effectively suppresses application-level errors - open transactions are not closed at the end of the workflow.","The next sequence diagram describes the cleaning process. The referenced diagram 'Close transaction' is placed in the previous 'Closing transaction' section."]},{"l":"Use cases"},{"l":"Modification of different devices in separate transactions","p":["1. Installation of 2 devices - ‘xr6_1’ and ‘xr6_2’ (without transaction ID)","2. Creation of 2 uniconfig transactions: let’s name them TX1 and TX2","3. Modification of ‘xr6_1’ uniconfig configuration inside TX1","4. Modification of ‘xr6_2’ uniconfig configuration inside TX2","5. Verification if TX1 and TX2 are isolated","6. Committing TX1 and TX2 using uniconfig-manager:commit RPC","7. Verification of committed data","8. Verification if TX1 and TX2 are closed","All 3 responses - Status 200 OK with returned expected data. Similar verification can be done on 'xr6_2'.","Both responses should return Status 404 Not Found:","Creation of new Loopback79 interface - cookie header contains UNICONFIGTXID of TX2:","Creation of new Loopback97 interface in the TX1 - cookie header contains UNICONFIGTXID of TX1:","It is not required to specify target nodes in the input because UniConfig transaction tracks modified nodes:","Response - Status 422 Unprocessable Entity:","Response:","Since there aren't any conflicts between modifications in the committed transactions, both RPCs should succeed. Expected responses:","The first response contains the transaction-id of TX1 that can be used in the subsequent requests that belong to TX1:","The second response contains the transaction-id of TX2 that can be used in the subsequent requests that belong to TX2:","Trying to read some data in the TX1:","Trying to read some data in the TX2:","TX1 doesn't see modifications done in TX2 and vice-versa:","Verification if configuration was correctly committed to devices (direct read under yang-ext:mount) and if datastore was updated (GET request without transaction ID):","Verification if TX1 contains created interface (Cookie header contains UNICONFIGTXID of TX1):","Verification if TX2 contains created interface (Cookie header contains UNICONFIGTXID of TX2):"]},{"l":"Modification of sub-tree on same device in separate transactions","p":["1. Installation of device ‘xr6_1’","2. Preparation of configuration on 'xr6_1'","3. Creation of 2 uniconfig transactions: let’s name them TX1 and TX2","4. Modification of ‘xr6_1’ uniconfig configuration inside TX1","5. Modification of ‘xr6_1’ uniconfig configuration inside TX2","6. Commit TX1","7. Commit TX2","8. Verification of committed data in TX1 / non-committed data in TX2","9. 
Verification if TX1 and TX2 are closed","Changing description of interface Loopback97 to 'next loopback' - there is a conflict with TX1, which also tries to create/replace the configuration of the same interface:","Changing description of interface Loopback97 to 'test loopback':","Commit TX1 without target nodes - it should fail because the same node has already been modified by a different transaction that has already been committed:","Commit TX2 without target nodes - it should pass:","Creation of Loopback97 interface with some initial description:","Creation of the uniconfig transaction TX1:","Creation of the uniconfig transaction TX2:","Respective responses:","Response - Status 200 OK with error message:","Response:","Trying to read some data in the transaction:","Verification if committed changes in TX1 were applied to datastore and device:"]}],[{"l":"Device Discovery","p":["\"addressCheckLimit\" specifies how many addresses are checked. If more addresses are specified in the request, the request will not be successful.","\"max-pool-size\" specifies the size of the executor that is used. If the amount of addresses in the request is high, consider raising the value.","\"network\": \"192.168.1.0/24\"","\"start-ipv4-address\": \"192.168.1.1\", \"end-ipv4-address\":\"192.168.1.254\"","/opt/uniconfig-frinx/config/application.properties","~/FRINX-machine/config/uniconfig/frinx/uniconfig/config/application.properties","Execute the ifconfig command in the terminal and look for an interface. If you are using a VPN, the interface is often called tun0. If not, look for a different interface. Copy inet from the interface and paste it into the file.","For testing, you need to add your IP address to the configuration JSON file. The configuration file is located under","If you specify the range using a network statement, the network address and broadcast address will not be included in the discovery process. If you specify the range via range statements, make sure that only host addresses are included in the specified range.","If you want to discover hosts and ports in listening state in a network, do not add the network and broadcast address of that network. For example, if you want to check the network \"192.168.1.0/24\", you can use one of the following:","\"initial-pool-size\" specifies the initial pool size of the thread pool that is used by the executor.","\"keepalive-time\" specifies the time (in seconds) before the execution of a specified task is timed out.","The device-discovery RPC is used to verify reachable devices in a network. You can either check a single IP address in IPv4 format, a network, or a range of addresses. Additionally, you can also specify a port or a range of ports (TCP or UDP) that are checked to see if they are open. The ICMP protocol is used to check the availability of devices.","The input consists of a list of all IP addresses that should be checked (IPv4 or IPv6, a single IP address or a network with a prefix, or a range of IP addresses). Additionally, it contains the TCP/UDP ports that should be checked to determine whether or not they are open on the given addresses.","The output of the RPC shows if the IP addresses are reachable via the ICMP protocol. For every IP address, a list of open TCP/UDP ports is also included.","The snippet contains two additional parameters.","When running UniConfig stand-alone, the config file is in the config folder:"]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a network with the prefix /29. Addresses in the network and desired ports are checked for availability. 
The output contains reachable addresses in the network and all open TCP/UDP ports."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains a range of addresses. The addresses and desired ports are checked for availability. The output contains reachable addresses and all open TCP/UDP ports."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains the host name and ports that are checked for availability. The output shows if the host is reachable as well as all open TCP/UDP ports."]},{"l":"Failed Example","p":["RPC input contains two addresses that are incorrectly wrapped."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains an IP range where the start point is greater than the end point."]},{"l":"Not supported operation Example","p":["RPC input contains a network in IPv6 format that is currently not supported."]}],[{"l":"Dry-run manager"},{"l":"RPC dryrun-commit","p":["The RPC will resolve the diff between actual and intended configuration of nodes by using UniConfig Node Manager. Changes for CLI nodes are applied by using cli-dryrun mountpoint which only stores translated CLI commands to the cli-dry-run journal. After all changes are applied, the cli-dryrun journal is read and an RPC output is created and returned. It works similarly with NETCONF devices, but it outputs NETCONF messages instead of CLI commands. RPC input contains a list of UniConfig nodes for which to execute the dry run. Output of the RPC describes the results of the operation and matches all input nodes. It also contains a list of commands, and NETCONF messages for the given nodes. If RPC is called with empty list of target nodes, dryrun operation is executed on all modified nodes in the UniConfig transaction. If one node failed for any reason the RPC will be failed entirely.","RPC dryrun commit"]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input does not contain target nodes, dryrun is executed with all modified nodes."]},{"l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called. One node does not support dry-run."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called. One node has a bad configuration."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to a device if the RPC commit or checked-commit was called. One node does not support dry-run (IOSXR) and one is not in the unified topology (IOSXRN). There is one extra node, which has not been mounted yet (AAA)."]},{"i":"failed-example-3","l":"Failed Example","p":["RPC input contains a target node and the output contains a list of commands which would be sent to a device if the RPC commit or checked-commit was called. 
One node has not been mounted yet (AAA)."]},{"i":"failed-example-4","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there weren't any touched nodes, the request will result in an error."]}],[{"l":"Immediate Commit Model","p":["The immediate commit creates new transactions for every call of an RPC. The transaction is then closed so no lingering data will occur.","For reading data (GET request), a sequential diagram was created for better understanding of how the whole process works.","Get Request","Similarly, a sequential diagram for putting data (PUT request) was created as well.","Put Request","The key difference in those diagrams is that editing data (PUT, PATCH, DELETE, POST) + RPC calls in the database need to be committed, so there is an additional call of the commit RPC. This commit ensures that the transaction is closed. For reading data, it is necessary to close the transaction differently, because no data were changed, so calling a commit would be unnecessary.","When calling the 'sync-from-network' RPC, it internally calls'replace-config-with-operational'. Note that this only works when using the Immediate Commit Model."]},{"l":"Configuration","p":["Configuration related to UniConfig transactions is placed in the'config/lighty-uniconfig-config.json' file under 'transactions' container. A user can turn off the Immediate Commit Model and use only the Build and Commit Model instead."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a new interface that will be added to the existing ones.","After putting the data into the database, they will be automatically committed and can be viewed."]},{"l":"Failed Example","p":["RPC input contains a value that is not supported."]}],[{"l":"Kafka Notifications"},{"l":"Introduction","p":["NETCONF devices produce NETCONF notifications. UniConfig can collect these and create its own UniConfig notifications about specific events. Notifications from both NETCONF devices and UniConfig are published using Kafka.","The following notification types are available:","NETCONF notifications","Notifications about transactions","Audit logs (RESTCONF notifications)","Data-change events","Connection notifications","Each notification type is stored in its own topic in Kafka. Additionally, all notifications are stored in one table in the database.","notifications-in-cluster"]},{"l":"Kafka","p":["Apache Kafka is a publish–subscribe-based, durable messaging system that sends messages between processes, applications and servers. Within Kafka, you can define topics (categories) and applications can add, process and reprocess records.","In our specific case, UniConfig publishes notifications. Each type of notification is stored in a separate topic and can therefore be subscribed to independently. The names of topics and connection data are configurable in the file lighty-uniconfig-config.json."]},{"l":"NETCONF notifications","p":["RFC 5277 defines a mechanism where the NETCONF client indicates an interest in receiving event notifications from a NETCONF server by subscribing to event notifications. The NETCONF server replies whether the subscription request was successful and, if so, starts sending event notifications to the NETCONF client as events occur within the system. Event notifications are sent until either the NETCONF session or the subscription is terminated.","NETCONF notifications are categorised as so-called streams. The subscriber must choose which streams to receive. 
The default stream is named NETCONF."]},{"l":"Notifications about transactions","p":["This type of notification is generated after each commit in UniConfig.","It contains the following:","transaction id","calculate diff result","commit result"]},{"i":"audit-logs-restconf-notifications","l":"Audit logs (RESTCONF notifications)","p":["Below are three examples of notifications with the response body and the calculation difference result.","body","http-method","It contains the following:","query-parameters","request data","response data","source-address","source-port","status-code","The first example is for created data:","The response body does not need to be included in notifications. It can be configured using the includeResponseBody parameter in the application.properties file. Also, the calculation difference result can be part of the notification if the includeCalculateDiffResult parameter is set to true in the application.properties file.","The second example is for deleted data:","The third example is for updated data:","This type of notification is generated after each RESTCONF operation.","transaction id","uri","user-id"]},{"l":"Shell notifications","p":["This type of notification is generated after each shell operation.","It contains the following:","transaction id","request data","source-address","source-port","prompt","executed command","response data","output"]},{"l":"Data-change events","p":["A subscription step is required before data-change events are generated and published into Kafka. With the subscription, a user can specify which subtrees are observed for data changes. Afterwards, data-change events are generated by UniConfig instances when a transaction is committed and the committed changes contain subscribed subtrees.","A sample data-change event captured by the Kafka console consumer:","For data-change events, the streamName is always 'DCE' and the identifier for the YANG notification is 'data-change-event'. The body contains the following:","subscription-id: Identifier of the subscription that triggers generation of the data-change-event. The subscription identifier makes association of subscriptions and received data-change-events easier than using a combination of multiple fields such as node identifier, topology identifier and subtree path.","transaction-id: Identifier of the committed transaction that triggered the data-change-event after the commit or checked-commit UniConfig operation.","edit - List of captured modifications done in the committed transaction.","Edit entry fields:","subtree-path: Relative path to the data-tree element at which the data change happened. The path is relative to the subtree-path specified during subscription.","data-before: JSON representation of the subtree data before the changes. If this field is not present, then 'data-after' represents created data.","data-after: JSON representation of the subtree data including the changes. If this field is not present, then 'data-before' represents removed data.","operation: Operation type of the data change event.","node-id: Node identifier of the data change event.","topology-id: Topology where the node exists. Can be either 'uniconfig' or 'unistore'."]},{"l":"Connection notifications","p":["Connection notifications are generated whenever the status of a node changes. 
For connection notifications, the streamName is always 'CONNECTION' and the identifier for the YANG notification is ' connection-notification'.","It contains the following:","topology id","node id","connection status","connection message","Supported topologies are cli, netconf and gnmi.","Sample connection notifications captured by Kafka console consumer:","CLI disconnect notification:","NETCONF connect notification:"]},{"l":"Database entities","p":["body - full notification body in JSON format","creation time - time when subscription was created","end time - time when notifications stop to be collected","event time - time when notification was generated","Example request for reading Kafka settings using RESTCONF:","Example request for reading notifications using RESTCONF:","Example request for reading subscriptions using RESTCONF:","identifier - name of the YANG notification","netconf-subscription","node id - id of the NETCONF node from which notifications should be collected","node id - node id of the NETCONF device for NETCONF notifications or identifier of UniConfig instance in case of other types of notifications","notification","Notifications are stored in the notification table. It contains the following columns:","settings","start time - time when notifications start to be collected","stream name - name of the notification stream - NETCONF stream name or UniConfig-specific stream name","stream name - NETCONF stream name","The following three tables in the database are related to notifications:","The netconf-subscription table is used to track NETCONF notification subscriptions. It contains the following columns:","The settings table contains two columns: identifier and config. Records with the identifier kafka contain configurations for Kafka that can be modified at runtime.","UniConfig instance id - instance id of UniConfig that is collecting notifications from the NETCONF device"]},{"l":"NETCONF subscriptions","p":["A subscription is required to receive NETCONF notifications from a NETCONF device. Subscriptions are created using an install request:","Subscriptions to notification streams are defined as a list with the name stream. There is one record for each stream. The only required parameter is stream-name. The following optional parameters are supported:","start-time- must be specified to enable replay and should start at the specified time.","stop time- used with the optional replay feature to indicate the newest notifications of interest. If stopTime is not specified, notifications will continue until the subscription is terminated. Must be used with and set to be later than start-time. Values in the future are valid.","The creation of a new subscription for the stream will terminate all existing subscriptions for the stream."]},{"i":"monitoring-system---processing-netconf-subscriptions","l":"Monitoring system - processing NETCONF subscriptions","p":["Inside UniConfig, NETCONF notification subscriptions are processed in an infinite loop within the monitoring system. An iteration of the monitoring system loop consists of following steps:","Check global setting for NETCONF notifications","If turned off, release all NETCONF subscriptions and end current iteration","Release cancelled subscriptions","Query free subscriptions from DB, and for each one:","Create a notification session (create mountpoint and register listeners)","Lock the subscription (set UniConfig instance)","There is a hard limit for the number of sessions that a single UniConfig node can handle. 
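Connecting this back to the NETCONF subscriptions section above, a hedged sketch of requesting a stream subscription as part of an install request might look roughly like the following. The endpoint, credentials and the exact placement of the 'stream' list inside the NETCONF parameters are assumptions; only the field names (stream, stream-name, start-time) come from the description above, and the full install-node payload is covered in the device installation guide.

```python
# Illustrative only: subscribe to the default NETCONF stream while installing
# a node. The payload structure is an assumption, not a verified schema.
import requests

UNICONFIG = "http://127.0.0.1:8181/rests"   # assumed RESTCONF endpoint
AUTH = ("admin", "admin")                    # assumed credentials

install_input = {
    "input": {
        "node-id": "device1",
        "netconf": {
            "netconf-node-topology:host": "192.168.1.10",
            "netconf-node-topology:port": 830,
            # One list entry per notification stream; only stream-name is required.
            "stream": [{"stream-name": "NETCONF"}],
        },
    }
}

resp = requests.post(
    f"{UNICONFIG}/operations/connection-manager:install-node",
    json=install_input,
    auth=AUTH,
)
print(resp.status_code, resp.text)
```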
If the limit is reached, the UniConfig node refuses any additional subscriptions.","The loop interval, hard subscription limit and maximum number of subscriptions processed per interval can be set in the file lighty-uniconfig-config.json."]},{"l":"Dedicated NETCONF session for subscription","p":["A NETCONF device may have the interleave capability that indicates support for interleaving other NETCONF operations within a notification subscription. This means that the NETCONF server can receive, process and respond to NETCONF requests on a session with an active notification subscription. As not all devices include support for this capability, the common approach for devices 'with' and 'without' interleave capability is to track notifications with a separate NETCONF session. To support this functionality, UniConfig creates a separate NETCONF session with a separate mount-point for every subscription. These mount points and sessions are automatically destroyed when the corresponding subscription is closed.","monitoring-system"]},{"l":"Subscription to data-change events"},{"l":"Creating a new subscription","p":["'BASE': Represents only a direct change of the node on subtree-path, such as replacement of a node, addition or deletion.","'ONE': Represent a change (addition, replacement, or deletion) of the node on the subtree-path or one of its direct child elements.","'SUBTREE': Represents a change of the node or any of its child nodes, direct and nested. This scope is a superset of ONE and BASE. This is the default value.","captured data-change-events from whole node configuration.","data-change-scope: Data-tree scope that specifies how granular data-change-events should be captured and propagated to Kafka. There are three options:","deleting existing subscription","displaying information about created subscription using RPC","Example: Creating a subscription to the node device1 in the uniconfig topology, and to the whole configuration subtree '/interfaces'.","Example: Creating a subscription to the uniconfig topology and to the whole /interfaces configuration subtree.","node-id: Identifier for the node from which data-change-events are generated. This field is optional. If not given, a global subscription is created and data-change-events are generated for all nodes under the topology.","RPC input contains the following:","RPC output contains only the generated 'subscription-id' in the format of UUID. This subscription identifier represents a token that can be used for the following:","sorting received Kafka messages","Subscriptions to data-change events are created using the 'create-data-change-subscription' RPC. After the subscription is done, UniConfig listens to data-change events on selected nodes and subtrees and distributes the corresponding messages to a dedicated Kafka topic.","subtree-path: Path to the subtree from which the user would like to receive data-change-events. Default path is '/'","topology-id: Identifier for the topology where the specified node is placed."]},{"l":"Removing a subscription","p":["Existing subscriptions can be removed using the delete-data-change-subscription RPC and the provided subscription-id. After a subscription is removed, UniConfig stops generating new data-change events related to the subscribed path.","RPC input contains only subscription-id, a unique identifier for the subscription to data-change events. RPC output does not contain a body. 
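A small sketch of driving the two data-change-subscription RPCs described above from a script is shown below. The RESTCONF endpoint, credentials and the module prefix used in the RPC names are assumptions; only the input and output fields (topology-id, node-id, subtree-path, data-change-scope, subscription-id) come from the description above.

```python
# Sketch: create and later remove a data-change-event subscription.
# The 'data-change-events' module prefix is an assumption for illustration.
import requests

UNICONFIG = "http://127.0.0.1:8181/rests"   # assumed endpoint
AUTH = ("admin", "admin")                    # assumed credentials

create_body = {
    "input": {
        "topology-id": "uniconfig",
        "node-id": "device1",
        "subtree-path": "/interfaces",
        "data-change-scope": "SUBTREE",
    }
}
created = requests.post(
    f"{UNICONFIG}/operations/data-change-events:create-data-change-subscription",
    json=create_body, auth=AUTH,
).json()
subscription_id = created["output"]["subscription-id"]

# Remove the subscription once it is no longer needed.
requests.post(
    f"{UNICONFIG}/operations/data-change-events:delete-data-change-subscription",
    json={"input": {"subscription-id": subscription_id}},
    auth=AUTH,
)
```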
The RPC returns 404 if no subscription exists for the provided identifier.","Example: Removing a subscription with the ID 8e82453d-4ea8-4c26-a74e-50d855a721fa."]},{"l":"Successful Example"},{"l":"Failed Example"},{"l":"Showing information about subscription","p":["The RPC show-subscription-data is used to display information about a created subscription.","RPC input contains the identifier of the target subscription.","RPC output for existing subscriptions contains 'topology-id', 'node-id', 'subtree-path' and 'data-change-scope' - the same fields that can also be specified in the 'create-data-change-subscription' RPC input.","If no subscription exists with the specified ID, the RPC returns a 404 status code with a standard RESTCONF error container.","Example: Displaying information"]},{"i":"successful-example-1","l":"Successful Example"},{"i":"failed-example-1","l":"Failed Example","p":["It is also possible to fetch all created subscriptions under a specific node or topology by sending a GET request to the data-change-subscriptions list under the node list item (operational data).","Example (there are two subscriptions under the device1 node):"]},{"l":"Configuration","p":["All notifications and the monitoring system can be enabled or disabled using the enabled flag.","All settings related to Kafka are grouped under kafka property. For authentication, there are the username and password properties. For the Kafka connection, there is the kafkaServers property. This contains a list of Kafka servers as a combination of brokerHost and brokerListeningPort. Broker host can be either an IP address or hostname.","archiveUrl - where to download kafka from","Audit logs settings are under auditLogs property. Currently there is only one flag includeResponseBody, which is used to enable or disable logging of the body of RESTCONF responses.","auditLogsEnabled","auditLogsTopicName - topic name for audit logs","blockingTimeout - How long the send() method and the creation of a connection for reading metadata methods will block (in ms).","cleanDataBeforeStart - if kafka config should be cleared before start","Configurations for notifications are in the lighty-uniconfig-config.json file, under the notifications property. The entire configuration looks like this:","dataChangeEventsEnabled","dataChangeEventsTopicName - topic name for data-change-events","dataDir - kafka data directory","deliveryTimeout - The upper bound on the time to report success or failure after a call to send() returns (in ms). Sets a limit on the total time that a record will be delayed prior to sending, the time to wait for acknowledgement from the broker (if expected) and the time allowed for retriable send failures.","enabled - flag that enables or disables embedded kafka","installDir - where should be kafka files placed","Kafka settings are also stored in the db. This way they can be changed at runtime using RESTCONF or UniConfig shell. Kafka setting are stored in the settings table.","maxAge - Maximum age of a record in the notifications table (in hours). Records older than this value are deleted. The default value is 100.","maxCount - Maximum number of records in the notifications table. If the number of records exceeds this value, the oldest record in the table is deleted. 
The default value is 10,000.","maxNetconfSubscriptionsHardLimit - Maximum number of subscriptions that a single UniConfig node can handle.","maxSubscriptionsPerInterval - The maximum number of free subscriptions that can be acquired in a single iteration of the monitoring system loop. If the number of free subscriptions is smaller than this value, all free subscriptions are processed. If the number of free subscriptions is larger than this value, only the specified number of subscriptions are acquired. The rest can be acquired during the next iterations of the monitoring system loop or by other UniConfig instances in the cluster. The default value is 10.","maxThreadPoolSize - The maximum thread pool size in the executor.","netconfNotificationsEnabled","netconfNotificationsTopicName - topic name for NETCONF notifications","optimalNetconfSubscriptionsApproachingMargin - The lower margin to calculate optimal range start. The default value is 0.05.","optimalNetconfSubscriptionsReachedMargin - The higher margin to calculate optimal range end. The default value is 0.10.","queueCapacity - The maximum capacity for the work queue in the executor.","rebalanceOnUCNodeGoingDownGracePeriod - Grace period for a UniConfig node going down. Other nodes will not restart subscriptions until the grace period has passed after a dead UniConfig node was last seen. The default value is 120 seconds.","requestTimeout - How long the producer waits for acknowledgement of a request (in ms). If no acknowledgement is received before the timeout period is over, the producer will resend the request or, if retries are exhausted, fail it.","subscriptionsMonitoringInterval - How often the monitoring system loop is run and attempts to acquire free subscriptions. The value is given in seconds, the default value is 5.","These properties are under notificationDbTreshold. Both of these are implemented using database triggers. Triggers run on inserts to the notifications table.","Three (3) properties related to the monitoring system in clustered environments:","Three (3) properties related to the monitoring system:","Three (3) properties related to the timeout of messages to Kafka:","transactionNotificationsEnabled","transactionsTopicName - topic name for notifications about transactions","Two (2) properties related to the thread pool executor required to send messages to Kafka:","Two (2) properties used to limit the number of records in the notifications table in the database:","You can also set up embedded Kafka. These settings are grouped under the embeddedKafka property:","You can configure the names of all topics for every notification type. The following flags are used for this:","You can enable or disable each type of notification independently of others. The following flags are used for this:"]},{"i":"kafka-client---example","l":"Kafka client - example","p":["To read notifications from Kafka, you can use the command line consumer. Run the following command in the Kafka installation directory:","It is important to properly set up the hostname, port and topic name. The output after creation of a NETCONF notification looks something like this:"]}],[{"l":"Operational data about transactions"},{"l":"Operational data about transactions","p":["To provide a better overview of UniConfig transactions, operational data is available about all open transactions.","Data about transactions contain:","identifier (uuid)","creation time","last access time","idle timeout","hard timeout","list of changed nodes (incl. 
topologies)","additional context (random string, text column)","Data about transactions can be read using RESTCONF:","Example data about transactions:"]}],[{"l":"Templates Manager"},{"l":"Overview","p":["Templates can be used for reusing of some configuration and afterwards easier application of this configuration into target UniConfig nodes.","Basic properties of templates as they are implemented in UniConfig:","All templates are stored under 'templates' topology and each template is represented by separate 'node' list entry.","Whole template configuration is placed under'frinx-uniconfig-topology:configuration' container in the Configuration datastore. Because of this, configuration of template can be accessed and modified in the same way like modification of UniConfig node.","Templates are validated against single schema context. Schema context, against which validation is enabled, is selected at creation of template using 'uniconfig-schema-repository' query parameter. Value of the query parameter defines name of the schema repository that is placed under UniConfig distribution in form of the directory.","Currently implemented template features:","Variables- They are used for parametrisation of templates.","Tags- Tags can be used for selection of an operation that should be applied for the specific subtree at application of template to UniConfig node.","Schema validation of leaves and leaf-lists is adjusted, so it can accept both string with variables and original YANG type."]},{"l":"Latest-schema","p":["Latest-schema defines name of the schema repository of which built schema context is used for template validation. Latest-schema is used only if there is not 'uniconfig-schema-repository' query parameter when creating template. If 'uniconfig-schema-repository' query parameter is defined, latest-schema is ignored."]},{"l":"Configuration of the latest-schema","p":["Latest-schema can be set using PUT request. It will be placed in Config datastore. Name of directory has to point to existing schema repository that is placed under UniConfig distribution.","GET request can be used for check if latest-schema is placed in config datastore."]},{"l":"Auto-upgrading of the latest-schema","p":["Latest-schema can be automatically upgraded by UniConfig after installation of new YANG repository. YANG repository is installed after deploying of new type of NETCONF/GRPC device or after manual invocation of RPC for loading of new YANG repository from directory.","In order to enable auto-upgrading process, 'latestSchemaReferenceModuleName' must be specified in the'config/lighty-uniconfig-config.json' file:","After new YANG repository is installed, then UniConfig will look for revision of module'latestSchemaReferenceModuleName' in the repository. If found revision is more recent than the last cached revision, UniConfig will automatically write identifier of the fresh repository into 'latest-schema' configuration. Afterwards, 'latest-schema' is used by UniConfig the same way as it would be written manually via RESTCONF."]},{"l":"Variables","p":["Using variables it is possible to parametrise values in the template. 
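To make the substitution rules described below more concrete, here is a small standalone Python sketch that mimics them on a plain string. It is not UniConfig code: the '{$variable-id}' pattern comes from this section, while treating backslash-escaped '$', '{' and '}' as literal characters is only an assumption made for the illustration.

```python
# Standalone illustration of '{$variable-id}' substitution with escaping.
# This mimics the described behaviour; it is not UniConfig code.
import re

VAR_PATTERN = re.compile(r"(?<!\\)\{\$(.+?)(?<!\\)\}")

def substitute(text: str, values: dict) -> str:
    def replace(match: re.Match) -> str:
        var_id = match.group(1).replace("\\", "")
        return str(values[var_id])
    resolved = VAR_PATTERN.sub(replace, text)
    # Drop escape characters so '\{' and '\$' end up as plain '{' and '$'.
    return resolved.replace("\\", "")

print(substitute("eth-0/{$interface-id}", {"interface-id": 4}))      # eth-0/4
print(substitute("{$var-1}.{$var-2}", {"var-1": 10, "var-2": 9}))    # 10.9
print(substitute(r"literal \{\$not-a-var\}", {}))                    # literal {$not-a-var}
```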
Structural parametrisation is not currently supported.","Properties:","Format of the variable: '{$variable-id}'.","Variables can be set to each leaf and leaf-list in the template.","Single leaf or leaf-list may contain multiple variables.","Key of the list can also contain variable.","Variables are substituted by provided values at the application of template to UniConfig node.","It is possible to escape characters of the variable pattern ('$','{', '}'), so they will be interpreted as value and not part of the variable.","Variable identifier may contain any UTF-8 characters. Characters'$', '{', '}' must be escaped, if they are part of the variable identifier."]},{"l":"Examples with variables","p":["A. Leaf with one variable","Application of following values to variables 'var-a' and 'var-b':'var-a' = ['10', '20', '30'], 'var-b' = ['50', '70', '60'].","Application of values - 'var-x': 'next', 'var-y': '7', 'var-1': '10','var-2': '9'. Leaf 'leaf-a' has 'string' type and 'leaf-b' has 'int32' type.","Application of values '10' and 'false' to 'var-1', and 'var-2'. Leaf'leaf-a' has 'int32' type and 'leaf-b' has 'boolean' type.","B. Leaf with multiple variables","Both variables must be substituted by the same number of values.","C. Leaf-list with one variable","D. Leaf-list with multiple variables","E. Leaf-list with entry that contains multiple variables","F. Leaves and leaf-lists with escaped special characters","If leaf-list is marked as \"ordered-by user\", then the order of leaf-list elements is preserved during substitution process.","It is possible to substitute both variables with one or multiple variables.","Leaf 'leaf-a' contains 2 variables and surrounding text that is not part of any variable.","Leaf 'leaf-b' contains 2 variable without additional text - substituted values of these variables are concatenated at application of template.","Leaf-list 'leaf-list-a' contains 2 variables inside one leaf-list entry: 'var-a' and 'var-b'.","Leaf-list 'leaf-list-a' contains 2 variables with identifiers'var-a' and 'var-2'. String \"str3\" represents constant value.","Leaf-list 'leaf-list-a' contains variable with identifier 'var-x'.","Substitution of 'var-1' by 'prefix' and 'var-{2}' by '10':","Substitution of 'var-a' with texts 'str1', 'str2' and 'var-b' with'str4' results in ('string' type):","Substitution of 'var-x' with numbers '10', '20', '30' results in('int32' type):","The following example demonstrates escaping of special characters outside of the variable identifier (leaf-list 'leaf-list-a') and inside of the variable identifier (leaf 'leaf-a').","The following example shows 2 leaves with 2 variables: 'var-1' and'var-2'.","This variable can be substituted by one or multiple values. If multiple values are provided in the apply-template RPC, they are'unwrapped' to the leaf-list in form of next leaf-list entries.","Unescaped identifier of the leaf 'leaf-a': 'var-{2}'."]},{"l":"Tags","p":["By default, all templates have assigned 'merge' tag to the root'configuration' container - if template doesn't explicitly define next tags in the data-tree, then the whole template is merged to target UniConfig node configuration at execution of apply-template RPC. 
However, it is possible to set custom tags to data-tree elements of the template.","Properties:","Tags are represented in UniConfig using node attributes with the following identifier: 'template-tags:operation'.","In RESTCONF, attributes are encoded using a special notation that is explained in the 'RESTCONF' user guide.","Tags are inherited through the data-tree of the template. If a data-tree element doesn't define any tag, it is inherited from the parent element.","Only a single tag can be applied to one data node.","Tags can be applied to the following YANG structures: container, list, leaf-list, leaf, list entry, leaf-list entry.","Currently, the following tags are supported:","merge: Merges with a node if it exists, otherwise creates the node.","replace: Replaces a node if it exists, otherwise creates the node.","delete: Deletes the node.","create: Creates a node. The node cannot already exist. An error is raised if the node exists.","update: Merges with a node if it exists. If it does not exist, it will not be created."]},{"l":"Examples with tags","p":["A. Tags applied to container, list, and leaf","Template with name 'user_template' that contains 'merge', 'replace', and 'create' tags:","Description of all operations, in the correct order, that are done based on the defined tags:","Container 'configuration' will be merged into the target UniConfig node (implicit root operation).","Container 'system:system' will be updated - its content is merged only if it has already been created.","The whole list 'users' will be replaced in the target UniConfig node.","Leaf named 'password' will be created at the target UniConfig node - it cannot exist under the 'users' list entry, otherwise an error will be raised.","B: Tags applied to leaf-list, leaf-list entry, and list entry:","The following JSON represents the content of a sample template with multiple tags:","'replace' tag is applied to a single 'my-list' list entry","'merge' tag is applied to the whole 'leaf-list-a' leaf-list","'create' tag is applied to the whole 'leaf-list-b' leaf-list","'delete' tag is applied to a single 'leaf-list-b' entry with value '10'"]},{"l":"Creation of template","p":["A new template can be created by sending a PUT request to a new template node under the 'templates' topology with a populated 'configuration' container. The name of the template equals the name of the 'node' list entry. This RESTCONF call must specify the schema cache repository using the 'uniconfig-schema-repository' query parameter in order to successfully match the sent data-tree with the correct schema context (it is usually associated with some type of NETCONF device)."]},{"i":"example---creation-of-template","l":"Example - creation of template","p":["The following example shows the creation of a new template named 'interface_template' using the 'schemas_1' schema repository. The body of the PUT request contains the whole 'configuration' container."]},{"i":"readupdatedelete-template","l":"Read/update/delete template","p":["All CRUD operations with templates can be done using standard RESTCONF PUT/DELETE/POST/PLAIN PATCH methods. 
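Referring back to the creation example above, a script-driven version might look roughly like the sketch below. The RESTCONF base URL, credentials and the body skeleton are assumptions made for illustration; the 'templates' topology path and the 'uniconfig-schema-repository=schemas_1' query parameter follow the description above.

```python
# Sketch: create the 'interface_template' node under the 'templates' topology.
# Host, credentials and the body content are placeholders for illustration.
import requests

UNICONFIG = "http://127.0.0.1:8181/rests"
AUTH = ("admin", "admin")

template_body = {
    "frinx-uniconfig-topology:configuration": {
        # ... template configuration, possibly containing '{$...}' variables ...
    }
}

resp = requests.put(
    f"{UNICONFIG}/data/network-topology:network-topology/"
    "topology=templates/node=interface_template/"
    "frinx-uniconfig-topology:configuration",
    params={"uniconfig-schema-repository": "schemas_1"},
    json=template_body,
    auth=AUTH,
)
print(resp.status_code)
```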
As long as template contains some data under 'configuration' container, next RESTCONF calls, that work with templates, don't have to contain 'uniconfig-schema-repository' query parameter, since type of the device is already known."]},{"i":"examples---restconf-operations","l":"Examples - RESTCONF operations","p":["Reading specific subtree under 'interface_template' - unit with name'{$unit-id}' that is placed under interface with name'eth-0/{$interface-id}'.","Changing 'update' tag of the 'address' list entry to 'create' tag using PLAIN-PATCH RESTCONF method."]},{"l":"RPC get-template-info","p":["This RPC shows information about all variables in specified template. The RPC input has to contain template name."]},{"i":"creation-of-template-1","l":"Creation of template"},{"l":"Usage of RPC"},{"l":"RPC get-template-nodes","p":["This RPC returns all templates from the template topology. No input body is required."]},{"l":"Successful example","p":["There are no templates in the template topology."]},{"i":"successful-example-1","l":"Successful example","p":["There is a template called 'test-template' in the template topology."]},{"l":"Upgrading template to latest yang repository","p":["Template can be upgraded to latest YANG repository using 'upgrade-template' RPC. This procedure consists of:","Read template- Reading of template configuration from'templates' topology in Configuration datastore.","Version-drop- Conversion of template into target schema context that is created by specified yang-repository. Because of this feature, it is possible to change template between different versions of devices with different revisions of YANG schemas but with similar structure. Version-drop is also aware of 'ignoredDataOnWriteByExtensions' RESTCONF filtering mechanism.","Removal of previous template / writing new template- If'upgraded-template-name' is not specified in RPC input, previous template will be deleted and replaced by new one. If it is specified, previous template will not be deleted.","Description of input RPC fields:","template-name: Name of the existing input template. This field is mandatory.","upgraded-template-name: Name of upgraded/new template. This field is optional.","yang-repository: Name of YANG repository against which version-dropping is used. This field is optional. If no yang-repository is specified, latest yang repository will be used.","Description of fields in RPC response:","No fields are used, only HTTP response codes [200 - OK, 404 - Fail]"]},{"i":"usage-of-rpc-1","l":"Usage of RPC"},{"l":"Auto-upgrading of templates","p":["This feature is used to automatically upgrade all stored templates using the old YANG repository to the latest YANG repository with help from the version-drop procedure. For the auto-upgrading process to work, the latest YANG repository must already be configured. The upgrade process must be explicitly enabled in the configuration file and occurs when UniConfig is started.","There is also an option to back up templates before the upgrade with the standard rotation procedure. The names of backed-up templates follow the pattern ' backup', where '' represents the name of the original template and '' represents the backup index. The most recent backup index is always '0' and older ones are rotated by incrementing the corresponding index. 
If a backed-up template reaches the configured limit (maximum number of backups), it is permanently removed from the database.","Overview of available settings ('config/lighty-uniconfig-config.json'):","enabledTemplatesUpgrading- Enables the auto-upgrading process at UniConfig startup. If disabled, the other setting is ignored.","backupTemplatesLimit- Maximum number of stored backup templates. If exceeded, older templates are removed during the rotation procedure. If set to 0, templates are not backed up at all."]},{"l":"Application of template","p":["Application of tags- Data-tree of the template is streamed and data is applied to target UniConfig node based on set tags on data elements, recursively. UniConfig node configuration is updated only in the Configuration datastore.","Description of fields in RPC response:","Description of input RPC fields:","error-message(optional): Description of the error that occurred during application of template.","error-type(optional): Type of the error.","leaf-list-values: List of values - it can be used only with leaf-lists. Special characters ('$', '{', '}') must be escaped.","leaf-value: Scalar value of the variable. Special characters('$', '{', '}') must be escaped.","node-id: Target UniConfig node identifier (key of the list).","node-result: Per target UniConfig node results. The rule is following - all input UniConfig node IDs must also present in the response.","overall-status: Overall status of the operation as the whole. If application of the template fails on at least one UniConfig node, then overall-status will be set to 'fail' (no modification will be done in datastore). Otherwise, it will be set to 'complete'.","Processing template configuration","Read template- Reading of template configuration from'templates' topology in Configuration datastore.","RPC apply-template","status: Status of the operation: 'complete' or 'fail'.","String-substitution- Substitution of variables by provided values or default values, if there aren't any provided values for some variables and leaf/leaf-list defines a default values. If some variables cannot be substituted (for example, user forgot to specify input value of variable), an error will be returned.","Template can be applied to UniConfig nodes using 'apply-template' RPC. This procedure does following steps:","template-node-id: Name of the existing input template.","The following sequence diagram and nested activity diagram show process of 'apply-template' RPC in detail.","uniconfig-node-id: Target UniConfig node identifier.","uniconfig-node: List of target UniConfig nodes to which template is applied ('uniconfig-node-id' is the key).","variable-id: Unescaped variable identifier.","variable: List of variables and substituted values that must be used during application of template to UniConfig node. Variables must be set per target UniConfig node since it is common, that values of variables should be different on different devices. Leaf'variable-id' represents the key of this list.","Version-drop- Conversion of template into target schema context that is used by target UniConfig node. This component also drops unsupported data from input template. Because of this feature, it is possible to apply template between different versions of devices with different revisions of YANG schemas but with similar structure. 
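A rough sketch of the apply-template call described above is shown next. The input field names follow the list in this section, while the endpoint, credentials, module prefix and the exact shape of the variable values are assumptions made only for illustration.

```python
# Sketch: apply the 'service_group' template to two UniConfig nodes with
# per-node variable values. Module prefix and value shapes are assumptions.
import requests

UNICONFIG = "http://127.0.0.1:8181/rests"
AUTH = ("admin", "admin")

apply_input = {
    "input": {
        "template-node-id": "service_group",
        "uniconfig-node": [
            {
                "uniconfig-node-id": "dev1",
                "variable": [
                    {"variable-id": "unit-id", "leaf-value": "10"},
                    {"variable-id": "addresses",
                     "leaf-list-values": ["10.0.0.1", "10.0.0.2"]},
                ],
            },
            {
                "uniconfig-node-id": "dev2",
                "variable": [{"variable-id": "unit-id", "leaf-value": "20"}],
            },
        ],
    }
}

resp = requests.post(
    f"{UNICONFIG}/operations/template-manager:apply-template",
    json=apply_input, auth=AUTH,
)
print(resp.json())
```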
Version-drop is also aware of 'ignoredDataOnWriteByExtensions' RESTCONF filtering mechanism."]},{"i":"examples---apply-template-calls","l":"Examples - apply-template calls","p":["Successful application of the template 'service_group' to 2 UniConfig nodes - 'dev1' and 'dev2'.","Failed application of the template 'temp1' - template doesn't exist.","Failed application of the template 'service_group' to 2 UniConfig nodes","'dev1' and 'dev2' - user hasn't provided values for all required variables.","Failed application of the template 'redundancy_template' to UniConfig node 'dev1' - type of the substituted variable value is invalid (failed regex constraint)."]},{"l":"RPC create-multiple-templates","p":["One or more new templates can be created by this RPC. Templates are parsed and written in parallel for better performance. If specified templates already exist, their configuration is replaced. Execution of RPC is atomic - either all templates are successfully created or no changes are made in the UniConfig transaction.","Description of input RPC fields:","template-name:Name of the created template.","yang-repository: YANG schema repository used for parsing of template configuration. Default value: 'latest'.","template-configuration: Whole template configuration.","tags: List of template tags that are written on the specified paths in all created templates. Specified tag type must be prefixed with 'template-tags' module name based on RFC-8040 formatting of identityref.","Only template-name and template-configuration are mandatory fields."]},{"l":"Examples","p":["Successful creation of templates.","Failed to find YANG schema repository.","Failed to parse template configuration.","Creation of 2 templates with separately specified template tags - 'replace' tag is added to '/acl/category' and'/services/group=default/types' elements, while 'create' is added to '/services' element."]}],[{"i":"uniconfig---sending-and-receiving-data-restconf","l":"UniConfig - Sending and receiving data (RESTCONF)"},{"l":"Overview","p":["RESTCONF is described in RESTCONF RFC 8040. Put simply, RESTCONF represents a REST API for accessing datastores and UniConfig operations."]},{"l":"Datastores","p":["There are two datastores:","Config: Contains data representing the intended state. Possible to read and write via RESTCONF.","Operational: Contains data representing the actual state. Possible only to read via RESTCONF.","Each request must start with the URI /rests/. By default, RESTCONF listens on port 8181 for HTTP requests."]},{"l":"REST Operations","p":["RESTCONF supports: OPTIONS, GET, PUT, POST, PATCH, and DELETE operations. Request and response data can be either in the XML or JSON format.","XML structures according to YANG are defined at: XML-YANG.","JSON structures are defined at: JSON-YANG.","Data in the request must set the Content-Type field correctly in the HTTP header with the allowed value of the media type. The media type of the requested data must be set in the Accept field. Get the media types for each resource by calling the OPTIONS operation.","Most of the paths use Instance Identifier. is used in the explanation of the operations and must adhere to these rules:","Identifier must start with :> where is a name of the YANG module and is the name of a node in the module. If the next node name is placed in the same namespace as the previous one, it is sufficient to just use after the first definition of:. 
Each has to be separated by /."," can represent a data node which is a list node, container, leaf, or leaf-list YANG built-in type. If the data node is a list, there must be defined ordered keys of the list behind the data node name, for example, =,. ..","The following example shows how reserved characters are percent-encoded within a key value. The value of \"key1\" contains a comma, single-quote, double-quote, colon, double-quote, space, and forward slash (,'\":\" /). Note that double-quote is not a reserved character and does not need to be percent-encoded. The value of \"key2\" is the empty string, and the value of \"key3\" is the string \"foo\".","Example URL: /rests/data/example-top:top/list1=%2C%27\"%3A\"%20%2F,,foo","The format : has to be used in this case as well. Module A has node A1. Module B augments node A1 by adding node X. Module C augments node A1 by adding node X. For clarity, it has to be known which node is X (for example: C:X)."]},{"l":"Mount point","p":["The purpose of yang-ext:mount container is to access southbound mountpoint, when the node is already installed in Uniconfig (After install-node RPC). It exposes operations for reading device data which can only be done under connection-specific topology (cli/netconf) with defined node-id in URI. In this case, the URI has to be in the format/ yang-ext:mount/. The first is the path to a mount point and the second is the path to subtree behind the mount point. An URI can end in a mount point itself by using /yang-ext:mount. In this case, if there is no content parameter, whole operational and configuration data will be read.","Examples of retrieving data behind yang-ext:mount","In this request, we are using parameter content=config, this means we are reading candidate NETCONF datastore. Value config of parameter content is translated into get-config NETCONF RPC.","In this request we are using parameter content=nonconfig, which means that we are reading running NETCONF datastore. Value nonconfig is translated into get NETCONF RPC. We can compare it with data directly from device using show running-config command.","Examples of invocation of yang actions behind yang-ext:mount.","Invocation of yang action -> List available firmware packages on disk","Invocation of yang action -> Erase running-config-then load","To completely understand installing of node see Device installation."]},{"l":"HTTP methods"},{"i":"options-rests","l":"OPTIONS /rests","p":["Returns the XML description of the resources with the required request and response media types in Web Application Description Language (WADL)."]},{"i":"get-restsdataidentifiercontentconfig","l":"GET /rests/data/?content=config","p":["Returns a data node from the Config datastore."," points to a data node that must be retrieved.","Value 'config' represents default value of content query parameter - it doesn't have to be specified, if user would like to read intended/uncommitted changes from Config datastore.","Request GET '/rests/data/' would return the same data."]},{"i":"get-restsdataidentifiercontentnonconfig","l":"GET /rests/data/?content=nonconfig","p":["Returns the value of the data node from the Operational datastore."," points to a data node that must be retrieved."]},{"i":"get-restsdataidentifiercontentall","l":"GET /rests/data/?content=all","p":["Returns a data node from both Config and Operational datastores. 
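As a small illustration of the content query parameter described in these sections, the sketch below reads the same path from the Config datastore, the Operational datastore, and both. The host, credentials and example node path are placeholders only.

```python
# Sketch: read intended (config) vs. actual (nonconfig) vs. merged (all) state.
# The node path below is only a placeholder.
import requests

UNICONFIG = "http://127.0.0.1:8181/rests"
AUTH = ("admin", "admin")
PATH = ("/data/network-topology:network-topology/topology=uniconfig/"
        "node=device1/frinx-uniconfig-topology:configuration")

for content in ("config", "nonconfig", "all"):
    resp = requests.get(f"{UNICONFIG}{PATH}", params={"content": content}, auth=AUTH)
    print(content, resp.status_code)
```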
The outputs from both datastores are merged into one output."," points to a data node that must be retrieved."]},{"i":"put-restsdataidentifier","l":"PUT /rests/data/","p":["Updates or creates data in the Config datastore and returns the state about success."," points to a data node that must be stored.","Content type does not have to be specified in URI - it can only be the Configuration datastore."]},{"i":"post-restsdataidentifier","l":"POST /rests/data/","p":["Creates the data if it does not exist in the Config datastore, and returns the state about success."," points to a data node where data must be stored.","The root element of data must have the namespace (data is in XML) or module name (data is in JSON)."]},{"i":"post-restsdata","l":"POST /rests/data","p":["Creates the data if it does not exist under data root.","In the following example, the 'toaster' module is the root container in YANG (it doesn't have any parent). This example also makes it clear that URI doesn't contain 'toaster' node in comparison to a PUT request that must contain the name of the created node in URI."]},{"i":"delete-restsdataidentifier","l":"DELETE /rests/data/","p":["Removes the data node in the Config datastore and returns the state about success."," points to a data node that must be removed."]},{"i":"patch-restsdataidentifier","l":"PATCH /rests/data/","p":["The patch request merges the contents of the message-body with the target resource in the Configuration datastore (content-type query parameter is not specified)."," points to a data node on which PATCH operations is invoked.","This request is implemented by Plain PATCH functionality, see more details on the following page: RFC-8040 documentation - Plain PATCH operation.","Plain patch can be used to create or update, but not delete, a child resource within the target resource. Any pre-existing data which is not explicitly overwritten will be preserved. This means that if you store a container, its child entities will also merge recursively.","The following example shows the PATCH request used for modification of Ethernet interface IP address and two connection settings. 
Note that other settings under the system:system container are left untouched, including other leaves under the 'connection' container and the 'ethernet' list item."]},{"i":"patch-restsdataidentifierapply-tagstrue","l":"PATCH /rests/data/?apply-tags=true","p":["The PATCH request with the parameter apply-tags=true allows the use of tags.","Tags allow us to use a different operation for separate elements instead of merging the whole content, as is done without tags.","The following tags are supported: merge, replace, delete, create and update.","Usage of these tags is explained in the Templates manager: here.","The following example shows a PATCH request used for modification of interfaces on an IOS XE device, including creating, deleting, and replacing interface configuration."]},{"i":"post-restsoperationsmodulenamerpcname","l":"POST /rests/operations/moduleName:rpcName","p":["Invokes RPC on the specified path.","moduleName:rpcName - moduleName is the name of the module and rpcName is the name of the RPC in this module.","The root element of the data sent to the RPC must have the name “input”.","The result has the status code and optionally retrieved data having the root element “output”.","The answer from the server could be:","A GET /rests/operations request can be used to retrieve all available RPCs that are registered in the distribution.","More information is available in the RESTCONF RFC 8040."]},{"i":"post-restsdatapath-to-operation","l":"POST /rests/data/path-to-operation","p":["Invokes action on the specified path in the data tree.","The path-to-operation placeholder represents the data path to the operation definition that is specified under a composite data schema node in YANG (only containers and lists may contain action definitions).","The content query parameter doesn't have to be specified (it will be ignored); an action is represented equally in the Operational and Config datastores.","Both RFC-8040 (YANG 1.1) and TAIL-F actions are supported. TAIL-F actions can be placed in both YANG 1.0 and YANG 1.1 schemas. There aren't any differences in the invocation of these types of actions using the RESTCONF API.","The body of the action invocation request may contain a root 'input' container. If the action definition has no specified input container, it is not required to specify the body in the request.","The response contains the status code and optionally retrieved data having the root element 'output'.","Currently, FRINX UniConfig only supports invocation of actions under a NETCONF mountpoint; the path must contain the 'yang-ext:mount' container.","The structure of the 'input' and 'output' elements is the same as the structure of these containers when invoking a YANG RPC.","Assume the following YANG snippet with a root container named 'interfaces':","Invocation of the action named 'compute-stats' that is placed under the 'interfaces' container of a NETCONF mountpoint:","Difference between RPCs and actions: Actions are bound to a data tree and they can be placed under containers and lists (they cannot be specified as root entities in the YANG schema). RPCs are not placed in the data tree and for this reason, they can only be specified as root entities in the YANG schema."]},{"l":"Selecting Data","p":["For selecting and identifying data, it is good to use the query parameter fields. This parameter can only be used with the GET method. 
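A short sketch of the fields parameter in practice is shown below; the rules for composing field expressions are described next. The endpoint, credentials, path and the exact field expression are placeholders, assumed only for illustration.

```python
# Sketch: return only interface names and config types instead of the whole
# interfaces subtree. Path and field expression are illustrative.
import requests

UNICONFIG = "http://127.0.0.1:8181/rests"
AUTH = ("admin", "admin")

resp = requests.get(
    f"{UNICONFIG}/data/network-topology:network-topology/topology=uniconfig/"
    "node=device1/frinx-uniconfig-topology:configuration/interfaces",
    params={"fields": "interface(name;config/type)", "content": "config"},
    auth=AUTH,
)
print(resp.json())
```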
The response body is output filtered by field-expression as the value of fields parameter."]},{"l":"Fields","p":["The response body is the output filtered by the field-expression as a value of the fields parameter.","The example of using the fields parameter: path?fields=field_expression","There are several rules, that need to be followed:","For filtering more than one field of the same parent, \";\" needs to be used. Example : path?fields=field1;field2, where field1 and field2 has the same parent, which is the very last part of the path.","For nesting, \"/\" needs to be used. Example : path?fields=field1;pathField/field2, where field1 and field2 has not the same parent, but pathField is on the same level as field1.","This is a different approach to do nesting, however, the difference between \"(\" and \"/\" is that once we use \"/\" for specifying some field, we cannot identify another field from the upper layers.","This is the case where pathField1 and pathField2 have the same parent, this is not allowed, because once we use \";\" it is expected to specify fields on the same layer as field1","Examples: With 2 approaches (nesting, sub-selecting)","Example of filtering the entire configuration of all interfaces (name, with the config):","Example of filtering all names of interfaces and all names of configs of interfaces:","Example of filtering all names of interfaces with type from the config of interfaces:"]},{"l":"Filtering Data","p":["For filtering data based on specific value is good to use jsonb-filter query parameter. This parameter has to be used only with the GET method."]},{"l":"Jsonb-filter","p":["JSONB filtering"]},{"l":"Pagination","p":["To further extend the ability to filter data according to our needs, we can use pagination in the GET method.","There are 3 pagination parameters that can be used individually or in combination with each other :","offset : This parameter lets us choose on which list entry value we want data to start rendering.","limit : Limit gives us the option to control how many node values are going to be displayed in our GET request.","fetch=count : Used to obtain the amount of children nodes in a specific node.","Beware that pagination works only for list nodes.","The example of using individual pagination parameter:","The example of using two pagination parameters simultaneously:","The example of using fetch count parameter:","The response body of fetch count parameter with a path from the previous example:"]},{"l":"Sorting","p":["This utility helps us to sort list data from GET request according to our needs in ascending or descending order.","To sort some data, use a query parameter called sortby that will include at least one identifier of child leaf and sort direction. The first part of the value represents leaf identifier, the second part enclosed in brackets represents sort direction ('asc' or 'desc'). If there are multiple leaves based on which sorting is done, they are separated by semicolon.","Sorting, just like pagination, can only be used on list nodes.","The example of using sortby parameter with 1 value (sorting by the value of 'name' leaf):","The example of using sortby parameter with 2 values (sorting by values of 'name' and 'revision' leaves, in that order):","The example of using sortby and pagination simultaneously:","It is possible to specify module-name as part of the leaf identifier. Module-name must be specified only if there are multiple children leaves with the same identifier but specified from different namespaces. 
Example:","In the case of union types specified on leaf nodes, sorting is done in the blocks that are ordered by the following strategy:","leaves without value","empty type","boolean type","random numeric type","types that can be represented by JSON string"]},{"l":"Inserting"},{"l":"Insert query parameter","p":["The 'insert' query parameter can be used to specify how an item should be inserted within an list or leaf-list. This parameter is only supported for the POST and PUT methods. It is also only supported if the target list or leaf-list is marked as 'ordered-by user' in YANG model.","The allowed values for 'insert' query parameter:","Value","Description","first","Insert the new item as the new first entry.","last","Insert the new item as the new last entry (default value).","before","Insert the new item before the insertion point, as specified by the value of the 'point' query parameter.","after","Insert the new data after the insertion point, as specified by the value of the \"point\" parameter.","If the values 'before' or 'after' are used, then a 'point' query parameter for the 'insert' query parameter MUST also be present."]},{"l":"Point query parameter","p":["The 'point' query parameter is used to specify the insertion point for an item that is being created or moved within an'ordered-by user' list or leaf-list. Like the 'insert' query parameter, 'point' query parameter is only supported for the POST and PUT methods and also if the target list or leaf-list is marked as 'ordered-by user' in YANG model. The value of the 'point' query parameter is a string that indicates the key of the insertion point item. If the key is composite, the key items must be separated by a comma."]},{"l":"Examples","p":["Next five examples show usage of 'insert' and 'point' query parameters for leaf-list. First example shows how leaf-list looks before update. There are no differences in the use of the list and leaf-list."]},{"l":"List before update"},{"l":"Insert item at the top of the list"},{"l":"Insert item at the bottom of the list"},{"l":"Insert item after specific item"},{"l":"Insert item before specific item"},{"l":"Retrieving data"},{"l":"With-defaults query parameter","p":["All data nodes are reported, including any data nodes with YANG default in scheme, which are not set by client are reported.","Data nodes set to its YANG schema default value are not reported.","Data nodes set to its YANG schema default value by the client are reported.","Description","Example Data Set By User:","Example YANG Module:","explicit","report-all","The 'with-defaults' query parameter is used to specify how information about default data nodes is returned in response to GET requests on data resources. The response body is output filtered by value of with-defaults parameter.","The allowed values for 'with-defaults' query parameter:","The example of using the with-defaults query parameter: path?with-defaults or path?with-defaults=value","trim","Using with-defaults without value is equivalent to value 'report-all'.","Value","Value Explicit","Value Report-All or Without Value","Value Trim"]},{"l":"JSON Attributes","p":["Node attributes can be encoded in JSON by wrapping all the attributes in the '@' container and values or arrays in the '#' JSON element. 
This notation is inspired by one that is used in the 'js2xmlparser' open-source tool (conversion between JSON and XML structures): js2xmlparser","RESTCONF supports both serialization and deserialization of attributes, GET response shows all set attributes in the read data-tree and PUT/POST/PLAIN PATCH methods can be used for the writing of data nodes with attributes. Warning: attributes cannot be directly addressed using RESTCONF URI that would contain the '@' element in the path, because attributes are always bound to some data node, they are not represented by distinct nodes in the data-tree.","Reserved '@' container may contain multiple attributes. Each attribute is encoded in the same fashion as leaf nodes, there is an identifier of the attribute and attribute value.","Format of the attribute that is defined in the [module]:","Format of the attribute that is defined in the same module as the parent data entity:"]},{"i":"example---leaf-with-attributes","l":"Example - leaf with attributes","p":["Leaf without attributes:","The same leaf with set 2 attributes: 'm1:attribute-1' and'm1:attribute-2':"]},{"i":"example-container-with-attributes","l":"Example: Container with Attributes","p":["A container without attributes:","The same container with set 2 attributes: 'm1:switch' and'm2:multiplier':"]},{"i":"example-leaf-list-with-attributes","l":"Example: Leaf-list with Attributes","p":["Leaf-list without attributes:","The same leaf with set 1 attribute: 'mx:split':"]},{"i":"example-leaf-list-entry-with-attributes","l":"Example: Leaf-list Entry with Attributes","p":["Leaf-list without attributes:","Two leaf-list entries, leaf-list entry with value '10' has one attribute with identifier 'm1:prefix'. The second leaf-list entry '20' doesn't have any attributes assigned."]},{"i":"example-list-with-attributes","l":"Example: List with Attributes","p":["List without attributes:","The same list with applied single attribute: 'constraints:length'."]},{"i":"example-list-entry-with-attributes","l":"Example: List Entry with Attributes","p":["List with two list entries without attributes:","The same list entries, the first list entry doesn't contain any attribute, but the second list entry contains 2 attributes: 'm1:switch' and 'm2:multiplier'."]},{"l":"Device Schema Filters","p":["By default, all input and output data produced by RESTCONF for the selected device is fully compliant with its YANG models. Any violation of the YANG schema definitions will result in an error. Some of these restrictions can be addressed by adding the 'schemaFilters' configuration parameter for the RESTCONF."]},{"l":"Configuration Options Overview","p":["Following configuration options for 'schemaFilters' make RESTCONF processing less restrictive:"]},{"l":"Configuration Example","p":["The following example demonstrates how to enable schema filters for selected extensions and make RESTCONF ignore unknown definitions and definitions with a 'deprecated status' attribute."]},{"i":"unhide-parameter-for-readwrite-operations","l":"Unhide Parameter for READ/WRITE Operations","p":["RESTCONF supports the 'unhide' query parameter for the GET requests to include hidden definitions into the response and for PUT/POST/PATCH requests to accept hidden definitions in the input. 
This parameter value can be populated with a comma-separated list of extensions to unhide or the keyword 'all' to include all possible hidden definitions in the response.","Example of using the 'unhide' parameter for the GET and PUT/POST/PATCH requests.","Using unhide with a list of extensions","Using unhide parameter to unhide all hidden definitions"]},{"l":"Leafref validation","p":["According to YANG standard there are constraints for leafrefs. These constraints are not validated by default. Leafref validation can be enabled using checkForReferences query parameter with value set to true."]},{"i":"example","l":"Example:"},{"l":"Using leafref validation"},{"l":"Example output of failed validation","p":["If checkForReferences parameter is set to false or is not provided UniConfig will not perform leafref validation and there will be no leafref validation error."]},{"l":"Hide Empty Data Nodes","p":["Query parameter 'hideEmptyDataNodes' is used to hide empty composite data-tree nodes in response to GET call. Data nodes that contain only attribute tag are considered to be empty too. Default value is 'false' - empty nodes are displayed in the GET response."]},{"i":"example-1","l":"Example"},{"l":"Escaping keys in URI","p":["Following characters must be escaped, if they are contained in a list key value:':', '/', '?', '#', '[', ']', '@', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '='.","There are 2 ways how to escape special characters in a key value: by encoding reserved UTF-8 characters using '%HH' patten or using key delimiter."]},{"l":"Encoding reserved characters","p":["RESTCONF RFC-8040 natively allows to specify reserved characters in a key value, if they are encoded using'%HH' pattern, where 'HH' refers to hexadecimal representation of UTF-8 character.","The following request demonstrates encoding of special characters in the 'ge0/0/1' interface name.","Mappings between special characters and UTF-8 codes can be found on following site: https://www.urlencoder.org/"]},{"l":"Demarcation of key using delimiter","p":["UniConfig allows to specify delimiter used for demarcation of list key value. Afterwards, all special characters inside key are automatically escaped.","By default, key delimiter is disabled. It must be specified in the 'config/lighty-uniconfig-config.json' file:","The following request demonstrates demarcation of interface name 'ge0/0/1' using '%22' delimiter."]},{"l":"Hide Attributes","p":["Query parameter 'hideAttributes' is used to hide composite data-tree nodes attributes in response to GET call. Default value is 'false' - nodes attributes are displayed in the GET response."]},{"i":"example-2","l":"Example"},{"i":"callbacks-http-client","l":"Callbacks (http-client)","p":["Callbacks include sending GET (call-point) and POST (action) requests to the remote server. They are implemented mainly for UniConfig Shell, but can also be used by RESTCONF for UniStore nodes by using the URI prefix:"]},{"i":"examples-1","l":"Examples","p":["Example - call-point invocation in RESTCONF","Response:","Example - action invocation in RESTCONF","Callbacks must be configured before use. For more details, see Callbacks."]}],[{"l":"UniConfig Queries","p":["This module is responsible for execution of queries on the configuration of some device, template, UniStore node, or snapshot."]},{"l":"RPC query-config","p":["UniConfig exposes filtering and selection API using RPC 'query-config'. 
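Following up on the 'Escaping keys in URI' section above, the percent-encoding it describes can be produced with the Python standard library, as in this small sketch using the 'ge0/0/1' interface name from that example. The surrounding URI is a placeholder.

```python
# Encode reserved characters in a list key value before placing it in a
# RESTCONF URI (e.g. the interface name 'ge0/0/1').
from urllib.parse import quote

key = "ge0/0/1"
encoded = quote(key, safe="")      # -> 'ge0%2F0%2F1'
uri = (
    "/rests/data/network-topology:network-topology/topology=uniconfig/"
    f"node=device1/frinx-uniconfig-topology:configuration/interfaces/interface={encoded}"
)
print(uri)
```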
Filtering and selection of configuration is done only on the database side - UniConfig receives already narrowed configuration with only selected data. Since query is evaluated by the database, this feature works only with already committed data (operational data).","The following sequence diagram captures the whole process of RPC execution in detail.","Execution of RPC query-config"]},{"l":"RPC input fields","p":["topology-id: Identifier of network-topology/topology list entry. Currently, supported topologies, under which this RPC can be used, are: 'uniconfig', 'templates', 'unistore', and snapshot topologies.","node-id: Identifier of specific network-topology/node list entry whose configuration is filtered using specified jsonb-path-query.","jsonb-path-query: JSONB-path query used for selection and filtering of subtrees in the node configuration stored in the PostgreSQL. JSONB-path must start from root \"frinx-uniconfig-topology:configuration\" container(it is always represented by absolute path).","JSONB-path query syntax is specified by PostgreSQL. You can find detailed description of all features with examples on the following link (version 14): https://www.postgresql.org/docs/14/functions-json.html#FUNCTIONS-SQLJSON-PATH"]},{"l":"RPC output fields","p":["config: List of selected and filtered JSON objects. Note that database may return multiple list entries, if the last element in the JSONB-path is represented by list/leaf-list YANG schema node. In other cases, only one or no JSON object is displayed on output based on fulfilling the filtering and selection criteria."]},{"i":"example-selection-of-json-object","l":"Example: selection of JSON object","p":["The following request demonstrated execution of simple selection query under the 'dev01' from 'uniconfig' topology. Response contains 1 JSON object - 'ssh' container.","JSONB-path query should always start with $.\"frinx-uniconfig-topology:configuration\" pattern because 'configuration' represents wrapping element for all root data elements that are stored in database.","Be aware that PostgreSQL requires escaping of special characters in the identifiers of JSON elements. For example,':' and '-' represent special characters. Because of this behaviour, it is always safer to put double quotes around all identifiers as it is done in this example."]},{"i":"example-filtering-list-of-json-objects","l":"Example: filtering list of JSON objects","p":["The next query demonstrates filtering of 'address' JSON objects using predicate based on 'ipv4-address'(the first octet must have a value '80'). Addresses under all 'controller' list entries are filtered. In this example, response contains multiple JSON objects representing 'address' list entries."]},{"i":"example-selection-of-leaf-list-content","l":"Example: selection of leaf-list content","p":["The next request shows selection of all addresses under ethernet interfaces with type 'vxlan' and 'enabled' flag set to 'true'. 
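A sketch of invoking query-config from a script is shown below. The input field names come from this section; the endpoint, credentials, the module prefix of the RPC and the selected JSON element are assumptions made only for illustration.

```python
# Sketch: run a JSONB-path selection against the committed configuration of
# node 'dev01'. The 'uniconfig-query' module prefix and the selected element
# name are assumptions.
import requests

UNICONFIG = "http://127.0.0.1:8181/rests"
AUTH = ("admin", "admin")

query_input = {
    "input": {
        "topology-id": "uniconfig",
        "node-id": "dev01",
        "jsonb-path-query": '$."frinx-uniconfig-topology:configuration"."interfaces"',
    }
}

resp = requests.post(
    f"{UNICONFIG}/operations/uniconfig-query:query-config",
    json=query_input, auth=AUTH,
)
print(resp.json().get("output", {}).get("config"))
```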
Response will contain aggregated array of strings, because 'address' is represented by leaf-list."]},{"i":"example-non-existing-node","l":"Example: non-existing node","p":["If node with specified identifier doesn't exist under target topology, RPC will return 400 with corresponding error message."]},{"i":"example-syntax-error","l":"Example: syntax error","p":["In case of invalid form of input 'jsonb-path-query', UniConfig will return 400 status code with error-message describing syntax error."]}],[{"l":"UniConfig Shell","p":["UniConfig shell is a command-line interface for Uniconfig.","Accessible over SSH, it allows users to interact with Uniconfig features including the following:","Read operational data of devices","Manipulate device configuration","Manipulate configuration templates","Manipulate data stored in Unistore","Invoke device or UniConfig operations","Manipulate global UniConfig settings","As Uniconfig shell is model-driven, its interface is mostly auto-generated from YANG schemas (e.g., tree structure of data-nodes or available RPC/action operations)."]},{"l":"Configuration","p":["By default, UniConfig shell is disabled. To enable it, set the configuration parameter cliShell/sshServer/enabled to true in the config/lighty-uniconfig-config.json file.","All available settings and descriptions are listed below:","After starting UniConfig, the SSH server will listen for connections on port 2022 and the loopback interface."]},{"l":"Navigating in the shell","p":["Every command line starts with a command prompt that ends with the character. The identifier of the command prompt changes based on the current shell mode and the state of execution in this mode.","The commands exit and quit are available in all shell modes:","exit returns the state to the parent state","quit returns the state to the nearest parent mode (e.g., configuration mode, root mode, operational show mode). If the current state of the shell represents some mode, 'quit' and 'exit' have the same effect of returning to the parent mode.","Typed commands are sent to UniConfig using the ENTER key. UniConfig processes the command and may send a response to the console depending on the command behaviour. All commands are processed synchronously, meaning that multiple commands cannot be executed in parallel in the same SSH session.","CTRL-A and CTRL-E move the cursor to the beginning or end of the current line.","CTRL-L clears the shell screen.","Arrow keys UP/DOWN are used to load previous commands in the command history.","CTRL-C cancels the current line and moves to a new blank line.","TAB loads suggestions in the current context. Hit TAB again to navigate through suggested commands using the arrow keys and select using ENTER. Leave the submode with suggestions using the shortcut CTRL-E. The text in brackets contains a description of the next command.","If the output is longer than the length of the command-line window, the output is displayed with scrolling capability. Use ENTER to display the next line and SPACE to display the next page. Use the q key to leave scrolling mode. 
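A minimal sketch of opening the shell with the defaults described above (SSH server listening on the loopback interface, port 2022); the username is an assumption.

```bash
# Sketch only: the user name is an assumption; port 2022 and the loopback binding
# are the documented defaults.
ssh admin@127.0.0.1 -p 2022
```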
You can only scroll in one direction, towards the end of the output.","Scrolling through long output"]},{"l":"Root mode","p":["Root mode is the initial mode after successful authentication.","Example: Log into UniConfig shell:","The exit command is used to exit the UniConfig shell interface altogether (disconnecting the SSH client).","Example - Exit UniConfig shell:","Currently, only username/password single-user authentication is supported, as configured in the application.properties file."]},{"l":"Accessing sub-modes","p":["Root mode acts as a gateway to open the configuration and show modes.","Example - Switch to configuration mode:"]},{"l":"Show command history","p":["The show-history command is used to display a list of the N last invoked commands. This command is also available in configuration mode.","Example - Show the last five executed commands:","Note that the list of invoked commands persists across UniConfig restarts and SSH connections."]},{"l":"Unhide and hide operations","p":["The following commands are used to unhide and hide attributes in application properties:","unhide-get is used to unhide an attribute hidden in application properties for read purposes.","unhide-set is used to unhide an attribute hidden in application properties for write purposes.","hide-get is used to hide attributes that were unhidden with unhide-get.","hide-set is used to hide attributes that were unhidden with unhide-set.","When unhide is set for a GET or SET operation, the request URL for the operation contains the unhide query parameter. In the following example, the unhide parameter is set to all:","http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=vnf21/configuration?unhide=all","The command also gives confirmation that the attribute was added to or removed from the unhidden list.","When unhide-get or hide-get is called without parameters, the output contains a list of all unhidden parameters. The same applies to unhide-set and hide-set.","When used with the parameter all, the unhide operation applies to all parameters defined in application properties for read or write purposes."]},{"l":"Configuration mode","p":["Configuration mode provides access to the following:","CRUD operations on top of persisted UniConfig, UniStore and template nodes","CRUD operations on top of persisted UniConfig settings","UniConfig RPC operations such as commit or calculate-diff","After opening configuration mode, a new UniConfig transaction is created. All operations invoked in configuration mode are executed in the scope of the created transaction. The transaction is automatically closed after leaving configuration mode (exit or quit command).","If commit or checked-commit is invoked, the transaction is automatically refreshed. The user stays in configuration mode with a newly created transaction.","Commands like SET / SHOW / DELETE are now available only on a specific device and are not accessible in root configuration mode."]},{"l":"Show configuration","p":["The show operation can be used to display selected subtrees.","The subtree path can be constructed interactively with the help of the shell suggestion / auto-completion mechanism. 
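The unhide URL quoted above can also be exercised directly over RESTCONF; a minimal sketch, assuming basic-auth credentials:

```bash
# The URL is the one quoted above; the credentials are an assumption.
curl -u admin:admin -H 'Accept: application/json' \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=vnf21/configuration?unhide=all'
```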
Construction of the path works the same way for SET / SHOW/ DELETE operations.","Example - Display the configuration of a selected container:","First move into a specific topology on a specific device:","After this, the show operation is available:"]},{"l":"Delete configuration","p":["The delete operation removes a selected subtree.","Example - Remove a container:","First move to a specific topology on a specific device:","After this, the delete operation is available:","Quit to configuration mode, commit using request mode and return to the device on the topology:"]},{"l":"Set configuration","p":["The set operation can be used for the following:","Set the value of a single leaf.","Set the values of multiple leaves in a single shell operation.","Set a list of values for a leaf-list.","Replace the entire subtree using a JSON snippet.","Example - Set the value of a single leaf:","Example - Set values for multiple leaves under the 'hold-time' container:","A JSON snippet can be written to a selected data-tree node by entering the json sub-mode. In this sub-mode, you can type multiple lines that must represent a well-formed JSON document. At the end, confirm the set operation using the pattern 'w!' + newline, or cancel the set operation with the pattern 'q!' + newline.","Example - Replace configuration of an interface using a JSON snippet:","Example - Leave json sub-mode without executing set operation:"]},{"l":"Execute UniConfig operation","p":["The request command is used to execute UniConfig operations such as commit or calculate-diff in the UniConfig transaction:","The command is available in configuration mode.","You can fill in input parameters and values interactively or via provided JSON snippet.","Example - Execute UniConfig RPCs in the scope of the open UniConfig transaction:"]},{"l":"Request operational mode","p":["This command has been merged with request configuration mode and is now only available in configuration mode.","Request mode allows users to:","Invoke selected UniConfig requests that read or alter UniConfig settings.","Invoke RPCs or actions that are provided by network devices or other southbound mount-points.","Input parameters and values can be filled in interactively or via a provided JSON snippet. The transaction is passed from configuration mode.","Example - Invoke RPC execute-and-read with typed input parameters:","Example - Execute the same RPC 'execute-and-read' using input JSON:","UniConfig shell does not support interactive typing of input arguments for an RPC/action that contains the list YANG element. Such operations must be executed using input JSON."]},{"l":"Show operational mode","p":["Show mode allows users to:","Display operational data about UniConfig itself (e.g., logging status, list of open transactions or list of acquired subscriptions).","Display operational data of network devices.","After opening show mode, a new UniConfig transaction is opened. The transaction is closed when you leave this mode.","Example - Display configuration of selected subtree:","Example - Display selected system configuration:"]},{"l":"Pipe operations","p":["UniConfig shell supports pipe operations similar to Unix shell/bash pipes. 
When a command is followed by the pipe sign |, the output of the command is passed to the selected pipe operation.","Example:","Supported pipe operations are:","grep - Show only lines that match the supplied regex","match - Same as grep, but can be used with optional parameters to also show lines before and after matched lines","context-match - Same as grep, but also shows the parent structure","brief - Display root elements in a short table format","hide-empty-data-nodes - Hide data nodes without child nodes","hide-attributes - Hide attributes of data nodes"]},{"l":"Redirecting output","p":["The output of an executed command can be redirected to a file using the sign followed by a filename.","Example:","In this case, output in the console is empty but the content of the output.txt file is as follows:"]},{"l":"Aliases","p":["You can define aliases in UniConfig shell. For this purpose, there is a JSON file named shell-aliases in the UniConfig distribution. After unpacking the UniConfig distribution, the file can be found under Uniconfig/distribution/packaging/zip/target/uniconfig-x.x.x/config. The file contains some sample aliases."]},{"l":"Alias creation","p":["Aliases cannot be created dynamically, only before Uniconfig is started. The following rules apply:","The alias name must be unique and cannot contain whitespace.","The command can contain a wildcard (*). In this case, the user is prompted to add a value.","The alias is only visible in the mode where it was defined.","Example - Execute the alias 'diff xr5':","Example: Execute the alias 'lbr':","Example - Execute the alias 'shh':"]},{"l":"Callbacks","p":["Callbacks include sending POST and GET requests to the remote server and invoking user scripts from the UniConfig shell.","The following is required to use callbacks:","Necessary YANG modules - YANG modules that are required by the callbacks.","Configuration - Enable callbacks in config/application.properties and set the remote server and access token.","Update repository - Add the necessary YANG modules from step 1 into at least one YANG repository in the cache directory, and either define remote endpoints and scripts in a YANG file or create a new one for callbacks. For a definition of remote endpoints, use the frinx-callpoint@2022-06-22.yang extension.","UniStore node - Create a UniStore node using the YANG repository containing the necessary YANG modules from step 1 and a YANG file with defined endpoints and scripts.","In UniConfig shell, step 4 is optional as UniConfig creates dummy UniStore nodes for all repositories that meet the conditions in step 3. In this case, the dummy UniStore node name is identical to the YANG repository name.","In RestConf, step 4 is mandatory."]},{"l":"Necessary YANG modules","p":["The following YANG modules are required:","frinx-callpoint@2022-06-22.yang (not needed for scripts)","tailf-common@2018-11-12.yang","tailf-meta-extensions@2017-03-08.yang","tailf-cli-extensions@2018-09-15.yang"]},{"i":"configuration-1","l":"Configuration","p":["By default, callbacks are disabled and the host and port for the remote server are empty in config/lighty-uniconfig-config.json.","To enable callbacks, set the configuration parameter callbacks/enabled to true. It is also necessary to set the host and port for the remote server and store an access token in the UniConfig database.","The host and port for the remote server can be set in three ways:","Before starting Uniconfig, in the config/application.properties file. 
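A minimal sketch of combining a command with the 'grep' pipe operation listed above in a non-interactive SSH session; the exact 'show-history 5' argument syntax and the regex are assumptions, and an interactive session may behave differently.

```bash
# Sketch only: 'show-history 5' and the regex are assumptions about exact syntax;
# 'grep' is one of the pipe operations listed above.
ssh admin@127.0.0.1 -p 2022 <<'EOF'
show-history 5 | grep commit
EOF
```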
The port number is optional:","After starting UniConfig, with a PUT request:","After starting UniConfig, with cli-shell:","The access token can be stored in the UniConfig database in one of two ways:","Available settings and descriptions for callbacks are listed below:"]},{"l":"Update repository","p":["First, create or update the YANG repository by using the frinx-callpoint@2022-06-22.yang extension displayed in the following snippet. There is only one extension, url, with the argument point."]},{"i":"add-call-point-get-request","l":"Add call-point (GET request)","p":["The following snippet shows how to create a call-point in the frinx-test YANG file by using the frinx-callpoint@2022-06-22.yang extension.","The argument of the 'url' extension is '/data/from/remote', which is appended to the end of the remote server URI configured in 'config/lighty-uniconfig-config.json'. Thus the final address for the remote call-point is'https://remote.server.io/data/from/remote'."]},{"i":"add-action-post-request","l":"Add action (POST request)","p":["The following snippet shows how to create an action in the frinx-test YANG file by using the frinx-callpoint@2022-06-22.yang extension. You must also import tailf-common.yang.","The action consists of:","The action name, defined by tailf:action.","The suffix for the remote endpoint, defined by fcal:url.","The input that contains body of the request. This part is optional."]},{"l":"Add script","p":["The following snippet shows how to create a script in the frinx-test YANG file by using tailf-common.yang. It is not necessary to import the frinx-callpoint@2022-06-22.yang extension.","The script consists of:","The script name, defined by tailf:action.","The path to the script, defined by tailf:exec.","Arguments for the script, defined by tailf:exec.","Arguments can be dynamic (i.e., the user can pass values to them) or static (flags). Follow these conventions when creating arguments:","Each argument must contain a name (for example, -n, -j).","Dynamic arguments must be enclosed in $(...)(for example, $(name)).","Flags are simple words without whitespace (for example, VIP, UPPER, upper)."]},{"l":"UniStore node","p":["A UniStore node can be created by RestConf or UniConfig shell. If a repository is explicitly defined by the query parameter ?uniconfig-schema-repository=repository-name, this repository must contain all necessary YANG modules. If a repository name is not defined when the UniStore node is created, all necessary YANG modules must be in the latest schema repository."]},{"l":"Examples","p":["Example - Invoke callpoint in shell:","Example - Invoke action in shell:","Example - Execute user script in shell:"]}],[{"l":"UniStore API"},{"l":"Introduction","p":["UniStores nodes are used for storing and management of various settings/configuration inside UniConfig. The difference between UniStore and UniConfig nodes is that UniConfig nodes are backed by a(real/network) device whereas UniStore nodes are not reflected by any real device. 
In the case of UniStore nodes, UniConfig is used only for managing the configuration and persisting it into the PostgreSQL DBMS.","Summarized characteristics of UniStore nodes:","UniStore nodes are not backed by 'real' devices / southbound mount-points - they are used only for storing configuration, which is committed only to the PostgreSQL DBMS.","The configuration of a UniStore node can be read, created, removed, and updated the same way as with UniConfig topology nodes - the user can use the same set of RESTCONF CRUD operations and supported UniConfig RPCs.","UniStore nodes are placed in a dedicated 'unistore' topology under network-topology nodes. The whole configuration is placed under the 'configuration' container.","UniStore configuration is modelled by user-provided YANG schemas that can be loaded into UniConfig - when creating a UniStore node, the user must provide the name of the YANG repository so that UniConfig knows how to parse the configuration (query parameter 'uniconfig-schema-repository').","UniConfig operations that are supported for UniStore nodes:","all RESTCONF CRUD operations","commit / checked-commit RPC","calculate-diff RPC (including the git-like-diff flavour)","subtree-manager RPCs","replace-config-with-oper RPC","revert-changes RPC (transaction-log feature)","The node ID of a UniStore node must be unique among all UniConfig and UniStore nodes."]},{"l":"Commit operation","p":["Actions performed with UniStore nodes during commit operations:","Configuration fingerprint verification - if another UniConfig transaction has already changed one of the UniStore nodes touched in the current transaction, then the commit operation fails.","Calculation of the diff operation across all changed UniStore nodes.","Writing the intended configuration into the UniConfig transaction.","Rebasing the actual configuration with the intended configuration in the UniConfig transaction.","Updating the last configuration fingerprint to the UUID of the committed transaction.","Writing the transaction-log into the transaction.","Committing the UniConfig transaction - cached changes are sent to the PostgreSQL DBMS."]},{"l":"Example use-case"},{"l":"Preparation of YANG repository","p":["The user must provide UniConfig with a YANG repository that will be used for modeling the UniStore node configuration. The same UniStore node can be modeled by only one YANG repository; however, different nodes can use different YANG repositories. A YANG repository can be provided to UniConfig by copying a directory with YANG files under the 'cache' parent directory. Afterwards, it is loaded either at startup or at runtime using the 'register-repository' RPC.","For demonstration purposes, let's assume that the cache contains the YANG repository 'system' with a simple YANG module:"]},{"l":"Creation of UniStore node","p":["The next request shows the creation of a new UniStore node 'global' using the provided JSON payload and the name of the YANG repository that is used for parsing the payload (query parameter 'uniconfig-schema-repository'). Note that this YANG repository must be specified only at the initialization of the UniStore node."]},{"l":"Reading content of UniStore node","p":["The following sample shows reading of UniStore node content using a regular GET request. 
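A minimal sketch of creating the 'global' UniStore node described above; the 'settings' payload content and the exact path under the node are illustrative assumptions, while the 'unistore' topology and the 'uniconfig-schema-repository' query parameter come from the text.

```bash
# Sketch only: the leaf inside 'settings' is a hypothetical example value.
curl -u admin:admin -X PUT \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=unistore/node=global/configuration?uniconfig-schema-repository=system' \
  -H 'Content-Type: application/json' \
  -d '{ "settings": { "max-connections": 10 } }'
```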
The query parameter 'content' is set to 'config' to point out that the UniStore node is cached only in the Configuration data-store of the transaction (the Operational data-store is empty at this point)."]},{"i":"calculate-diff-rpc-created-node","l":"Calculate-diff RPC (created node)","p":["The calculate-diff operation is also supported for UniStore nodes. The following request shows the difference of all touched nodes in the current transaction, including UniStore nodes. Since the UniStore node has only been created, the diff output contains only 'created-data' with the whole root 'settings' container."]},{"l":"Persistence of UniStore node","p":["In the case of UniStore nodes, the commit RPC is used for confirming the changes made and storing them in the PostgreSQL DBMS. As explained in the previous section, the commit operation stores the UniStore node configuration and transaction-log in the DBMS; the operation doesn't touch any network device.","It is possible to combine changes of UniStore and UniConfig nodes in the same transaction and commit them at once."]},{"l":"Reading committed configuration","p":["The configuration is also visible in the Operational data-store of a newly created transaction, since it was committed in the previous step. The actual state can be shown by appending the 'content=nonconfig' query parameter to the GET request, as shown in the next example."]},{"l":"Verification of configuration fingerprint","p":["The configuration fingerprint is used as part of the optimistic locking mechanism - by comparing the configuration fingerprint from the beginning of the transaction with the one at commit time, it is possible to find out whether another UniConfig transaction has already changed the affected UniStore node. In the case of UniStore nodes, the fingerprint is always updated to the transaction-id (UUID) of the last committed transaction that contained the UniStore node."]},{"l":"Modification of configuration","p":["The same RESTCONF CRUD operations that can be applied to UniConfig nodes are also relevant for UniStore nodes. The following request demonstrates merging of multiple fields using the PATCH operation."]},{"i":"calculate-diff-rpc-updated-node","l":"Calculate-diff RPC (updated node)","p":["The second calculate-diff RPC shows more granular changes made to the existing UniStore node - it contains 'created-data' and 'updated-data' entries."]},{"l":"Commit made changes","p":["The changes made under the UniStore node can be persisted using the commit RPC."]},{"l":"Displaying content of transaction-log","p":["Committed transactions, including all metadata (e.g. serialized diff output or transaction ID), can be displayed by reading the 'transactions-metadata' container in the Operational data-store. It also displays information about successfully committed UniStore nodes. Afterwards, the user can leverage this information and revert some changes using the transaction-id shown in the transaction-log."]},{"l":"Removal of UniStore node","p":["A UniStore node can be removed by sending a DELETE request to the whole 'node' list entry or the 'configuration' container, or by removing all children of the 'configuration' container. In all cases, the UniStore node is removed after confirming the changes using the commit RPC."]}],[{"l":"YANG Patch Operations","p":["YANG Patch is used for modification of subtrees under configuration. 
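A minimal sketch of the calculate-diff and commit calls used in this use-case; the RPC module prefix and the 'target-nodes' input structure are assumptions.

```bash
# Sketch only: 'uniconfig-manager' and the 'target-nodes' input are assumptions.
curl -u admin:admin -X POST \
  'http://localhost:8181/rests/operations/uniconfig-manager:calculate-diff' \
  -H 'Content-Type: application/json' \
  -d '{ "input": { "target-nodes": { "node": ["global"] } } }'

curl -u admin:admin -X POST \
  'http://localhost:8181/rests/operations/uniconfig-manager:commit' \
  -H 'Content-Type: application/json' \
  -d '{ "input": { "target-nodes": { "node": ["global"] } } }'
```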
Advantages of YANG Patch in comparison to other RESTCONF operations:","YANG Patch may contain multiple edits with different operations applied to different subtrees","all edits inside YANG Patch are applied atomically - either all edits are successful or PATCH operation will fail and configuration will not be modified","supported reordering of lists (move operation) and inserting of list entry to specific position in the list(insert operation)","UniConfig supports all RFC-specified operations inside edits:","CREATE","REPLACE","MERGE","MOVE","INSERT","DELETE","REMOVE","RENAME","Using these operations, the user is able to reorder lists, create new data, remove data, or update specific data.","For more information, please refer to the official documentation of the RFC YANG patch"]},{"l":"RPC Examples"},{"l":"Creation of list entries","p":["The request creates new list entries in the tvi list. If the data exist, return an error."]},{"l":"Moving list entry","p":["The request moves an existing list entry on a user defined position."]},{"l":"Inserting new list entry","p":["The request inserts new list entries on a user defined position."]},{"l":"Inserting new leaf-list entry","p":["The request inserts a new leaf-list entry on a user defined position."]},{"l":"Replacing list entry","p":["The request replaces an existing value in a list entry."]},{"l":"Merging configuration","p":["The request merges an existing value in a list entry."]},{"l":"Delete list entry","p":["The request deletes a list entry. If the data is missing, returns an error."]},{"l":"Removing list entry","p":["The request removes a list entry."]},{"l":"Renaming list entry","p":["The request renames a list entry key."]},{"l":"Failed deleting of list entry","p":["The request to delete a list entry that is not present."]},{"l":"Sending Patch request with invalid structure","p":["The request is missing some data."]}],[{"l":"Operational Procedures"},{"l":"Logging","p":["The UniConfig distribution uses Logback as its logging framework. Logback is the successor to the log4j framework with many improvements, such as more options for configuration, better performance, and context-based separation of logs. Context-based separation of logs is used widely in UniConfig to achieve per-device logging based on the set marker in the logs."]},{"l":"TLS","p":["TLS is a widely adopted security protocol designed to facilitate privacy and data security for communications over the Internet. TLS authentication is disabled in the default version of UniConfig."]},{"l":"TLS for Postgres database","p":["By default, UniConfig communicates with the database without TLS and traffic is therefore unencrypted. When the database is deployed separately from UniConfig, we recommend that you enable TLS encryption."]},{"l":"OpenAPI","p":["The UniConfig distribution contains a '.yaml' file that generates list of all usable RPCs with examples. You can view it either locally or on our hosted version, which always shows the latest OpenAPI version."]},{"l":"Data Security Models","p":["UniConfig supports encryption and hashing of values in RESTCONF and UniConfig shell API, as well as managing confidential data during transfers between the UniConfig database and network devices."]},{"l":"UniConfig Clustering","p":["The UniConfig stateless architecture allows deployment of the system in a cluster to ensure horizontal scalability and high-availability properties."]},{"l":"Thread pools","p":["UniConfig uses thread pools in several places. 
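Relating to the YANG Patch section above, a minimal sketch of a patch that creates a 'tvi' list entry; the target URL and the entry body are illustrative assumptions, while the edit structure and the 'application/yang-patch+json' media type follow RFC 8072.

```bash
# Sketch only: the data path and the 'tvi' entry body are hypothetical.
curl -u admin:admin -X PATCH \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=dev01/configuration' \
  -H 'Content-Type: application/yang-patch+json' \
  -d '{
    "ietf-yang-patch:yang-patch": {
      "patch-id": "add-tvi-entry",
      "edit": [
        {
          "edit-id": "edit1",
          "operation": "create",
          "target": "/tvi=tvi-0",
          "value": { "tvi": [ { "name": "tvi-0" } ] }
        }
      ]
    }
  }'
```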
They can be configured in the application.properties file."]},{"i":"data-flows--transformations","l":"Data flows & transformations","p":["There are multiple paths and transformations of data within Uniconfig. The following section provides more information on some of the more common paths.","Thread pools"]}],[{"l":"Data flows and transformations","p":["Architecture","Flows","CLI, direct to device, plaintext interface","CLI, direct from device, configuration data read","CLI, direct from device, operational data read","CLI, direct to device, configuration data write","Netconf, direct from device, configuration data read","Netconf, direct from device, operational data read","Netconf, direct to device, configuration data write","Uniconfig, cached intent configuration, data read","Uniconfig, cached applied configuration, data read","Uniconfig, applying intent to a device","Uniconfig, synchronizing applied configuration from network"]},{"l":"Architecture","p":["CLI- Southbound plugin for managing devices over CLI (SSH).","Data flow architecture","gNMI- Southbound plugin for managing devices over gNMI (SSH).","JSON YANG RFC provides information on how JSON is used.","NETCONF- Southbound plugin for managing devices over Netconf (SSH).","Northbound","Restconf RFC provides detailed information on YANG-based REST API specifics.","Restconf- REST API for Uniconfig.","SNMP- // TBD","Southbound","The following diagram outlines the basic architecture for this purpose. It gives a simplified overview and only includes a subset of components, but serves as a baseline for illustrating various data flows.","The following main components are included:","The Uniconfig core consists of many different components/features. A good place to start is the build-and-commit model in Uniconfig.","Uniconfig CLI shell- CLI interface for Uniconfig. Similar capabilities as RESTCONF, but intended for users who prefer CLI access.","Uniconfig core","Uniconfig java SDK documentation provides an overview.","Uniconfig Java SDK- Java SDK for Uniconfig. 
Uses Restconf internally.","Uniconfig restconf documentation provides an overview.","Uniconfig shell documentation provides an overview."]},{"l":"Flows"},{"i":"cli-direct-to-device-plaintext-interface","l":"CLI, direct to device, plaintext interface","p":["Flow for reading/writing arbitrary commands to a CLI device.","The User sends an HTTP POST REST (rpc) request to Uniconfig.","URL specifies Uniconfig defined execute-and-read or execute-and-expect RPC.","URL must specify the following:","topology=cli- CLI-managed device","node=nodeID- specific managed device","Restconf invokes an asynchronous RPC on the southbound layer, but blocks until it completes.","The CLI layer invokes a generic implementation of the plaintext access RPC and returns output from the device as is.","Restconf receives the data from the CLI layer and completes the request.","Restconf example:","To send an arbitrary command to a device and receive a response:","To send a sequence of commands (ssh expect style) and receive a response:","Flow diagram:","Data flow architecture"]},{"i":"cli-direct-from-device-configuration-data-read","l":"CLI, direct from device, configuration data read","p":["?content=config- to specify only configuration data must be read from device (if not present, defaults to config)","CLI readers send specific commands to the device and parse the output into an internal DOM data structure.","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based) configuration data from a device over CLI. The data is always retrieved from the device with no cache involved.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against openconfig YANG models.","Restconf example:","Restconf invokes an asynchronous read from the southbound layer, but blocks until it completes.","Restconf receives the data from CLI layer, serializes them into JSON and completes the request.","The CLI layer finds appropriate an CLI driver (cli units) and invokes all readers registered for a specific path provided in the URL.","The user sends an HTTP GET REST request to Uniconfig.","To get configuration data for all interfaces:","topology=cli- CLI-managed device","URL must conform to openconfig data models used for all CLI devices.","URL must specify the following:"]},{"i":"cli-direct-from-device-operational-data-read","l":"CLI, direct from device, operational data read","p":["Flow for reading structured (YANG-model based) operational data from a device over CLI. The data is always retrieved from the device with no cache involved.","This flow is identical to CLI, direct from device, configuration data read flow. The difference is that this READ returns a combination of configuration and operational data ! To invoke operational data read, use ?content=nonconfig in the URL, the rest of URL is no different","Warning! Be careful when requesting operation data from devices. The data can be massive and the act of reading such data can cause issues on device itself. 
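For the plaintext CLI flow described above, a minimal sketch of invoking execute-and-read on a CLI node; the mount-point path, module prefix, node name and command string are assumptions.

```bash
# Sketch only: 'cli-unit-generic' and the 'command' input leaf are assumptions.
curl -u admin:admin -X POST \
  'http://localhost:8181/rests/operations/network-topology:network-topology/topology=cli/node=IOS1/yang-ext:mount/cli-unit-generic:execute-and-read' \
  -H 'Content-Type: application/json' \
  -d '{ "input": { "command": "show running-config interface Loopback999" } }'
```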
Always be as specific as possible, i.e., use the most specific (longest) URL possible.","Restconf example:","To get configuration and operational data for all interfaces:","Flow diagram:","Data flow architecture"]},{"i":"cli-direct-to-device-configuration-data-write","l":"CLI, direct to device, configuration data write","p":["?content=config- only configuration data is read from the device","CLI writers send specific commands to the device and check the output for errors.","Data flow architecture","Flow diagram:","Flow for writing structured (YANG-model based) configuration data to a device over CLI. The data is transformed and sent directly to a device.","node=nodeID- specific managed device","Note: We do not recommend writing directly to a device. The preferred option is to use Uniconfig core to build an intent and commit the changes to the network.","Restconf component parses the URL and the payload and validates them against openconfig YANG models.","Restconf example:","Restconf invokes an asynchronous write on the southbound layer, but blocks until it completes.","Restconf receives a success or failed response from the CLI layer and maps it to the appropriate status code.","The CLI layer finds the appropriate CLI driver (cli units) and invokes all writers registered for a specific path provided in the URL.","The payload must contain valid JSON that corresponds to the point in the YANG model identified by the URL.","The user sends an HTTP PUT or POST REST request to Uniconfig.","To configure a new Loopback999 interface:","topology=cli- CLI-managed device","URL and payload need to conform to openconfig data models used for all CLI devices.","URL must specify the following:"]},{"i":"netconf-direct-from-device-configuration-data-read","l":"Netconf, direct from device, configuration data read","p":["?content=config- only configuration data is read from the device (if not given, defaults to config)","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based) configuration data from a device over Netconf. The data is always retrieved from the device with no cache involved.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against vendor-specific YANG models.","Restconf example:","Restconf invokes an asynchronous read from the southbound layer, but blocks until it completes.","Restconf receives the data from the Netconf layer, serializes them into JSON and completes the request.","The Netconf layer serializes the path (URL) into a get-config request with a filter, sends it to the device and parses the output into an internal DOM data structure.","The user sends an HTTP GET REST request to Uniconfig.","To get Loopback999 interface configuration using IOS XR vendor models:","topology=topology-netconf- Netconf-managed device","URL must conform to vendor-specific YANG data models used by the device.","URL must specify the following:","Which models are used depends on the device. Many vendor-specific models can be found on GitHub."]},{"i":"netconf-direct-from-device-operational-data-read","l":"Netconf, direct from device, operational data read","p":["Flow for reading structured (YANG-model based) operational data from a device over Netconf. The data is always retrieved from the device with no cache involved.","This flow is identical to the Netconf, direct from device, configuration data read flow. The difference is that this READ returns a combination of configuration and operational data by using the get NETCONF RPC instead of get-config! 
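For the Netconf configuration read above, a minimal sketch of fetching Loopback999 with a vendor model; the Cisco IOS XR module name, list keys and node name are assumptions, while topology-netconf, yang-ext:mount and ?content=config come from the flow description.

```bash
# Sketch only: the vendor module and list keys are assumptions for illustration.
curl -u admin:admin -H 'Accept: application/json' \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=topology-netconf/node=xr5/yang-ext:mount/Cisco-IOS-XR-ifmgr-cfg:interface-configurations/interface-configuration=act,Loopback999?content=config'
```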
To invoke operational data read, use ?content=nonconfig in the URL","Warning! Be careful when requesting operation data from devices. The data can be massive and the act of reading such data can cause issues on device itself. Always be as specific as possible, i.e., use the most specific (longest) URL possible.","Restconf example:","To get operational data for all interfaces using IOS XR vendor models:","Flow diagram:","Data flow architecture"]},{"i":"netconf-direct-to-device-configuration-data-write","l":"Netconf, direct to device, configuration data write","p":["Flow for writing structured (YANG-model based) configuration data to a device over Netconf. The data is transformed and sent directly to a device.","Note: We do not recommend writing directly to a device. The preferred option is to use Uniconfig core to build an intent and commit the changes to the network.","Restconf example:","To configure Loopback999 interface configuration using IOS XR vendor models:","Flow diagram:","Data flow architecture"]},{"i":"uniconfig-cached-intent-configuration-data-read","l":"Uniconfig, cached intent configuration, data read","p":["?content=config- only intent data is read for a device (if not given, defaults to config == intent)","An ad-hoc uniconfig transaction is started.","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based), cached intent configuration data (not applied to network) for a device, regardless of its management protocol. The data is retrieved from in-memory cache (or a database, if not available in memory).","For more information about transactions, see Build and commit mode or Immediate commit model.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against device-specific YANG models.","Restconf example:","Restconf invokes an asynchronous read from uniconfig core, but blocks until it completes.","Restconf receives the data from Uniconfig core, serializes them into JSON and completes the request.","The ad-hoc transaction is closed.","The user sends an HTTP GET REST request to Uniconfig.","This is typically a very quick operation compared to reading directly from a device.","To get Loopback999 interface cached intent configuration using IOS XR vendor models:","To get Loopback999 interface cached intent configuration using openconfig models for a device over CLI:","topology=uniconfig- device cached in uniconfig","Transactions can be started automatically by Uniconfig or controlled by the user.","Uniconfig core reads in-memory cached intent (or loads the latest version of data from the database).","URL must conform to models used for that specific device, whether standard or vendor-specific models.","URL must specify the following:"]},{"i":"uniconfig-cached-applied-configuration-data-read","l":"Uniconfig, cached applied configuration, data read","p":["?content=nonconfig- only applied data is read for a device (if not given, defaults to config == intent)","An ad-hoc uniconfig transaction is started.","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based), cached configuration data (already applied to the network) for a device, regardless of its management protocol. 
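A minimal sketch of reading cached data for a node from the uniconfig topology; the node name and the openconfig path are assumptions. Using ?content=config returns the cached intent, while ?content=nonconfig returns the cached applied configuration, as described in these flows.

```bash
# Sketch only: node name and the openconfig interface path are assumptions.
curl -u admin:admin -H 'Accept: application/json' \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=IOS1/frinx-uniconfig-topology:configuration/frinx-openconfig-interfaces:interfaces/interface=Loopback999?content=config'
```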
The data is retrieved from in-memory cache (or a database, if not available in memory).","For more information about transactions, see Build and commit mode or Immediate commit model.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against device-specific YANG models.","Restconf example:","Restconf invokes an asynchronous read from Uniconfig core, but blocks until it completes.","Restconf receives the data from Uniconfig core, serializes them into JSON and completes the request.","The ad-hoc transaction is closed.","The user sends an HTTP GET REST request to Uniconfig.","This is typically a quick operation compared to reading directly from a device.","To get Loopback999 interface cached intent configuration using IOS XR vendor models:","To get Loopback999 interface cached intent configuration using openconfig models for a device over CLI:","topology=uniconfig- device cached in uniconfig","Transactions can be started automatically by Uniconfig or controlled by the user.","Uniconfig core reads in-memory cached, already applied configuration (or loads the latest version of data from a database).","URL must conform to models used for that specific device, whether standard or vendor-specific models.","URL must specify the following:"]},{"i":"uniconfig-applying-intent-to-a-device","l":"Uniconfig, applying intent to a device","p":["Flow for writing structured (YANG-model based) configuration data to Uniconfig's intent. Intent is typically modified for multiple devices. When modifications are completed, a commit is issued to apply the changes to the network. Automated rollback may kick in when a failure occurs.","For more information on this flow, see Build and commit mode or Immediate commit model.","Note: Uniconfig core builds on top of \"direct to device data flows\" and everything south of Uniconfig core is identical to those (direct to device) data flows. For example, Uniconfig core uses the Netconf, direct to device, configuration data write flow to apply configurations to Netconf devices when performing a commit.","Restconf example:","To configure two devices in a single transaction:","Flow diagram:","Data flow architecture"]},{"i":"uniconfig-synchronizing-applied-configuration-from-network","l":"Uniconfig, synchronizing applied configuration from network","p":["An ad-hoc uniconfig transaction is started.","Data flow architecture","Flow diagram:","Flow for synchronizing/updating an applied configuration from a network device. This is useful especially when the configuration is changed in the network directly (outside of Uniconfig). 
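For the apply-intent flow described above, a minimal sketch that modifies the intent of one node and then commits; the payload, module prefixes and the RPC input structure are assumptions.

```bash
# Sketch only: the openconfig payload and 'uniconfig-manager:commit' are assumptions.
curl -u admin:admin -X PUT \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=IOS1/frinx-uniconfig-topology:configuration/frinx-openconfig-interfaces:interfaces/interface=Loopback999' \
  -H 'Content-Type: application/json' \
  -d '{ "interface": [ { "name": "Loopback999", "config": { "name": "Loopback999", "enabled": true } } ] }'

curl -u admin:admin -X POST \
  'http://localhost:8181/rests/operations/uniconfig-manager:commit' \
  -H 'Content-Type: application/json' \
  -d '{ "input": { "target-nodes": { "node": ["IOS1"] } } }'
```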
Once the configuration is synchronized, those direct changes can be accepted or reverted in Uniconfig.","For more information about transactions, see Build and commit mode or Immediate commit model.","For more information on this flow, see Sync from network.","Payload must specify a list of devices to be synchronized.","Restconf example:","Restconf invokes an asynchronous RPC in Uniconfig core, but blocks until it completes.","Restconf receives a success or failed response from Uniconfig core and maps it to the appropriate status code and response.","The ad-hoc transaction is committed.","The user sends an HTTP GET REST request to Uniconfig.","To synchronize two devices from the network:","Transactions can be started automatically by Uniconfig or controlled by the user.","Uniconfig core performs direct from device configuration data read flows for each device in parallel.","Uniconfig core stores the new configuration in the applied configuration cache and in the database.","Uniconfig has a mechanism to verify whether a device is out of sync based on the last commit timestamp. A full configuration is performed only if out of sync.","URL specifies Uniconfig defined sync-from-network RPC."]}],[{"l":"Data Security Models","p":["UniConfig supports encryption and hashing of leaf/leaf-list values on SSH and RESTCONF API. Following sections describe supported security models in depth."]},{"l":"Data encryption","p":["UniConfig uses asymmetric encryption for ensuring confidentiality of selected leaf and leaf-list values. Currently, only RSA ciphers are supported (both global UniConfig and device-level key-pairs). Encryption is supported in 'uniconfig', 'unistore', and 'templates' topologies."]},{"l":"Global-device encryption architecture","p":["Both UniConfig and device uses PKI for encryption of data:","UniConfig side: All selected leaves are encrypted using global public key when this data enters UniConfig via RESTCONF API or UniConfig SSH shell API. Afterwards, data is stored in database in the encrypted format. UniConfig has also access to private key which is used internally for decryption of already encrypted data.","Device side: Device exposes public key and UniConfig uses this key for re-encryption of data before it is sent to device ('commit'/'checked-commit' operations). However, device doesn't expose its private key - UniConfig is not able to detect changes done to encrypted data (updated leaves/leaf-lists) - it is only able to detect, if data was removed or created, not updated. Because of this reason, UniConfig assumes that read encrypted data from device has been encrypted using the same public key as it was used by UniConfig.","Following picture depicts data transformations done on UniConfig interfaces:","Global-device encryption model"]},{"l":"Global-only encryption architecture","p":["In comparison to Global-device encryption architecture this model uses only global key-pair for encryption of data. Devices contain only plaintext data.","Public key is used for encryption of received data via RESTCONF, UniConfig shell API, and when syncing configuration from device to UniConfig transaction ('sync-from-network' operation).","Private key is used for decryption of encrypted data before forwarding this configuration to device('commit'/'checked-commit' operations).","Next picture depicts data transformations done on UniConfig interfaces:","Global-only encryption model","Reading of operational data from device directly (GET under 'yang-ext:mount') shows data in unencrypted format. 
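For the sync-from-network flow described earlier, a minimal sketch of the RPC call; the module prefix and the 'target-nodes' input structure are assumptions, while the RPC name and the list-of-devices payload follow the description.

```bash
# Sketch only: 'uniconfig-manager' and 'target-nodes' are assumptions.
curl -u admin:admin -X POST \
  'http://localhost:8181/rests/operations/uniconfig-manager:sync-from-network' \
  -H 'Content-Type: application/json' \
  -d '{ "input": { "target-nodes": { "node": ["IOS1", "XR5"] } } }'
```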
Application gateways should restrict access to mountpoints in this use-case."]},{"l":"YANG support","p":["Leaves and leaf-lists, which value user would like to store encrypted, must be marked using YANG extension without any parameters. Currently, only leaves with 'string' type (direct/indirect with custom type definitions) are supported, since encrypted values are base64 encoded. Also, be aware that type constraints must accept encrypted values.","Example YANG module that defines one 'encrypt' extension:","Usage of the extension in the 'config' module:","Many times, it is not possible to modify existing YANG files because they are already deployed on device, for example device running with NETCONF server. In this case, user can still mark what leaves should be encrypted using additional YANG module that contains deviations.","Example:","Afterwards, user has 2 options how this module can be coupled with modules from device (NETCONF):","Explicit specification of this side-loaded module in the 'install-node' request - using'netconf-node-topology:yang-module-capabilities' settings (see 'Device installation' section).","Automatic detection of side-loaded module - UniConfig looks for specific capability from NETCONF server, inherits its revision, and then looks for side-loaded module with specific name and inherited revision(see 'Configuration' section). This option is preferred, if deployment contains multiple versions of devices and list of encrypted paths are different on each version."]},{"l":"Configuration","p":["Global RSA key-pair is stored inside PEM-encoded files in the 'rsa' directory under UniConfig root. Name of the private key must be 'encrypt_key' and name of the public key must be 'encrypt_key.pub'. If user doesn't provide these files, UniConfig will automatically generate its own key-pair with length of 2048 bits. All UniConfig instances in the cluster must use the same key-pair.","Encryption settings are stored in the 'config/lighty-uniconfig-config.json' file under 'crypto' root object.","Example:","encrypt-enabled - If this setting is false, then encryption is disabled despite other settings or install-node parameters. If this setting is true, then encryption is enabled. The default value is true.","encrypt-extension-id - If this setting is not defined, then encryption is disabled despite other settings or install-node parameters. The value must have the format [module-name]:[extension-name] and specifies extension used for marking of encrypted leaves/leaf-lists in YANG modules. Corresponding YANG module, that contain this extension, can be part of device/unistore YANG schemas, or it can be side-loaded during installation of NETCONF device as imported module from 'default' repository.","netconf-reference-module - Name of the module for which NETCONF client looks for during mounting process. If UniConfig finds module with this name in the list of received capabilities, then it uses its revision in the lookup process for correct YANG module with encrypted paths (using deviations).","netconf-encrypted-paths-module-name - Name of the module which contains deviations with paths to encrypted leaves/leaf-lists. There could be multiple revisions of this file prepared in the 'default' NETCONF repository. NETCONF client in the UniConfig chooses the correct revision based on 'netconf-reference-module-name' setting. 
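A minimal sketch of pre-generating the global key-pair files described above ('rsa/encrypt_key' and 'rsa/encrypt_key.pub', 2048 bits); whether UniConfig expects PKCS#1 or PKCS#8 PEM is not stated here, so treat the exact openssl flags as an assumption.

```bash
# Sketch only: the PEM flavor expected by UniConfig is an assumption.
mkdir -p rsa
openssl genrsa -out rsa/encrypt_key 2048
openssl rsa -in rsa/encrypt_key -pubout -out rsa/encrypt_key.pub
```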
Together, netconf-reference-module-name' and 'netconf-encrypted-paths-module-name' can be used for autoload of encrypted paths for different versions of devices.","If 'default' YANG repository contains module with encrypted-paths without defined YANG revision and device does not already provide encryption capability, then encrypted-paths module is used as the last resort during installation of device ('netconfReferenceModuleName' and matching of revisions are ignored)."]},{"l":"Change encryption status","p":["For proper working of this RPC it is necessary to enable notifications with this parameter:","Encryption can be enabled or disabled with the parameter:","The value of this parameter can be changed with the 'change-encryption-status' RPC request.","Following request is used to enable encryption:","After calling this command, all UniConfig instances will set this parameter using the notification service to the value which is sent via RPC, in this case it will be set to value true.","Following request is used to disable encryption:","Following request is used to check actual encryption status:","To check the functionality of this RPC, after calling install-device RPC we can request the password of the node, which when encryption is enabled will be returned encrypted, and when it is disabled password will be plain text."]},{"i":"change-encryption-keys-private-and-public","l":"Change encryption keys (private and public)","p":["In case it is necessary to change the encryption keys, there is RPC change-encryption-keys. The process of changing encryption keys requires rebooting one of the instances of UniConfig or enabling a new instance of UniConfig after calling change-encryption-keys RPC(rotation of encrypted data in the database for new encryption keys occurs when UniConfig is started if RPC change-encryption-keys is executed). During key rotation, if some data in the database cannot be decrypted with the old key, those data will remain unchanged.","The default value of 'new-encryption-cipher-type' parameter is value 'RSA', so there is no need to add this parameter to the request body.","To check if UniConfig needs to be restarted or if a new UniConfig instance needs to be added, the following query can be run:","After key rotation when UniConfig is started, the data encrypted using the old key will be overwritten with the new encryption keys and all other UniConfig instances in the cluster will use the new keys for encryption.","During key rotation, UniConfig reads and updates encrypted configurations in batch groups. The size of these groups can be set with the parameter:"]},{"l":"Device installation","p":["There are 2 settings related to encryption in the 'install-node' RPC request:","uniconfig-config:crypto - It allows specifying path to public key on device - 'public-key-path' (leaf with RFC-8040 path) and cipher type (by default, RSA is used) - 'public-key-cipher-type'. If path to public key is specified, and it exists on device, then Global-device encryption model is used. Otherwise, Global-only encryption model is selected.","netconf-node-topology:yang-module-capabilities - If autoload of YANG module with encrypted paths is not used and device itself doesn't specify encrypted leaves, then it is necessary to side-load YANG module with encrypted paths. This parameter is relevant only on NETCONF nodes. 
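A minimal sketch of calling the change-encryption-status RPC mentioned above to enable encryption; the module prefix and the name of the input leaf are assumptions.

```bash
# Sketch only: 'crypto' as module prefix and the 'enabled' leaf are assumptions.
curl -u admin:admin -X POST \
  'http://localhost:8181/rests/operations/crypto:change-encryption-status' \
  -H 'Content-Type: application/json' \
  -d '{ "input": { "enabled": true } }'
```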
Side-loaded modules must be expressed in the format of NETCONF capabilities.","The following request shows an install-node request that specifies both the path to the public key and the side-loaded YANG module 'encrypted-paths' with revision '2021-12-15' and namespace 'urn:ietf:params:xml:ns:yang:encrypted-paths'.","During installation, UniConfig tries to download the public key from the device. The public key can be verified using a GET request:"]},{"l":"Format of encrypted data","p":["Encrypted values are stored and displayed via RESTCONF or UniConfig shell with the 'rsa_' prefix. The prefix is used by UniConfig to see whether posted data is already encrypted or still needs to be encrypted.","The encrypted string is encoded using Base64 encoding."]},{"i":"example-global-device-model","l":"Example: global-device model","p":["The next use-case shows encryption of values marked by the 'frinx-encrypt:encrypt' extension on both the UniConfig server side and the device side. The NETCONF device directly exposes the 'frinx-encrypt' YANG module and leaves with the applied extension (side-loading of encrypted paths is not necessary).","The YANG model used for simulation of the YANG device:"]},{"i":"example-global-only-model","l":"Example: global-only model","p":["The next use-case shows encryption of values marked by the 'frinx-encrypt:encrypt' extension only on the UniConfig server side. The NETCONF device directly exposes the 'frinx-encrypt' YANG module and leaves with the applied extension (side-loading of encrypted paths is not necessary).","The YANG model used for simulation of the YANG device is the same as in the previous use-case."]},{"l":"Data hashing","p":["UniConfig supports the 'iana-crypt-hash' YANG model for specification of hashed values in the data-tree using the type definition 'crypt-hash'. Hashing works in the 'uniconfig' and 'unistore' topologies. Only NETCONF devices are currently supported, because CLI cannot natively be used to report device capabilities that would contain the supported hashing function."]},{"l":"Architecture","p":["Hashing is done only in the RESTCONF layer after writing data that contains leaves/leaf-lists with the 'crypt-hash' type. Afterwards, UniConfig stores, uses, and writes to the device only the hashed representation of these values.","Hashing model"]},{"i":"yang-support-1","l":"YANG support","p":["YANG module 'iana-crypt-hash':","http://www.iana.org/assignments/yang-parameters/iana-crypt-hash@2014-08-06.yang","All three hash functions are implemented - 'MD5', 'SHA-256', and 'SHA-512'. In the case of the 'uniconfig' topology, the hashing function is selected based on the feature reported in the NETCONF capability; in the case of the 'unistore' topology, UniConfig enforces the 'SHA-512' hashing function."]},{"i":"device-installation-1","l":"Device installation","p":["Hashing is enabled by default on NETCONF devices that report the corresponding 'iana-crypt-hash' model-based capability. The user doesn't have to add any extra setting in the 'install-node' request.","After successful installation of the device, it is possible to check the loaded hashing function that will be used for storing hashed values. Use the following GET request:"]},{"i":"example-hashing-input-values","l":"Example: hashing input values","p":["This example demonstrates hashing of input values with the 'crypt-hash' type on the RESTCONF API."]}],[{"l":"Logging Framework"},{"l":"Logback Configuration","p":["The UniConfig distribution uses Logback as the implementation of the logging framework. Logback is the successor to the log4j framework with many improvements, such as more options for configuration, better performance, and context-based separation of logs. 
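A minimal sketch of an install-node request combining the crypto settings and the side-loaded 'encrypted-paths' capability described above; the RPC path, connection parameters and the device's public-key path are assumptions, and most installation parameters are omitted.

```bash
# Sketch only: 'connection-manager:install-node', the host/port and the
# public-key-path value are assumptions; the capability string is built from the
# module name, revision and namespace given above.
curl -u admin:admin -X POST \
  'http://localhost:8181/rests/operations/connection-manager:install-node' \
  -H 'Content-Type: application/json' \
  -d '{
    "input": {
      "node-id": "xr5",
      "netconf": {
        "netconf-node-topology:host": "192.168.1.25",
        "netconf-node-topology:port": 830,
        "uniconfig-config:crypto": {
          "public-key-path": "/crypto-state:public-key",
          "public-key-cipher-type": "RSA"
        },
        "netconf-node-topology:yang-module-capabilities": {
          "capability": [
            "urn:ietf:params:xml:ns:yang:encrypted-paths?module=encrypted-paths&revision=2021-12-15"
          ]
        }
      }
    }
  }'
```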
Context-based separation of logs is used widely in UniConfig to achieve per-device logging based on the set marker in the logs.","Logback configuration is placed in 'config/logback.xml' file under UniConfig distribution. For more information about formatting of logback configuration, look at the http://logback.qos.ch/manual/configuration.html site. This section describes parts of the configuration in the context of UniConfig application."]},{"l":"Appenders","p":["The following appenders are used:","'STDOUT': Prints logs into the console.","'logs': Used for writing all logs to the output file on path'log/logs.log'. The rolling file appender is applied.","'netconf-notifications', 'netconf-messages', 'netconf-events', and'cli-messages', 'gnmi-messages': Sifting appenders that split logs per node ID that is set in the marker of the logs. Logs are written to different subdirectories under 'log' directory and they are identified by their node ID. The rolling file appender is applied.","'restconf': Appender used for writing of RESTCONF messages into'log/restconf.log' file. The rolling file appender is applied.","'gnmi': Appender used for writing of Logs related to gNMI topology."]},{"l":"Loggers","p":["There are 2 groups of loggers:","Package-level logging brokers: Loggers that are used for writing general messages into the console and a single output file. Logging level is set by default to 'INFO'. For debugging purposes it is handy to change logging threshold to 'TRACE' or 'DEBUG' level. Covered layers: UniConfig, Unified, Controller, RESTCONF, CLI, NETCONF, gNMI. Used appenders: 'STDOUT' and 'logs'.","Loggers used for logging brokers: These loggers should not be changed since the state of logging can be changed using RPC calls. Classpaths point to specific classes that represent implementations of logging brokers, the logging level is set to 'TRACE'. Used appenders: 'netconf-notifications', 'netconf-messages','netconf-events', 'cli-messages', 'gnmi-messages' and 'restconf'."]},{"l":"Updating Configuration","p":["Logback is configured to scan for changes in its configuration file and automatically reconfigure itself when the configuration file changes.","Scanning period is set by default to 5 seconds."]},{"l":"Example configuration","p":["In the logback.xml file you can edit level of logging for each component of UniConfig:"]},{"l":"INFO","p":["This is recommended level for production environments. INFO messages display behavior of applications. They state what happened. For example, if a particular service stopped or started or you added something to the database. These entries are nothing to worry about during usual operations. The information logged using the INFO log is usually informative, and it does not necessarily require you to follow up on it."]},{"l":"DEBUG","p":["With DEBUG, you are giving diagnostic information in a detailed manner. It is verbose and has more information than you would need when using the application. DEBUG logging level is used to fetch information needed to diagnose, troubleshoot, or test an application. This ensures a smooth running application."]},{"l":"TRACE","p":["The TRACE log level captures all the details about the behavior of the application. It is mostly diagnostic and is more granular and finer than DEBUG log level. 
This log level is used in situations where you need to see what happened in your application."]},{"l":"Logging Brokers","p":["The logging broker represents a configurable controller that logs one logical group of messages from a single classpath. Logging of multiple messages from the same classpath simplifies configuration of loggers in Logback since only one logger per broker must be specified. The logging broker can be controlled using RESTCONF RPCs; there are multiple operations where it is possible to trigger logging for the whole broker, or just for specified node IDs. Configuration of the logger in the logback file that is assigned to the logging broker should not be changed at all."]},{"l":"Implemented Logging Brokers","p":["The following subsections describe currently implemented logging brokers."]},{"l":"RESTCONF","p":["It is used for logging authenticated HTTP requests and responses; information about URI, source, HTTP method, query parameters, HTTP headers, and body.","Per-device logging cannot be enabled for this broker; all logs are saved to 'log/restconf.log' file.","It is possible to configure HTTP headers in which the content must be masked in logs (using asterisk characters). This is useful especially if there are some headers which contain private data(such as Authorization or a Cookie header). Hidden HTTP headers are marked using header identifiers.","It is also possible to configure HTTP methods for which the communication (requests and responses) should not be logged to a file.","Requests and responses are paired using a unique message-id. This message-id is not part of the HTTP request, it is generated on the RESTCONF server.","Requests and responses contain Uniconfig transactions for easier matching with the log-transactions.","Example: - Request and corresponding response with the same message-id"]},{"l":"CLI messages","p":["Broker used for logging of all CLI requests and responses.","These CLI requests and responses are paired with unique message-id attribute, which is generated.","Per-device logging is supported - logs for CLI messages are stored under 'log/cli-messages' directory and named by '[node-id].log' pattern.","Example - sending POST RPC for installing CLI device, and getting requests with corresponding responses paired with same Message-ID:"]},{"l":"NETCONF Messages","p":["A broker is used for logging of all NETCONF messages incoming or outgoing, except the NETCONF notifications (a distinct broker has been introduced for notifications).","NETCONF RPC's and responses can be matched using the 'message-id' attribute that is placed in the RPC header.","Per-device logging is supported, logs for NETCONF messages are stored under the directory 'log/netconf-messages' and named by the'[node-id].log' pattern.","Example: - Sending NETCONF GET RPC and receiving response","Number 641 represents the session ID. It is read from the NETCONF hello message. If multiple sessions are created between the NETCONF server and NETCONF client and are logically grouped by the same node ID, then logs from multiple sessions are stored to the same logging file (this is needed to distinguish between the sessions). 
Multiple NETCONF sessions between the UniConfig and NETCONF server are created for each subscription to the NETCONF stream."]},{"l":"NETCONF Notifications","p":["A broker is used for logging of incoming NETCONF notifications.","Per-device logging is supported, logs for NETCONF notifications are stored under the directory 'log/netconf-notifications' and named by the '[node-id].log' pattern.","Example: - Received two notifications"]},{"l":"NETCONF Events","p":["Logs generated by this broker contain session-related information about the establishment or closing of a NETCONF session from the view of the NETCONF client placed in UniConfig.","These logs don't contain full printouts of sent or received NETCONF messages.","Per-device logging is supported, logs for NETCONF events are stored under the directory 'log/netconf-events' and named by the'[node-id].log' pattern.","Example:"]},{"l":"gNMI Messages","p":["A broker is used for logging of all gNMI SET/GET messages incoming or outgoing, except the gNMI notifications.","Per-device logging is supported, logs for gNMI messages are stored under the directory 'log/gnmi-messages' and named by the'[node-id].log' pattern.","Example: - Sending gNMI SET request and receiving response"]},{"l":"Supported Logging Settings","p":["Current logging broker settings are stored in the Operational datastore under the 'logging-status' root container. The following example shows a GET query that displays the logging broker settings:","Response:","Logging settings are encapsulated inside multiple list entries ('broker' list) where each list entry contains settings for one logging broker. Description of the settings that are placed under a single logging entry:","broker-identifier: Unique identifier of the logging broker. Currently, 5 brokers are supported: 'netconf_messages', 'restconf','netconf_notifications', 'netconf_events', and cli_messages.","is-logging-broker-enabled: Flag that specifies whether the logging broker is enabled. If the logging broker is disabled, then no logging messages are generated.","is-logging-enabled-on-all-devices: If this flag is set to'true', then logs are separated to distinct files in the scope of all devices. If it is set to 'false', then logging is enabled only for devices that are listed in the 'enabled-devices' leaf-list / array. This setting is unsupported in the 'restconf' logging broker since RESTCONF currently doesn't differentiate the node ID in the requests or responses.","enabled-devices: If 'is-logging-enabled-on-all-devices' is set to 'false', then logs are generated only for devices that are specified in this list, it acts as a simple filtering mechanism based on the whitelist. Blacklist approach is not supported, it is not possible to set 'is-logging-enabled-on-all-devices' to 'true' and specify devices for which logging feature is disabled. This field is not supported in the 'restconf' logging broker.","RESTCONF-specific settings:","restconf-logging:hidden-http-methods- HTTP requests (and associated HTTP responses) are not logged if request's HTTP method is set to one of the methods in this list. Names of the HTTP methods must be specified using upper-case format.","restconf-logging:hidden-http-headers- List of HTTP headers(names of the headers) which content is hidden in the logs. Names of the HTTP headers are not case-sensitive.","GNMI-specific settings:","gnmi-logging:message-types- gNMI message types that are enabled to be logged. 
Names of the message types must be specified using upper-case format.","Global settings that are common in all logging brokers:","hidden-types- Value of leaf or leaf-list that uses one of these types is hidden in the logs using asterisk characters. It can be used for masking of passwords or other confidential data from logs."]},{"l":"Initial Configuration","p":["By default, all logging brokers are disabled and logging is disabled on all devices, the user must explicitly specify a list of devices for which per-device logging is enabled. Also, RESTCONF-specific filtering is not configured, all HTTP requests and responses are fully logged, no content is dismissed. By default, only SET gNMI message type is set to be logged.","Initial logging configuration can be adjusted by adding the'loggingController' configuration into the'config/lighty-uniconfig-config.json' file. The structure of this configuration section conforms YANG structure that is described by the'logging' and 'restconf-logging' modules, it is possible to copy the state of the Operational datastore under 'logging-status' into the'loggingController' root JSON node.","The next JSON snippet shows the sample configuration'loggingController', logging brokers 'netconf_messages' and'netconf_notifications' are enabled; the 'netconf_messages' broker is enabled for all devices while 'netconf_notifications' is enabled only for 'xr6' and 'xr7' devices.","If unknown parameters are specified in a configuration file, they will be ignored and a warning, that the corresponding parameter was ignored, will be logged."]},{"l":"Controlling of Logging Using RPC Calls","p":["Since logging settings are stored in the Operational datastore, it is possible to adjust these settings on runtime only using RPC calls. The following subsections describe available RPCs."]},{"l":"Enable Logging Broker","p":["An RPC is used for enabling the logging broker. The enabled logging broker is available to write logs.","The input contains only the name of the the logging broker,'broker-identifier'.","Example: - Enable logging broker with the identifier 'restconf'","The output shows a positive response given the broker was previously in a disabled state:"]},{"l":"Disable Logging Broker","p":["An RPC is used for turning off the logging broker. A disabled logging broker doesn't write any logs despite other settings.","The input contains only the name of the the logging broker,'broker-identifier'.","Example: - Disabling the logging broker with the identifier 'restconf'","The output shows a positive response given the broker was previously in an enabled state:"]},{"l":"Enable Default Device Logging","p":["An RPC is used for setting the default device logging to 'true', logs will be written for all devices without filtering any logs based on their node ID.","The input contains only the name of the the logging broker,'broker-identifier'.","Invocation of this RPC causes clearing of the leaf-lest'enabled-devices'.","Example: - Enable default device logging in the 'netconf_messages' logging broker","The output shows a positive response given the broker was previously in a disabled state:"]},{"l":"Disable Default Device Logging","p":["An RPC is used for setting default device logging to 'false', logs will be written only for devices that are named in the leaf-list'enabled-devices'. 
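The 'Initial Configuration' described above can be sketched as follows. The field names mirror the 'logging-status' settings listed earlier, but the exact nesting inside 'config/lighty-uniconfig-config.json' is an assumption and may differ slightly: the 'netconf_messages' broker is enabled for all devices, while 'netconf_notifications' is enabled only for the 'xr6' and 'xr7' devices.

```json
"loggingController": {
    "broker": [
        {
            "broker-identifier": "netconf_messages",
            "is-logging-broker-enabled": true,
            "is-logging-enabled-on-all-devices": true
        },
        {
            "broker-identifier": "netconf_notifications",
            "is-logging-broker-enabled": true,
            "is-logging-enabled-on-all-devices": false,
            "enabled-devices": ["xr6", "xr7"]
        }
    ]
}
```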
If the leaf-list 'enabled-devices' doesn't contain a node ID, then logging in the corresponding logging broker is effectively turned off.","The input contains only the name of the logging broker, 'broker-identifier'.","Example: - Disable default device logging in the 'netconf_messages' logging broker","The output shows a positive response given the broker was previously in an enabled state:"]},{"l":"Enable Device Logging","p":["An RPC is used for enabling logging of specified devices that are identified by node IDs.","The input contains the name of the logging broker, 'broker-identifier', and a list of node IDs called 'device-list'.","Example: - Enable logging for devices with node IDs 'node1', 'node2', and 'node3' in the 'netconf_events' logging broker","The output shows a positive response:"]},{"l":"Disable Device Logging","p":["An RPC is used for turning off logging of specified devices that are identified by node IDs.","The input contains the name of the logging broker, 'broker-identifier', and a list of node IDs called 'device-list'.","Example: - Disable logging for the device with node ID 'node1' in the 'netconf_events' logging broker","The output shows a positive response:"]},{"l":"Setting Global Hidden Types","p":["An RPC is used for setting identifiers of hidden YANG type definitions. Values of leaves and leaf-lists that are described by these types are masked in the output logs.","This RPC overwrites all previously configured hidden types. An empty list of hidden types disables filtering of data values.","Filtering of values applies to all logs, including RESTCONF logs.","Example: - Setting 3 hidden types","The output shows a positive response:"]},{"l":"Setting Hidden HTTP Headers","p":["An RPC is used for overwriting the list of HTTP headers whose content is masked in the output of the RESTCONF logs.","This RPC modifies the behavior of only the 'restconf' logging broker.","HTTP headers in both requests and responses are masked.","The list of hidden HTTP headers denotes header identifiers.","The identifier of a hidden HTTP header still shows in the output logs; however, the content of such a header is replaced by asterisk characters.","Example: - Hiding the content of the 'Authorization' and 'Cookie' HTTP headers","A positive response is shown in the output:"]},{"l":"Setting Hidden HTTP Methods","p":["An RPC is used for overwriting the list of hidden HTTP methods. RESTCONF communication that includes invocation of hidden HTTP methods is not displayed in the output logs.","Both requests and responses with hidden HTTP methods are not written to the log files.","This RPC modifies the behavior of only the 'restconf' logging broker.","Example: - Hiding GET and PATCH communication in the RESTCONF logs","A positive response is shown in the output:"]},{"l":"Setting gNMI message types","p":["An RPC is used for overwriting the list of supported gNMI message types.","This RPC modifies the behavior of only the 'gnmi messages' logging broker.","Example: - Setting the SET and GET message types","A positive response is shown in the output:"]}],[{"l":"OpenAPI","p":["The OpenAPI file located in the openapi folder contains all the RPCs and data-manipulating requests (CRUD operations) and their respective examples. A shell script (named start_uniconfig_swagger.sh) automatically checks that the file is present, runs it in a docker container where the Swagger API runs, and opens the file containing all the RPCs and data-manipulating requests. 
After running the shell script, open any browser and type localhost in the URL bar.","An overview of our OpenAPI, along with all parameters and expected returns, can be found here","The website should look like the screenshot below:","openapi website","Alternatively, you can look at our live instance of the site, which always displays the latest version of the API."]},{"l":"Thread pools","p":["There are several thread pools that can be configured in UniConfig:","Jetty server,","Task executor,","Notifications,","SSH Client,","NetConf topology,","CLI topology."]},{"l":"Jetty server","p":["The Jetty server is used to aggregate connectors (HTTP request receivers) and request handlers. Connectors use the thread pool methods to run jobs that will eventually call the handle method.","Available parameters to configure:","jetty.max-threads=200","The maximum number of threads available in the jetty server. The default value is 200.","jetty.min-threads=8","The minimum number of threads available in the jetty server. The default value is 8.","jetty.idle-timeout=60","Threads that are idle for longer than this period (in seconds) can be stopped. The default value is 60.","If any of these parameters are left empty (e.g. jetty.max-threads=), the default value is used."]},{"l":"Task Executor","p":["The task executor is used to execute operations (internal operations or RPCs), either synchronously or asynchronously, on given nodes or devices.","task-executor.max-queue-capacity=10000","The maximum queue capacity for postponed tasks. The default value is 10000.","task-executor.max-cpu-load=0.9","The maximum CPU load for executing tasks. Load is expressed as a ratio so that 1.0 corresponds to 100% load, 0.9 to 90%, etc. The default value is 0.9.","task-executor.default-thread-count=","The default thread count used for executing tasks. The default value is the number of available processors * 2.","task-executor.max-thread-count=","The maximum thread count used for executing tasks. The default value is default-thread-count * 20.","task-executor.keepalive-time=60","The time in seconds before the execution of a specified task is timed out. The default value is 60.","If any of these parameters are left empty (e.g. task-executor.default-thread-count=), the default value is used."]},{"l":"Notifications","p":["A NetConf-related thread pool that handles notification subscriptions (acquiring of subscriptions, release of subscriptions, etc.).","notifications.thread-parameters.monitoring-executor-initial-pool-size=","The initial thread count used by the monitoring executor. The default value is the number of available processors.","notifications.thread-parameters.monitoring-executor-maximum-pool-size=","The maximum thread count used by the monitoring executor. The default value is initial-pool-size * 4.","notifications.thread-parameters.monitoring-executor-keepalive-time=60","The time in seconds before the execution of a specified task is timed out in the monitoring executor. The default value is 60.","If any of these parameters are left empty (e.g. notifications.thread-parameters.monitoring-executor-initial-pool-size=), the default value is used."]},{"l":"SSH Client","p":["The SSH Client uses a thread pool that handles communication with devices. This thread pool is shared between the NetConf and CLI topologies.","ssh-client.default-timeout=-1","Timeout for SSH connections (in seconds). If set to a negative value, timeouts are disabled. 
The default value is -1.","ssh-client.heartbeat-interval=30","The interval (in seconds) at which the client pings the server to check if the connection is still alive. The default value is 30.","ssh-client.heartbeat-reply-wait=60","Indicates if the heartbeat request expects a reply. Time (in seconds) to wait for a reply, a non-positive value means that no reply is expected. The default value is 60.","ssh-client.heartbeat-request=keepalive@sshd.apache.org","The heartbeat request that is sent to the server. The default value is keepalive@sshd.apache.org.","ssh-client.ssh-default-nio-workers=8","The amount of non-blocking workers that handle communication messages. The default value is 8.","If any of these parameters are left empty (e.g. ssh-client.ssh-default-nio-workers=), the default value is used."]},{"l":"NetConf Topology","p":["NetConf topology thread pools are used to connect to NetConf devices and keep the connection alive.","netconf-topology-parameters.fixed-thread-pool-thread-count=2","The fixed thread pool thread count in the NetConf topology. Used to read device capabilities and schema set up. The default value is 2.","netconf-topology-parameters.scheduled-thread-pool-thread-count=2","The scheduled thread pool thread count in the NetConf topology. Used to schedule keepalive messages. The default value is 2.","If any of these parameters are left empty (e.g. netconf-topology-parameters.fixed-thread-pool-thread-count=), the default value is used."]},{"l":"CLI Topology","p":["CLI topology thread pools are used to connect to CLI devices and keep the connection alive.","cli-topology-parameters.keepalive-thread-count=","The thread pool count dedicated ONLY to keepalive and reconnect scheduling. The default is either 2 or the number of available processors, whichever is higher.","cli-topology-parameters.init-executor-thread-timeout=120","If any thread is unused for this period (in seconds), it is stopped and recreated in the future if necessary.","cli-topology-parameters.init-executor-thread-count=","The maximum, number of threads for the flexible thread pool executor. This thread pool is used to process events and asynchronous locking of the CLI layer. The default is the number of available processors * 8.","If any of these parameters are left empty (e.g. cli-topology-parameters.keepalive-thread-count=), the default value is used."]}],[{"l":"TLS encryption for Postgres database","p":["By default all the communication to the database is not encrypted. In deployments where UniConfig is running separately from database, the traffic might be visible to unwanted eyes. Here are the steps to enabling TLS encryption for communication with the database."]},{"l":"Generating self-signed certificate using OpenSSL","p":["If you already have SSL keys generated, you need to convert them to proper format, see Converting SSL keys to proper format, otherwise you need to generate them."]},{"l":"Converting SSL keys to proper format","p":["The proper format for the SSL keys is the following:","The command which needs to be used to convert the keys properly may differ based on the format of the keys in which they are available. They can be converted using OpenSSL version 1.1.1, from command line openssl command. 
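For illustration, typical OpenSSL invocations for the two conversions mentioned above look like the following. The file names are placeholders; consult the linked OpenSSL documentation for the exact options matching the format your keys are currently in.

```bash
# Convert a PEM private key to PKCS-8 DER (unencrypted in this example):
openssl pkcs8 -topk8 -inform PEM -outform DER -in client.key -out client.pk8 -nocrypt

# Bundle the private key and client certificate into a password-protected PKCS-12 file:
openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12
```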
OpenSSL documentation provides examples for the most common cases.","To convert to PKCS-8 DER binary format, consult the documentation here: PKCS-8","To convert to PKCS-12 format, consult the documentation here: PKCS-12"]},{"l":"Enabling TLS for the database connection","p":["The configuration file that must be modified can be found on the following path relative to the UniConfig root directory:","Then edit the configuration section in dbPersistence section.","Example:","The TLS related fields are the following:","enabledTls- setting to true enables TLS encryption, default is false","tlsClientCert- specify the relative path to the Client certificate from the root UniConfig directory","tlsClientKey- specify the relative path to the Client key from the root UniConfig directory, this can be PKCS-12 or PKCS-8 format","tlsCaCert- specify the relative path to the root CA certificate from the root UniConfig directory","sslPassword- if the tlsClientKey file is encrypted with password, specify it here. It is needed for PKCS-12 keys and for encrypted PKCS-8 keys, this will be ignored for the unencrypted keys.","Do not forget to adjust other database connection parameters accordingly."]}],[{"l":"TLS-based Authentication","p":["In the default version of UniConfig TLS authentication is disabled. To enable TLS for RESTCONF you must setup two things:","Key-store and trust-store that hold all keys and certificates. If authentication of individual clients is not required, trust-store doesn't have to be created at all. Key-store must always be initialized.","Enabling of TLS in UniConfig by editing the lighty configuration file."]},{"l":"Setting of Key-store and Trust-store","p":["Steps required for preparation of key-store and trust-store:","Create a directory under the UniConfig root directory that will contain key-store and optionally trust-store files, for example:","Create a new key-store. There are two options depending on whether you already own the certificate that you would like to use for the identification of UniConfig on the RESTCONF layer.","Create a new key-store with the generated RSA key-pair (in the example the length of 2048 and validity of 365 days is used). After execution of the following command, the prompt will ask you for information about currently generated certificate that will be pushed into the newly generated key-store secured by a password(this secret will be used later in the configuration file - remember it).","Create a new key-store with already generated RSA key-pair (your certificate that you would like to use for authentication in ODL).","(Optional step) Create a new trust-store using an existing certificate (an empty truststore cannot be created). If you have multiple client certificates, they can be pushed to truststore with the same command executed multiple times (but alias must be unique for each of the imported certificates). Example:","You can easily convert OPENSSL PEM certificates to DER format that is supported by keytool:","If your application needs to own distribution's certificate, you can export certificate from generated key-pair that we have pushed into the keystore (PKCS12 or OPENSSL format):"]},{"l":"Enabling of TLS in UniConfig","p":["Preparation of the TLS key-store and trust-store is not enough for enabling TLS within the RESTCONF API. It is also required to point UniConfig to these created storages and explicitly enable TLS by setting a corresponding flag. 
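As a sketch of the key-store and trust-store steps described above (directory names, aliases and passwords are placeholders; remember the key-store password, since it is referenced later in the configuration file):

```bash
# 1. Create a new key-store with a generated RSA key pair (2048 bits, valid for 365 days):
keytool -genkeypair -keyalg RSA -keysize 2048 -validity 365 \
        -alias uniconfig -keystore tls/.keystore -storepass <keystore-password>

# 2. (Optional) Convert an OpenSSL PEM client certificate to DER and import it into a trust-store:
openssl x509 -in client1.pem -outform der -out client1.der
keytool -importcert -alias client1 -file client1.der \
        -keystore tls/.truststore -storepass <truststore-password>
```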
The configuration file that must be modified can be found on the following path relative to the UniConfig root directory:","Then, you must append the TLS configuration snippet (it must be placed under the root JSON node) to the configuration file. The following example snippet enables TLS authentication, disables user-based authentication (hence trust-store is not required at all), and points UniConfig to the key-store file that we have created in the previous section.","If your deployment requires authentication of individual RESTCONF users as well, you should also specify the trust-store fields by setting the'enabledClientAuthentication' field to 'true'.","You can also specify included or excluded cipher suites and TLS versions that can or cannot be used for establishing a secured tunnel between the Jetty server and clients. The following default configuration is based on actual recommendations (you should adjust it as needed):","It is enough to specify only the included protocols and included cipher suites (all other entries are denied), or excluded protocols and excluded cipher suites (all other entries are permitted). If you specify the same entries under both the included and excluded cipher suites or protocols, the excluded entry has higher priority. For example, the final set of usable cipher suites is: setOf(includedCipherSuites), setOf(excludedCipherSuites)."]}],[{"l":"UniConfig Clustering"},{"l":"Introduction","p":["UniConfig can be easily deployed in the cluster thanks to its stateless architecture and transaction isolation:","stateless architecture - UniConfig nodes in the cluster don't keep any state that would have to be communicated directly to other Uniconfig nodes in a cluster. All network-topology configuration and state information are stored inside a PostgreSQL database that must be reachable from all UniConfig nodes in the same zone. All Uniconfig nodes share the same database, making the database single source of truth for Uniconfig cluster.","transaction isolation - Load-balancing is based on mapping UniConfig transactions to Uniconfig nodes in a cluster (transactions are sticky). One UniConfig transaction cannot span multiple UniConfig nodes in a cluster. Southbound sessions used for device management are ephemeral - they are created when UniConfig needs to access device on the network (like pushing cnfiguration updates) and they are closed as soon as a UniConfig transactions is committed or closed.","There are several advantages of clustered deployment of UniConfig nodes:","horizontal scalability - Increasing number of units that can process UniConfig transactions in parallel. Single UniConfig node tends to have limited processing and networking resources - by increasing number of nodes in the cluster, this constraint can be mitigated. The more Uniconfig nodes in a cluster, the more transactions can be executed in parallel. Number of connected UniConfig nodes in the cluster can also be adjusted at the runtime.","high-availability - Single UniConfig node doesn't represent single point of failure. If UniConfig node crashes, only UniConfig transactions that are processed by corresponding node, are cancelled. Application can retry failed transaction, and it will be processed by next node in the cluster.","There also are a couple limitations to be considered:","Parallel execution of transactions is subject to a locking mechanism, where 2 transactions cannot manipulate the same device at the same time.","Single transaction is always executed by a single Uniconfig node. 
This means that a scope of a single transaction is limited by the number devices and their configuration a single Uniconfig node can handle."]},{"l":"Deployments"},{"l":"Single-zone deployment","p":["In the single-zone deployment, all managed network devices are reachable by all UniConfig nodes in the cluster - zone. Components of the single-zone deployment and connections between them are depicted by the next diagram.","Deployment with single zone","Description of components:","UniConfig controllers - Network controllers that use common PostgreSQL system for persistence of data, communicate with network devices using NETCONF/GNMi/CLI management protocols and propagate notifications into Kafka topics(UniConfig nodes act only as Kafka producers). UniConfig nodes do not communicate with each other directly, their operation can only be coordinated using data stored in the database.","Database storage - PostgreSQL is used for persistence of network-topology configuration, mountpoints settings, and selected operational data. PostgreSQL database can also be deployed in the cluster (outside of scope).","Message and notification channels - Kafka cluster is used for propagation of notifications that are generated by UniConfig itself (e.g., audit and transaction notifications) or from network devices and only propagated by UniConfig controller.","Load-balancers - Load-balancer is used for distributing transactions (HTTP traffic) and SSH sessions from applications to UniConfig nodes. From the view of load-balancer, all UniConfig nodes in a cluster are equall. Currently, only round-robin load-balancing strategy is supported.","Managed network devices - Devices that are managed using NETCONF/GNMi/CLI protocols by UniConfig nodes or generate notifications to UniConfig nodes. Sessions between UniConfig nodes and devices are either on-demand/emphemeral(configuration of devices) or long-term (distribution of notifications over streams).","HTTP / SSH clients & Kafka consumers - Application layer such as workflow management systems or end-user systems. RESTCONF API is exposed using HTTP protocol, SSH server is exposing UniConfig shell and Kafka brokers allow Kafka consumers to listen to the events on subscribed topics."]},{"l":"Multi-zone deployment","p":["In this type of deployment there are multiple zones that manage separate sets of devices because:","network reachability issues - groups of devices are only reachable and thus manageable from some part of the network (zone) but not from others","logical separation - there are different scaling strategies or requirements for different zones","legal issues - some devices must be managed separately with split storage, for example, because of the regional restrictions","The following diagrams represents a sample deployment with 2 zones. The first zone contains 3 UniConfig nodes while the second zone contains only 2 UniConfig nodes. Multiple zones might share a single Kafka cluster but database instances need to be split (could be running in a single postgres server).","Deployment with multiple zones","Description of multi-zone areas:","Applications - The application layer is responsible for managing mapping between network segments and Uniconfig zones. Typically this is achieved by deploying/using an additional inventory database that contains device <-> zone mappings - based on this information the application decides which zone to use.","Isolated zones - A zone contains one or more UniConfig nodes, load-balancers and managed network devices. 
The clusters in isolated zones share no information.","PostgreSQL databases - It is necessary to use a dedicated database per zone.","Kafka cluster - A Kafka cluster can be shared by multiple clusters in different zones, or there can be a single Kafka cluster per zone. Notifications from different zones can be safely pushed to common topics since there can be no possible conflicts between Kafka publishers. However, it is also possible to achieve isolation of published messages in a shared Kafka deployment by setting different topic names in different zones."]},{"l":"Load-balancer operation","p":["The responsibility of a load-balancer is to allocate a UniConfig transaction to one of the UniConfig nodes in the cluster. It is done by forwarding requests without a UniConfig transaction header to one of the UniConfig nodes (using the round-robin strategy) and afterwards appending a backend identifier to the create-transaction RPC response in the form of an additional Cookie header ('sticky session' concept). Afterwards, it is the responsibility of the application to assure that all requests that belong to the same transaction contain the same backend identifier.","The application is responsible for preserving the transaction and backend identifier cookies throughout a transaction lifetime.","The next sequence diagram captures the process of creating and using 2 UniConfig transactions with focus on load-balancer operation.","Load-balancing UniConfig transactions","The first create-transaction RPC is forwarded to the first UniConfig node (applying the round-robin strategy), because it does not contain the uniconfig_server_id key in the Cookie header. The response contains both the UniConfig transaction ID (UNICONFIGTXID) and uniconfig_server_id, which represents the 'sticky cookie'. The Cookie header uniconfig_server_id is appended to the response by the load-balancer.","The next request that belongs to the created transaction contains the same UNICONFIGTXID and uniconfig_server_id. The load balancer uses the uniconfig_server_id to forward this request to the correct UniConfig node.","The last application request again represents a create-transaction RPC. This time, the request is forwarded to the next registered UniConfig node in the cluster according to the round-robin strategy."]},{"l":"Configuration"},{"l":"UniConfig configuration","p":["All UniConfig nodes in the cluster should be configured with the same parameters. There are several important sections of the config/lighty-uniconfig-config.json file related to the clustered environment."]},{"l":"Database connection settings","p":["This section contains information on how to connect to the PostgreSQL database and connection pool settings. It is placed under the 'dbPersistence.connection' JSON object.","Example with essential settings:","Be sure that [number of UniConfig nodes in cluster] * [maxDbPoolSize] does not exceed the maximum allowed number of open transactions and open connections on the PostgreSQL side. Be aware that 'maxDbPoolSize' also caps the maximum number of open UniConfig transactions (1 UniConfig transaction == 1 database transaction == 1 connection to database)."]},{"l":"UniConfig node identification and heartbeat","p":["By default, the UniConfig node name is generated randomly. This behaviour can be changed by setting 'dbPersistence.uniconfigInstance.instanceName'. The instance name is leveraged, for example, in the clustering of stream subscriptions.","UniConfig nodes report themselves in the cluster by updating the heartbeat timestamp in the database. 
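An illustrative shape of these settings in config/lighty-uniconfig-config.json is sketched below. Only 'dbPersistence.connection', 'maxDbPoolSize', 'uniconfigInstance.instanceName' and 'heartBeat.heartbeatInterval' come from the text above; the remaining field names and all values are placeholders and may differ from the real configuration file.

```json
"dbPersistence": {
    "connection": {
        "dbName": "uniconfig",
        "username": "uniconfig",
        "password": "uniconfig",
        "maxDbPoolSize": 20
    },
    "uniconfigInstance": {
        "instanceName": "uc-node-1"
    },
    "heartBeat": {
        "heartbeatInterval": 1000
    }
}
```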
Currently, this feature is not used by any other component in the UniConfig cluster. The reporting interval can be adjusted using the 'dbPersistence.heartBeat.heartbeatInterval' field.","Example:"]},{"l":"Kafka and notification settings","p":["This section contains settings related to connections to Kafka brokers, Kafka publisher timeouts, authentication, subscription allocation, and rebalancing settings.","Example with essential settings:"]},{"l":"Load-balancer configuration","p":["The following YAML code represents a sample Traefik configuration that can be used in the clustered UniConfig deployment (deployment with 1 Traefik node). There is one registered entry-point with the identifier 'uniconfig' and port '8181'.","Next, UniConfig docker containers need to be configured with traefik labels - UniConfig nodes are automatically detected by the Traefik container as 'uniconfig' service providers. There is also the URI prefix '/rests', the name of the 'sticky cookie' 'uniconfig_server_id', and the server port number '8181' (the UniConfig web server listens for incoming HTTP requests on this port).","Values of all traefik labels should be the same on all nodes in the cluster - scaling of the UniConfig service in the cluster (for example, using Docker Swarm tools) is simple since container settings do not change.","A similar configuration to the one presented with Traefik can be achieved using other load-balancer tools, such as HAProxy."]},{"l":"Clustering of NETCONF subscriptions and notifications","p":["When a device is installed with the stream property set, subscriptions for all provided streams are created in the database. These subscriptions are always created with the UniConfig instance id set to null, so they can be acquired by any UniConfig instance in the cluster. Each UniConfig instance in the cluster uses its own monitoring system to acquire free subscriptions. The monitoring system uses a specialized transaction to lock subscriptions, which prevents multiple UniConfig instances from locking the same subscriptions. While locking a subscription, the UniConfig instance writes its id into the subscription table for the locked subscription, which means that the subscription is already acquired by this UniConfig instance. Other instances of UniConfig will no longer find this subscription as free."]},{"l":"Optimal subscription count and rebalancing","p":["With multiple UniConfig instances working in a cluster, each instance calculates an optimal range of subscriptions to manage.","Based on the optimal range and the number of currently opened subscriptions, each UniConfig node (while performing a monitoring system iteration) decides whether it should:","Acquire additional subscriptions before the optimal range is reached","Stay put and not acquire additional subscriptions in case the optimal range is reached","Release some of its subscriptions to trigger rebalancing until the optimal range is reached","When an instance goes down, all of its subscriptions will be immediately released and the optimal range for the other living nodes will change, and thus the subscriptions will be reopened by the rest of the cluster.","There is a grace period before the other nodes take over the subscriptions. 
So in case a node goes down and up quickly, it will restart the subscriptions on its own.","Following example illustrates a timeline of a 3 node cluster and how many subscriptions each node handles:","notifications-in-cluster-rebalancing","The hard limit still applies in clustered environment and it will never be crossed, regardless of the optimal range."]}],[{"l":"Uniconfig properties","p":["UniConfig can be extensively configured using application properties located in the application.properties file.","Application properties can be separated into three groups:","Runtime mutable properties can be modified in runtime (using the update-properties RPC), their changes take effect in runtime and the properties are persisted in the database.","Database persisted properties include all runtime mutable properties and some additional properties. These properties are stored in the database, which is always their primary source. With UniConfig Cloud Config, they remain constant across UniConfig instances in the same cluster and cannot be overridden via the application properties file.","Regular UniConfig properties comprise all the remaining properties. These properties can always be changed using application.properties and can differ between UniConfig instances.","Database persisted properties include the following property prefixes:","crypto","schema-settings","callbacks","notifications.kafka","netconf-default-parameters","gnmi-default-parameters","cli-default-parameters","These properties are stored in the properties table and are also known as default properties. They can be read and updated using the read-properties RPC and update-properties RPC.","After UniConfig is started, if default properties are found in the database, UniConfig will use the values in the database. For properties not found in the database, values from the first UniConfig instance after startup are used (by the application.properties file or env variables) and saved in the database for the next UniConfig instances."]},{"l":"UniConfig Cloud Config","p":["UniConfig Cloud Config is used to retain the same property values between distributed UniConfig instances connected via a message broker. It is largely the same technology as Spring Cloud Config with JDBC backend and Spring Cloud Bus. The main difference is that UniConfig Cloud Config Server and Cloud Config Client are in the same project, while Spring requires a separate Cloud Config Server application.","By calling a special signal (the Refresh Bus Endpoint call) during runtime, the system sets the same value for persisted properties in all UniConfig instances. The signal is called immediately after mutable properties are modified using the update-properties RPC. 
The specific UniConfig instance calling the signal sends Kafka events containing the changed properties, while other instances read those properties from the database and use the refresh endpoint to update them in runtime."]},{"l":"UniConfig startup with UniConfig Cloud Config","p":["UniConfig startup with UniConfig Cloud Config: startup-with-ucc","Before starting UniConfig, enable Cloud Config by using the following properties:","On startup, UniConfig checks the database for any default properties to configure:","If default properties are found in the database, UniConfig manually refreshes its property values to use those in the database.","If no default properties are found, UniConfig uses its existing properties and, once loaded, saves them in the database for the next UniConfig instances.","At the end of Spring initialisation, the Refresh Bus Endpoint is called. This refreshes default properties with the database values for all UniConfig instances connected via the Kafka refresh topic. A second refresh during the UniConfig startup cycle is required if several instances were started simultaneously and the database contains no property values to synchronize for properties (especially encryption keys).","At application runtime, if the update-properties RPC is used with default properties on input, UniConfig updates the properties in the database. It also calls the Refresh Bus Endpoint, which reloads properties for all UniConfig instances connected via Kafka."]},{"l":"UniConfig startup without UniConfig Cloud Config","p":["UniConfig startup without UniConfig Cloud Config: startup-without-ucc","Before starting UniConfig, disable Cloud Config by using the following properties:","On startup, UniConfig checks the database for any default properties to configure:","If default properties are found in the database, UniConfig manually refreshes its property values to those in the database.","If no default properties are found, UniConfig uses its existing properties and, once loaded, saves them in the database for the next UniConfig instances.","At the end of Spring initialisation, the Refresh Bus Endpoint is not called.","At application runtime, if the update-properties RPC is used with default properties on input, Uniconfig updates the properties in the database but not inside the application. This will therefore only affect the next UniConfig instance started after the properties are updated."]}],[{"l":"Performance characteristics","p":["This page contains reference performance characteristics for Uniconfig.","We try to answer the question how fast can a certain number of devices with a certain amount of configuration be installed and fully synced by","a single Uniconfig instance","3-node Uniconfig deployment with load balancer","The unit of measurement is: Number of configuration lines / per single CPU core / per minute. 
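In other words, the reference number is computed as shown below; the figures plugged in here are the Test A measurements that follow.

```text
lines / core / minute = (config lines per device) / (CPU cores) / (minutes to install one device)

e.g. 143104 json lines / 4 cores / 0.6 minutes  ≈  59626 json lines / core / minute
```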
This number can then be roughly applied to any other similar device being installed by Uniconfig."]},{"l":"CLI devices","p":["There are 2 main families of CLI devices: those using Cisco-style configuration (configuration in sections) and devices that use a one-line style of configuration (without sections), such as Ciena SAOS 8.","It is important to distinguish the performance characteristics of these 2 families."]},{"l":"Netconf devices","p":["// TBD"]},{"l":"Tree-like style of configuration","p":["Cisco style of configuration (IOS, IOS-XR etc.)","// TBD"]},{"i":"one-line-style-of-configuration-devices-saos-performance-tests","l":"One-line style of configuration devices (SAOS) performance tests","p":["Important caveats:","Measurements were performed on simulated devices = no device overhead","Measurements were performed on a local network = no network overhead","Measured on Uniconfig version 5.0.12","Simulated devices were of two flavors: half with a small configuration and half with a big configuration","Tests:","Single node deployment of Uniconfig - resources: CPU 4 cores and RAM 4 GB","3 node deployment of Uniconfig - resources per node: CPU 4 cores and RAM 4 GB","3 node deployment of Uniconfig - resources per node: CPU 6 cores and RAM 4 GB"]},{"i":"device-installation--synchronization","l":"Device installation & synchronization"},{"i":"test-a---one-node-uniconfig","l":"Test A - one node Uniconfig","p":["Devices running SAOS operating system (Ciena) and similar","Inputs: 375 x SAOS 6 devices configuration: 8834 json lines = 1510 cli config lines (brief config)","375 x SAOS 8 devices configuration: 277375 json lines = 30705 cli config lines (brief config)","Evaluation: 750 devices were registered in 7.5 hours on a single-node Uniconfig using 4 cores. Average installation duration of one device = (7.5 * 60 minutes) / 750 devices = 0.6 minutes. Average number of json lines per device = (8834 + 277375) / 2 = 143104 lines. Lines of json / per core / per minute = 143104 lines / 4 cores / 0.6 minutes = 59626. Average number of cli lines per device = (1510 + 30705) / 2 = 16107.5 lines. Lines of cli / per core / per minute = 16107.5 lines / 4 cores / 0.6 minutes = 6711","Installation & sync rate:","59,626 lines of json / per core / per minute","or","6,711 lines of raw cli configuration / per core / per minute","A single Uniconfig node is capable of installing (and fully syncing) 100 Ciena (SAOS 8) devices with 15k lines of configuration (~123k lines of formatted json in Uniconfig) in 55 minutes using 4 CPU cores","The recommended batch size for parallel installation in such a case would be about 50 devices per batch, as the parallelism is limited by the number of available cores."]},{"i":"test-b---3-nodes-of-uniconfig-with-load-balancer","l":"Test B - 3 nodes of Uniconfig with load balancer","p":["Devices running SAOS operating system (Ciena) and similar","Inputs: 750 x SAOS 6 devices configuration: 8834 json lines = 1510 cli config lines (brief config)","750 x SAOS 8 devices configuration: 277375 json lines = 30705 cli config lines (brief config)","Evaluation for the 4-core deployment: 1500 devices were registered in 5.5 hours on a 3-node Uniconfig deployment, each node using 4 cores. Average installation duration of one device = (5.5 * 60 minutes) / 1500 devices = 0.22 minutes. Average number of json lines per device = (8834 + 277375) / 2 = 143104 lines. Lines of json / per core / per minute = 143104 lines / (3 * 4) cores / 0.22 minutes = 54206. Average number of cli lines per device = (1510 + 30705) / 2 = 16107.5 lines. Lines of cli / per core / per minute = 16107.5 lines / (3 * 4) cores / 0.22 minutes = 6101","Installation & sync rate:","54,206 lines of json / per core / per minute","or","6,101 lines of raw cli configuration / per core / per minute","3 nodes of Uniconfig with a load balancer are capable of installing (and fully syncing) 100 Ciena (SAOS 8) devices with 15k lines of configuration (~123k lines of formatted json in Uniconfig) in 19 minutes using 12 CPU cores","The recommended batch size for parallel installation in such a case would be about 150 devices per batch, as the parallelism is limited by the number of available cores."]},{"i":"test-c---3-nodes-of-uniconfig-with-load-balancer","l":"Test C - 3 nodes of Uniconfig with load balancer","p":["Devices running SAOS operating system (Ciena) and similar","Inputs: 750 x SAOS 6 devices configuration: 8834 json lines = 1510 cli config lines (brief config)","750 x SAOS 8 devices configuration: 277375 json lines = 30705 cli config lines (brief config)","Evaluation for the 6-core deployment: 1500 devices were registered in 3.7 hours on a 3-node Uniconfig deployment, each node using 6 cores. Average installation duration of one device = (3.7 * 60 minutes) / 1500 devices = 0.148 minutes. Average number of json lines per device = (8834 + 277375) / 2 = 143104 lines. Lines of json / per core / per minute = 143104 lines / (3 * 6) cores / 0.148 minutes = 53717. Average number of cli lines per device = (1510 + 30705) / 2 = 16107.5 lines. Lines of cli / per core / per minute = 16107.5 lines / (3 * 6) cores / 0.148 minutes = 6046","Installation & sync rate:","53,717 lines of json / per core / per minute","or","6,046 lines of raw cli configuration / per core / per minute","3 nodes of Uniconfig with a load balancer are capable of installing (and fully syncing) 100 Ciena (SAOS 8) devices with 15k lines of configuration (~123k lines of formatted json in Uniconfig) in 13 minutes using 18 CPU cores","The recommended batch size for parallel installation in such a case would be about 150 devices per batch, as the parallelism is limited by the number of available cores."]}],[{"l":"Monitoring"},{"l":"Monitoring using Metrics","p":["UniConfig exposes multiple metrics to monitor traffic and other useful values related to its performance. The output can be in the form of plaintext log messages in the metrics.log file in the log directory in the root of the distribution, or in the form of raw data in CSV format, which can be further processed by 3rd-party visualization tools. 
CSV files are located in the metrics directory in the root of the distribution."]},{"l":"Types of metrics","p":["Gauge - reports an instantaneous value at a point in time (for example, queue size)","Meter - measures the total count of event occurrences, the total mean rate, and mean rates for the past 1, 5 and 15 minute time windows"]},{"l":"List of notable metrics exposed by UniConfig","p":["Gauges","io.frinx.uniconfig.manager.impl.task.TaskExecutorImpl.queue_size - The number of tasks in the queue waiting for execution","org.apache.sshd.server.SshServer.active_sessions - The number of active CLI sessions","org.opendaylight.controller.uniconfig.transaction.manager.api.UniconfigTransactionManager.open_transaction_count - The number of open transactions","Meters","org.opendaylight.yangtools.yang.common.RpcResult.rpc_invoke - All RPCs invoked by Uniconfig","org.opendaylight.controller.uniconfig.transaction.manager.impl.UniconfigTransactionManagerImpl.transaction_invoke - All transactions invoked in Uniconfig","io.frinx.uniconfig.shell.cli.SshTerminal.cli_message - All commands invoked in the Uniconfig CLI shell"]},{"l":"Configuration","p":["Configuration is done via a section in the \"uniconfig-lighty-config.json\" file:"]},{"l":"Example output","p":["metrics/org.opendaylight.controller.uniconfig.transaction.manager.impl.UniconfigTransactionManagerImpl.transaction_invoke.csv","log/metrics.log"]}],[{"i":"uniconfig-client-sdk","l":"UniConfig Client (SDK)","p":["The Uniconfig client SDK is implemented in Java 17 and uses Uniconfig's RESTconf API.","The SDK provides the following advantages over raw RESTconf:","The SDK is versioned and tied to a specific Uniconfig release","Every version is tested","Type safety","Additional features on top of the basic RESTconf facade, such as:","Integration with Kafka (notification listener)","Client side diff (to calculate the diff between 2 versions of config)","etc."]},{"l":"Basic device configuration management","p":["An example of a simple read & write use case on top of a single device called vnf."]},{"l":"Integration with Kafka","p":["The Uniconfig SDK implements a Kafka listener that taps into Uniconfig notification streams and allows the client to consume the notifications easily. Notifications available through Uniconfig are: device notifications, alerts and telemetry, but also Uniconfig-generated notifications in the audit log topic. For further information see Uniconfig notifications (kafka)."]},{"l":"Client side diff","p":["The Uniconfig SDK offers a diff calculation feature that can calculate a delta between a before device configuration state and an after configuration state. This diff is then transformed into a patch operation and sent to Uniconfig's RESTconf API.","Client side diff calculation is useful in specific cases where a substantial amount of configuration data is available and modified outside of Uniconfig. Typically, the entire after state would have to be pushed into Uniconfig and Uniconfig would calculate its own diff internally. 
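For comparison with the SDK's simple read & write use case on device "vnf" mentioned above, the equivalent raw RESTCONF calls are sketched here. The host, credentials and exact URIs are assumptions (they follow the usual UniConfig RESTCONF layout) and may need to be adapted to your deployment.

```bash
# Read the current configuration of the device "vnf" from the uniconfig topology:
curl -u admin:admin -X GET \
  "http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=vnf/frinx-uniconfig-topology:configuration"

# Writes go through PUT/PATCH on the same URI, followed by the commit RPC
# to push the change to the device (RPC module name assumed here):
curl -u admin:admin -X POST \
  "http://localhost:8181/rests/operations/uniconfig-manager:commit"
```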
By calculating on the client side, it is possilble to reduce the network communication between client and Uniconfig (as less data has to be sent) and to reduce how much diff calculation Uniconfig has to do itself (shifting it to the client side).","This feature leverages the same implementation of diff calculation that is used withing Uniconfig itself and requires YANG schemas to be present for the computation.","Example usage:","Notable features of client side diff:","Requires YANG schemas in order to work","JSON data have to conform to those YANG models","Allows for templated values to be present in the JSON","Produces a single PATCH operation containing all changes detected. See YANG patch in Uniconfig","Changes detected: Create, Update, Delete, Reorder (lists and leaf-lists)","Uses the same implementation as Uniconfig's calculate-diff"]}],[{"l":"Developer Guide","p":["This guide provides instructions on how to extend UniConfig to support more devices, commands and operations.","Guides on how to extend UniConfig to support a new device or new commands:","Architecture","Translation Units in general","Translation Units Documentation for FRINX Uniconfig","OpenConfig to device config mapping","Developing a new translation unit","Implementing CLI Translation Unit","NETCONF Unified Translation Unit","Native-CLI translation units","Metrics"]}],[{"l":"Architecture"},{"l":"Pre-requisite reading","p":["Honeycomb design documentation:","https://wiki.fd.io/view/Honeycomb https://docs.fd.io/honeycomb/1.18.04/release-notes-aggregator/release_notes.html","CLI plugin available presentations:","https://www.dropbox.com/sh/ry2ru5vizv7st8u/AAAntbCRHb1yS_NmEpbXG1WBa?dl=0"]},{"l":"Building on honeycomb","p":["The essential idea behind the southbound plugins comes from Honeycomb. Honeycomb defines, implements and uses the same pipeline and the same framework to handle data. The APIs, some implementations and also SPIs used in the southbound plugin's translation layer come from Honeycomb. However, the southbound plugin creates multiple instances of Honeycomb components and encapsulates them behind a mount point.","The following series of diagrams shows the evolution from Opendaylight to Honeycomb and back into Opendaylight as a mountpoint:","High level Opendaylight overview with its concept of a Mountpoint:","ODL","High level Honeycomb overview:","HC","Honeycomb core (custom MD-SAL implementation) overview:","Honeycomb's core","How Honeycomb is encapsulated as a mount point in Opendaylight:","Honeycomb's core as mountpoint"]},{"l":"Major components","p":["The following diagram shows the major components of the southbound plugin and their relationships:","CLI plugin components"]},{"l":"Modules","p":["The following diagram shows project modules and their dependencies:","CLI plugin modules"]}],[{"l":"Translation Units in general"},{"l":"Module structure","p":["Translation unit is a self contained project which implements a mapping between OpenConfig based YANG models and device specific configuration. It is used by the FRINX ODL to perform translation between device specific configuration model and standard (OpenConfig) models. A unit usually consists of:","Handlers","Readers","Writers","TranslateUnit implementation","RPCs"]},{"l":"Handlers","p":["Each complex node in YANG (container, list, augment...) 
should have a dedicated handler (Reader, Writer)","This enables extensibility, readability and the framework can easily filter and process the data this way","Unless there is a need to also handle child nodes, in which case register the handler using subtreeAdd method from the registries","There are 2 types of handlers: Readers (Read operation) and Writers(Create, Update, Delete operation)","One can implement just the readers or both readers and writers for YANG models. Writers must have counterpart readers because of reconciliation.","Readers and Writers should use the InstanceIdentifier parameter they receive in readCurrentAttributes or writeCurrentAttributes methods to find information about keys for their parent nodes. E.g. Reader registered under ID: /interfaces/interface/config will always receive keyed version of that ID: /interface/interface[Loopback0]/config. So it can use method firstKeyOf on InstanceIdentifier to get the keys.","RWUtils class contains methods for InstanceIdentifier manipulation.","Readers and writers can be easily tested and it is necessary to provide unit tests for all of them. It's important to cover readCurrentAttributes and writeCurrentAttributes with all possible scenarios (all data there, no data there, partial data there...)","Writers may use Preconditions.checkArgument() before accessing the device. Fail of the precondition check does not invoke default rollback(opposite operation) on the writer where precondition is located."]},{"l":"Base Handlers","p":["When a handler for the same YANG node is implemented to conform various devices, it tends to lead to a lot of boilerplate and duplicate code. Therefore, we should implement a base handler for such handlers. How does it work:","create a base-project (if there isn't any) to group base handlers(e.g. for an interface handler, choose interface-baseproject)","each base handler needs to be abstract and implement same interfaces as the original handler","extract common functionality in the base handler. Common functionality means that it will conform the majority of the original handlers. If a handler does not share the extracted functionality, it needs to override original interface methods, to hide the extracted functionality.","let original handlers extend base abstract handler"]}],[{"l":"Translation Units Documentation for FRINX Uniconfig"},{"l":"Auto-generated documentation","p":["A documentation to translation-units that is generated automatically from the source code and javadocs can be found here. This documentation is useful to check actual implementations, whether a functionality is implemented for a particular device and by which protocol (netconf or cli)."]},{"l":"Manual documentation","p":["This repository contains documentation for all available translation units. A translation unit is a piece of code that includes handlers to read from or write to a specific device (e.g. Cisco IOS classic router) and facilitates the translation in OpenConfig models. The purpose of this documentation is to see which commands can be read and set and how they map to the respective YANG models. Every section has a README file that provides an overview of all show and configuration commands that are supported."]},{"l":"OPERATIONAL datasets","p":["Go to operational datasets","Show commands are commands that usually on Cisco device start with'show'. 
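To illustrate the reader contract described in the Handlers section above (a keyed InstanceIdentifier plus firstKeyOf), a trimmed-down sketch of a config reader follows. It is not compilable as-is - the OpenConfig binding classes and the framework's reader interfaces are only hinted at - but it shows where the parent list key comes from.

```java
// Simplified sketch of a CLI config reader; real units implement the framework's
// reader interfaces and use the generated OpenConfig binding classes.
public class InterfaceConfigReader {

    public void readCurrentAttributes(InstanceIdentifier<Config> id,
                                      ConfigBuilder builder,
                                      ReadContext ctx) throws ReadFailedException {
        // The identifier arrives keyed, e.g. /interfaces/interface[Loopback0]/config,
        // so the parent interface name can be taken straight from the key:
        String ifcName = id.firstKeyOf(Interface.class).getName();
        builder.setName(ifcName);
        // ...run the show command, parse its output and fill in the remaining attributes
    }
}
```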
The aim is to obtain data from the router."]},{"l":"URL","p":["GET operation issued on operational datastore"]},{"l":"OPENCONFIG YANG","p":["In case of show commands this section is a sample output of a particular show command."]},{"l":"OS COMMANDS","p":["In this section we list the actual router commands with sample outputs, where the data obtained and transformed into OpenConfig YANG is marked as bold. We list show commands and outputs for each supported device OS.","IOS XR | IOS Classic/XE | Junos"]},{"l":"DEVICE YANG","p":["In case of CLI units, the unit parses the output of the CLI command directly into OC YANG. In case of Netconf units, the output is mapped to OC YANG through Device YANG (YANG model supported by the device). In case of Netconf units, the YANG is also written in documentation. This section is a link to XML unit test input testing this operation."]},{"l":"UNIT","p":["Link to github code where this show command is implemented along with unit version range."]},{"l":"CONFIGURATION datasets","p":["Go to config datasets"]},{"i":"url-1","l":"URL","p":["PUT operation with given URL will result in creating of data in config datastore DELETE operation with given URL will result in removing data in config datastore"]},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG","p":["In case of configuration commands, this section represents the HTTP body in PUT operation"]},{"i":"os-commands-1","l":"OS COMMANDS","p":["In this section we list the actual router commands that are mapped to the OpenConfig YANG model. Data transformed into OpenConfig YANG is marked as bold. We list commands for each supported device OS.","IOS XR | IOS Classic/XE | Junos"]},{"i":"device-yang-1","l":"DEVICE YANG","p":["In case of Netconf units, the device yang represents command sent to the device in device YANG model. This section is a link to XML unit test input testing this configuration."]},{"i":"unit-1","l":"UNIT","p":["Link to github code where this config command is implemented along with unit version range."]}],[{"l":"OpenConfig to device config mapping"},{"l":"Finding mapping between device and the model","p":["Preferred YANG models for device config and operational data are OpenConfig models.","These models usually represent configuration part in container config and operational part in container state. Operational data is config data + operational data.","YANG models used in UniConfig framework need to be located in https://github.com/FRINXio/openconfig. In case the desired functionality is not modeled yet, you can create new YANG with its own structure or it can augment existing OpenConfig models. Guideline, how to write OpenConfig models can be found at http://www.openconfig.net/docs/style-guide/."]},{"l":"Choosing the right YANG models","p":["Before writing a custom YANG model for a unit, it is important to check whether such a model doesn't already exist. There are plenty of YANG models available, modeling many aspects of network device management. The biggest groups of models are:","OpenConfig https://github.com/openconfig/public/tree/master/release/models","IETF https://github.com/YangModels/yang/tree/master/standard/ietf","It is usually wiser to choose an existing YANG model instead of developing a custom one. Also, it is very important to check for existing units already implemented for a device. 
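To illustrate the URL sections above (GET on the operational datastore, PUT/DELETE on the config datastore), here is a plain Java HTTP client sketch issuing a GET against a mounted node's subtree. The host, credentials and node id are placeholders, and the RFC-8040 style path follows examples used elsewhere in this documentation, so treat the exact URI as an assumption rather than a canonical endpoint.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Base64;

public final class OperationalRead {

    public static void main(String[] args) throws Exception {
        // Placeholders: UniConfig host/port, node id and credentials are illustrative.
        String host = "http://127.0.0.1:8181";
        String nodeId = "IOS01";

        // RFC-8040 style path to the mounted node's openconfig-interfaces subtree
        // (pattern taken from examples elsewhere in this documentation).
        // content=nonconfig restricts the response to state (non-configuration) data, per RFC 8040.
        String uri = host + "/rests/data/network-topology:network-topology/"
                + "topology=uniconfig/node=" + nodeId
                + "/frinx-uniconfig-topology:configuration/openconfig-interfaces:interfaces"
                + "?content=nonconfig";

        String basicAuth = Base64.getEncoder().encodeToString("admin:admin".getBytes());

        HttpRequest request = HttpRequest.newBuilder(URI.create(uri))
                .header("Accept", "application/json")
                .header("Authorization", "Basic " + basicAuth)
                .GET()
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        System.out.println(response.statusCode());
        System.out.println(response.body());
    }
}
```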
If there are any, the best approach will most likely be to use YANG models from the same family as existing units use."]},{"l":"Existing documentation","p":["There is translation-units-docs page as a single point of truth for mapping. Use`` notation for variables in the templates. This notation is postman compatible."]}],[{"l":"Developing a new translation unit","p":[".pom file of the unit","add your unit as a dependency to artifacts/pom","dependencies","handlers (readers/writers)","https://github.com/FRINXio/cli-units","https://github.com/FRINXio/unitopo-units","name of the unit should be in format device-domain-unit(e.g. ios-interface-unit, xr-acl-unit)","package name should be in format io.frinxcli|netconf., device name and domain (e.g. io.frinx.cli.unit.ios.interface)","point to correct unit parent","The easiest way how to develop a new transaction unit is to copy existing one and change what you need to make it work. E.g. if you are creating an interface translation unit, the best way is to copy existing interface translation unit for some other device, that is already implemented. You can find existing units on github:","This section provides a tutorial for developing a new translation unit.","Unit class","unit tests","What you need to add:","What you need to change:","What your unit needs to contain:"]},{"i":"best-practices-for-handlers-readerswriters","l":"Best practices for handlers (readers/writers)","p":["All comments are in English","All defined exceptions can be thrown from the code","All new dependencies and imports are actually used","All variables/methods are actually used","Before pushing the code make sure:","Chunk","Code has correct spacing","Commented out code","Comments are appropriate to the code behavior","Constants","Do not push code that contains following:","Double blank lines","java regexes","New classes/interfaces have the correct license header","New classes/interfaces/yang model have correct date","Reflection","Show commands","Static imports","Trailing whitespaces or tabs"]}],[{"l":"Implementing CLI Translation Unit","p":["CLI Translation units are located in https://github.com/FRINXio/cli-units repository. Java is used in CLI translation units."]},{"l":"Init Unit","p":["Init translation unit does not contain readers and writers but it only contains implementation of TranslateUnit. There should be only one init translation unit per device type. Purpose of the init TU is to setup CLI prompt and define rollback strategy.","The implementation of TranslateUnit needs to override methods:","SessionInitializationStrategy getInitializer(@Nonnull final RemoteDeviceId id, @Nonnull final CliNode cliNodeConfiguration)","Implement and return device specific SessionInitializationStrategy where:","Setup device CLI terminal with attributes like width and length allowing to display infinite output.","Enter desired CLI mode which will be used as default - every reader and writer gets CLI prompt in this state (e.g. EXEC mode for IOS, config mode for IOS-XR, cli mode for Junos)","These methods may be overridden if necessary:","getPreCommitHook()- method that is invoked before actual commit is written into device. For example this method can enter configuration mode.","getCommitHook()- method that invokes actual commit and should catch any error on commit. Also it should handle any post-commit actions when the commit was successful.","getPostFailedHook()- method that is invoked when commit fails. 
Should implement aborts or revert strategies.","getErrorPatterns()- method returning Java Patterns with regular expressions that match device specific error patterns.","getCommitErrorPattern()- method returning Java Patterns with regular expressions that match device specific error patterns that can be returned by the device after issuing commit."]},{"l":"Translate Unit","p":["Handlers(readers/writers) need to be registered in this method. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - should be passed to readers/writers.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must specify device type and device version during registration. Snippet below shows registration of IosXRInterfaceUnit for device type \"ios xr\" all versions.","Implementation of TranslateUnit must implement these methods:","Instance-identifier in generic reader/writer must be without keys pointing to the target composite node used in implemented reader/writer.","Instance-identifiers for YANG container and list (not for augmentations and nodes behind augmentations) are automatically generated to IIDs class (used in examples bellow) during build of openconfig project.","Return RPC services implemented in the translation unit. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - may need to be passed to RPC implementations. Default implementation returns empty Set.","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (Openconfig) translate unit\")","Return YANG models containing composite nodes handled by handlers(readers/writers). Default implementation returns empty Set if no handlers are implemented.","Set getRpcs(@Nonnull Context context)","Set getYangSchemas()","Set getSupportedVersions()","String getUnitName()","This method should also registers for general Openconfig checks:","This method should return specific device version that work with this handler.","Translate unit class must implement interface io.frinx.cli.unit.utils.AbstractUnit. Naming convention for translate unit class is device-type+openconfig-domain+Unit (e.g. IosXrInterfaceUnit). Translate unit class is usually instantiated, initialized and closed from Blueprint.","void provideHandlers()"]},{"l":"Ordering of handlers","p":["As the example shows, the ip address command must be executed after the interface command.","Registration of Ipv4ConfigWriter by using the addAfter method ensures that the OpenConfig ip address data is translated after OpenConfig interface data. That means CLI commands are executed in the desired order.","rRegistry.add","rRegistry.addNoop","rRegistry.subtreeAdd","The following sample shows a CLI translation unit with dependency between 2 writers. The unit is dedicated for interface configuration on a Cisco IOS device.","This example uses method subtreeAddAfter instead of subtreeAdd. Last parameter in this method shows dependency on writer registered under IIDs.IN_IN_CONFIG.","Use for writers handling data of whole composite node subtrees. This ensures that if only a child node is updated, the writer gets triggered. Method subtreeAdd requires a set of IIDs for all handled children, the IIDs must start from the reader itself, not from root.","Use to register noop writers","Use when a reader implementation also fills composite child nodes of target composite node. 
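Pulling the TranslateUnit and handler-ordering rules above into one place, the sketch below registers readers through generic reader wrappers and uses addAfter so that the IP-address writer only runs after the interface config writer. Class names, the IIDs constants other than IIDs.IN_IN_CONFIG, and the provideHandlers parameter types are illustrative assumptions, not a copy of a real unit.

```java
// Schematic CLI translate unit; FRINX imports are omitted and method signatures simplified.
// Naming follows the device-type + openconfig-domain + Unit convention described above.
public class IosInterfaceUnit extends AbstractUnit {

    public IosInterfaceUnit(TranslationUnitCollector registry) {
        super(registry);
    }

    protected String getUnitName() {
        // Must be unique among all translation units.
        return "IOS Interface (Openconfig) translate unit";
    }

    public void provideHandlers(ModifiableReaderRegistryBuilder rRegistry,
                                ModifiableWriterRegistryBuilder wRegistry,
                                Context context) {
        // Cli object used by readers/writers to talk to the device.
        Cli cli = context.getTransport();

        // Readers.
        rRegistry.add(new GenericConfigListReader<>(IIDs.IN_INTERFACE, new InterfaceReader(cli)));
        rRegistry.add(new GenericConfigReader<>(IIDs.IN_IN_CONFIG, new InterfaceConfigReader(cli)));

        // Writers: "interface <name>" must be sent before "ip address ...", so the IPv4
        // writer is registered *after* the writer registered under IIDs.IN_IN_CONFIG.
        wRegistry.add(new GenericWriter<>(IIDs.IN_IN_CONFIG, new InterfaceConfigWriter(cli)));
        wRegistry.addAfter(
                new GenericWriter<>(IIDs.IN_IN_SU_SU_IP_IP_AD_AD_CONFIG, // illustrative IID constant
                        new Ipv4ConfigWriter(cli)),
                IIDs.IN_IN_CONFIG);
    }
}
```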
Method subtreeAdd requires a set of IIDs for all handled children.","Use when common GenericConfigListReader, GenericConfigReader,* GenericOperListReader or GenericOperReader need to be registered.","Use when common GenericListWriter or GenericWriter are registered.","VRF writer should be between them. If the order is not expressed during registration, commands might be executed on device in an unpredictable/invalid order.","wRegistry.add","wRegistry.subtreeAdd","Writers are stored in a linear structure and are invoked in order of registration. When registering a writer a relationship with another writer or set of writers can be expressed using addBefore, addAfter, subtreeAddBefore, subtreeAddAfter methods. E.g. InterfaceWriter and VRFInterfaceWriter should have a relationship: InterfaceWriter -> VRFInterfaceWriter so that first an interface is created and only then assigned to VRF."]},{"l":"Device registration","p":["In TranslateUnit we had just created, e.g. MplsUnitXR4.java, we have to register device as a constant located../iosxr/init/IosXrDevices.java containing device type and version as described in TranslateUnit documentation.","This unit can reuse all writers/readers from existing ones, except the writer (or other handler) we want to alter or create (in our example writer for tunnel configuration). We have to create a new writer with desired behaviour and add it into provideWriters method."]},{"l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or augmentation nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds","Readers should always use overloaded blockingRead method which takes in the ReadContext since that method performs caching internally","Use full version of commands e.g. show running-config interface instead of sh run int"]},{"l":"Reading of CLI and device configuration","p":["CLI readers maintain translation between device and yang models. We're sending read commands to the device and outputs are cached. This process is shown below.","Reading CLI conf from device"]},{"i":"reading-of-configuration-from-cli-network-device---different-scenarios","l":"Reading of configuration from CLI network device - different scenarios","p":["The diagram below shows four specific scenarios:","Configuration is read using show running-config pattern for the first time","Another configuration is read using running-config pattern","cache can be used","BGP configuration/state is read using \"show route bgp 100\"","the running-config pattern is not used","BGP configuration/state is read using \"show route bgp 100\" again","cached can be used","Different scenarios"]},{"l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG. These interfaces also contain util methods which may be used for better manipulation with data. 
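A matching sketch of the list-reader side described in this section: getAllIds() parses one key per entry of the 'show running-config' output and readCurrentAttributes() is then called once per returned key. The regular expression and binding classes are illustrative, and FRINX/OpenConfig imports are omitted.

```java
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

// FRINX and OpenConfig binding imports are omitted; generics follow the
// CliConfigListReader contract described in this section (list node = Interface).
public class InterfaceReader implements CliConfigListReader<Interface, InterfaceKey, InterfaceBuilder> {

    // Config data as source of truth; only "include" style filtering is used.
    private static final String SH_INTERFACES = "show running-config | include ^interface";
    private static final Pattern INTERFACE_LINE = Pattern.compile("^interface (?<name>\\S+).*");

    private final Cli cli;

    public InterfaceReader(Cli cli) {
        this.cli = cli;
    }

    @Override
    public List<InterfaceKey> getAllIds(InstanceIdentifier<Interface> id,
                                        ReadContext ctx) throws ReadFailedException {
        // Cached read; one InterfaceKey per matching line (ParsingUtils offers similar helpers).
        String output = blockingRead(SH_INTERFACES, cli, id, ctx);
        return output.lines()
                .map(INTERFACE_LINE::matcher)
                .filter(Matcher::matches)
                .map(m -> new InterfaceKey(m.group("name")))
                .collect(Collectors.toList());
    }

    @Override
    public void readCurrentAttributes(InstanceIdentifier<Interface> id,
                                      InterfaceBuilder builder,
                                      ReadContext ctx) {
        // Invoked by the framework for every key returned from getAllIds.
        builder.setName(id.firstKeyOf(Interface.class).getName());
    }
}
```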
For more information about methods please read javadocs.","CliConfigListReader- implement this interface if target composite node in YANG is list and represents config data.","CliConfigReader- implement this interface if target composite node in YANG is container or augmentation and represents config data.","CliOperListReader- implement this interface if target composite node in YANG is list and represents operational data.","CliOperReader- implement this interface if target composite node in YANG is container or augmentation and represents operational data.","In cases where you want to invoke multiple readers on reading one YANG node, extend following abstract classes:","CompositeListReader- extend this abstract class if multiple list readers need to be invoked when reading specific list in YANG.","CompositeReader- extend this abstract class if multiple readers need to be invoked when reading specific node in YANG.","A practical example of their usage is reading network instance based on it's type. All child readers need to implement a check when the particular reader should be invoked or the parent reader should move on to the next reader.","For example child reader for bgp (located under protocol) needs to check if identifier in protocol has value BGP. Otherwise reader for bgp will be invoked even if protocol identifier is OSPF."]},{"l":"Util classes","p":["ParsingUtils- use methods of this util class if you want to parse plaintext to java object builder"]},{"l":"Plaintext parsing hints","p":["Use as specific regular expressions when parsing CLI output as possible.","For Cisco CLI devices avoid using section and other advanced formatting parameters. Only include, exclude and begin are allowed.","Use CONFIG data as the source of truth when parsing information from device. Except when parsing state containers (or containers explicitly marked as config false).","I.e. use show running-config | include router ospf instead of sh ospf when retrieving ospf routers list.","In some cases, it is not possible to just use config data e.g. sh run interface does not show any data for interfaces that have no configuration. In this case it is necessary to use operational information from e.g. show ip interface brief","Use following pattern when parsing multiline output from CLI, where it is difficult to extract lines and their relationships: i.e. when parsing configured BGP neighbors per address family following command can be used:","which results in:","This output can then be parsed by:","Remove newlines to get a single line of string","Replace \"router\" with \"\" to separate bgp routers per line","Find the line that matches required router bgp","Take that line and replace \"address-family\" with \"-family\" to get address-family neighbors per line"]},{"l":"Base Readers","p":["Each base reader should contain abstract methods:","String getReadCommand()- each child reader should fill in the read command used to get information needed for this reader. Arguments may vary and they are used to be more specific in the read command (eg. when creating a command to gather information about a specific interface, you may want to pass interface name as argument).","Pattern getLine(>)- there may be more such methods and they are used to get the regular expression needed to parse output of the command (eg. 
in case of interface reader, you will create methods getDescriptionLine, getShutdownLine etc.)","Naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node.","A writer should check whether the command it executed was handled by the device properly (by checking the output) and if not throw one of the Write/Update/Delete FailedException","Chunk templating framework is preferred to use in writers. It gives us:","Null safety","if/loop etc. inside templates","Default values and many more","Use full version of commands e.g. configure terminal instead of conf t"]},{"i":"mandatory-interfaces-to-implement-1","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). These interfaces also contain util methods which may be used for better manipulation with data. For more information about methods please read javadocs.","All writers override updateCurrentAttributes method and avoid delete/write combination, unless specified in a comment.","CliListWriter- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","CliWriter- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter.","CompositeWriter- extend this abstract class when multiple writers need to be invoked on one YANG node. The writers need to implement a check whether or not should they be invoked."]},{"l":"Base Writers","p":["Each base writer should contain abstract methods:","String updateTemplate(Config before, Config after) this method returns Chunk template used for writing and updating data on the device.","String deleteTemplate(Config data) this method returns Chunk template used for deleting data from device.","If updating data is done differently than writing new data, method String writeTemplate(Config data) might be used as well."]},{"l":"Chunk Templates","p":["Each original writer transformed to use a base writer should have all it's templates written in Chunk. We extended Chunk to achieve easier manipulation with data. There is now a new filter called update. 
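The multiline-parsing recipe described earlier in this section (flatten the output, re-split it per 'router', pick the matching 'router bgp' instance, then re-split per 'address-family') can be captured in a small self-contained helper; the sample output and AS numbers below are illustrative.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

public final class BgpNeighborParser {

    /**
     * Flattens the CLI output, splits it per "router" occurrence, picks the requested
     * "router bgp <as>" block and finally splits that block per "address-family",
     * mirroring the parsing steps described above.
     */
    public static List<String> neighborsPerAddressFamily(String cliOutput, long asNumber) {
        String flattened = cliOutput.replace("\r", "").replace("\n", " ");

        Optional<String> bgpBlock = Arrays.stream(flattened.split("(?=router )"))
                .filter(line -> line.startsWith("router bgp " + asNumber))
                .findFirst();

        return bgpBlock
                .map(block -> Arrays.stream(block.split("(?=address-family )"))
                        .map(String::trim)
                        .filter(s -> s.startsWith("address-family"))
                        .collect(Collectors.toList()))
                .orElse(List.of());
    }

    public static void main(String[] args) {
        String output = String.join("\n",
                "router bgp 100",
                " address-family ipv4 unicast",
                "  neighbor 192.0.2.1 activate",
                " address-family ipv6 unicast",
                "  neighbor 2001:db8::1 activate",
                "router bgp 200",
                " address-family ipv4 unicast");

        // Prints one line per address-family of "router bgp 100" with its neighbors.
        neighborsPerAddressFamily(output, 100).forEach(System.out::println);
    }
}
```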
It's usage is following:","\"{$data|update(mtu,mtu $data.mtu, no mtu)}\"","$data represents the data structure on which we check if it was updated from the previous state.","mtu first argument represents the name of the field that should be checked within the $data","$data.mtu second argument represents the actual string that will be sent to the device if the value of the field named in first argument was changed or didn't exist before","no mtu third argument represents the actual string that will be sent to the device if the value of the field named in first argument was deleted","optional true fourth argument, if present, lets the filter know it should send both outputs to the device, first the delete string(third argument) then the update string (second argument)","Update filter does not send any of the strings to the device, if the value did not change.","When using this filter in updateTemplate method, you must use fT() method (format template) with one pair of the arguments being \"before\", before to let the template know what data represents the previous state.","Unfortunately, Opendaylight generates boolean fields instead of Boolean and Chunk does not work with boolean fields in the same way as any other object fields. Therefore for boolean values (eg. shutdown), you cannot use update filter and checking for changes needs to be done in a traditional way."]}],[{"l":"NETCONF Unified Translation Unit","p":["Unified translation units are located in https://github.com/FRINXio/unitopo-units repository.","Kotlin is used as preferred programming language in NETCONF translation units because it provides type aliases and better null-safety."]},{"l":"TranslateUnit","p":["Translate unit class must implement interface io.frinx.unitopo.registry.spi.TranslateUnit. Naming convention for translate unit class is just name Unit. Translate unit class is usually instantiated, initialized and closed from Blueprint.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must provide set of supported underlay YANG models. Snippet below shows registration of Unit for junos device version 17.3.","Implementation of TranslateUnit must implement these methods:","toString(): String","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (OpenConfig) translate unit\")","getYangSchemas(): Set","Return YANG models containing composite nodes handled by handlers(readers/writers). It must return empty Set if no handlers are implemented.","getUnderlayYangSchemas(): Set","Return YANG module informations about underlay models used in the translation unit. These YANG modules describes configuration of NETCONF capable device.","getRpcs(underlayAccess: UnderlayAccess): Set>","Return RPC services implemented in the translation unit. Default implementation returns an emptySet. Parameter underlayAccess represents object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","provideHandlers(rRegistry: ModifiableReaderRegistryBuilder, wRegistry: ModifiableWriterRegistryBuilder, underlayAccess: UnderlayAccess): Unit","Handlers(readers/writers) need to be registered in this method. 
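Referring back to the Chunk update filter and the fT("before", before) convention described above, here is a schematic base writer showing how the templates could look. The Config type, the CliWriter usage and the fT argument pairing are assumptions based on that description; the template syntax copies the {$data|update(field, set-command, delete-command)} form shown above, and the plumbing that actually sends the rendered commands to the device is omitted.

```java
// Hypothetical base writer for interface config; only the template plumbing is shown.
// FRINX imports are omitted; the write/update/delete *CurrentAttributes methods that
// render and send these templates through the Cli object are left out for brevity.
public abstract class AbstractInterfaceConfigWriter implements CliWriter<Config> {

    private static final String UPDATE_TEMPLATE = "configure terminal\n"
            + "interface {$data.name}\n"
            + "{$data|update(mtu,mtu $data.mtu, no mtu)}\n"
            + "{$data|update(description,description $data.description, no description)}\n"
            + "end";

    private static final String DELETE_TEMPLATE = "configure terminal\n"
            + "no interface {$data.name}\n"
            + "end";

    protected String updateTemplate(Config before, Config after) {
        // The "before" pair lets the update filter detect which leaves actually changed;
        // unchanged leaves produce no command at all.
        return fT(UPDATE_TEMPLATE, "before", before, "data", after);
    }

    protected String writeTemplate(Config data) {
        // Without "before", every filter falls back to emitting the set-command.
        return fT(UPDATE_TEMPLATE, "data", data);
    }

    protected String deleteTemplate(Config data) {
        return fT(DELETE_TEMPLATE, "data", data);
    }
}
```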
underlayAccess represents object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","How to register readers/writers is described in CLI Translation Unit "]},{"l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device.","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or argument nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds"]},{"l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG.For more information about methods please read javadocs.","ConfigListReaderCustomizer- implement this interface if target composite node in YANG is list and represents config data.","ConfigReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents config data.","OperListReaderCustomizer- implement this interface if target composite node in YANG is list and represents operational data.","OperReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents operational data."]},{"l":"Base Readers","p":["Each base reader for netconf readers should be generic. The generic marks the data element within device YANG that is being parsed into. The base reader should contain abstract methods:","fun readIid(): InstanceIdentifier- each child reader should fill in the device specific InstanceIdentifier that points to the information needed for this reader. Arguments may vary and they are used to be more specific IID (e.g. when creating an IID to gather information about a specific interface, you may want to pass interface name as argument).","fun readData(data: T?, configBuilder: ConfigBuilder, )","this method is used to transform OpenConfig data (contained in ConfigBuilder) into device data (T) using .","Naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node.","The framework provides safe methods to use when handling data on device:","safePut deletes or adds managed data. Does not touch data that was previously on the device and is not handled by the writer.","safeMerge stores just the changed data into device. 
Does not touch data that was previously on the device and is not handled by the writer.","safeDelete removes data from the device only if the managed node does not contain any other information (even one not handled by the writer)"]},{"i":"mandatory-interfaces-to-implement-1","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). For more information about methods please read javadocs.","ListWriterCustomizer- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","WriterCustomizer- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter."]},{"l":"Base Writers","p":["Each base writer should be generic and contain abstract methods:","fun getIid(id: InstanceIdentifier): InstanceIdentifier-this method returns InstanceIdentifier that points to a node where data should be written","fun getData(data: Config): T- this method transforms OpenConfig data into device specific data (T)"]}],[{"l":"Native-CLI translation units"},{"l":"Modules structure","p":["The following text block displays a structure of native-cli units that are placed under root 'cli-native-units' module with 2 device types - ios-xr-5 and junos-17. There are also init units under 'ios-xr' and'junos' directories - they are still required to be implemented, however they are already part of classic translation units. The first identifier corresponds to directory name, the second identifier placed in brackets corresponds to module name. All modules are represented by 'pom.xml' files.","Description of the modules:","cli-native-units: Root module that groups all native-CLI-only modules. Submodules are specified per device-type.","unit-parent: Parent unit common for all unit modules (for example 'ios-xr-5-native-unit' and 'junos-17-native-unit'): it specifies common imports. It doesn't need any modification when a new device-type is added.","ios-xr-5-native and junos-17-native: These modules just group unit and models submodules for specific device-types. Each supported device type should have its separated module.","ios-xr-5-native-models and junos-17-native-models: They contain all YANG schemas under \"src/main/yang\" directory - device-template YANG schemas and native-CLI YANG schemas. They are described in next sections in detail.","ios-xr-5-native-unit and junos-17-native-unit: Implementations of native-CLI translation units - these modules contain only single Java file under 'io.frinx.cli.cnative.iosxr5' or'io.frinx.cli.cnative.junos17' package that is responsible for registration of YANGs and providing of device-specific information. More information can be found in the next section.","ios-xr-cli-init-unit and junos-cli-unit-unit: Reused initialization units that are required to be registered as native-cli translation units too. These units can be shared by both classic units which require implementations of handlers and native-CLI units. It is achieved by extending of'AbstractUnitWithNativeSupport' abstract class."]},{"l":"Implementation of units"},{"l":"Device-specific units","p":["All device-specific native-CLI units must extend 'GenericCliNativeUnit' abstract class. 
Description of the implemented methods:","getYangSchemas(): Returned set must contain all device-specific native-CLI schemas that are placed under models module except device-template YANG module that doesn't have to be placed to this set.","getRootInterfaces(): Returned list must contain all classes of root lists and containers (classes generated by MD-SAL generators from YANG schemas) - it simplifies transition between binding-aware and binding-independent worlds.","getSupportedVersions(): Set of supported device versions - it is used for identification of translation units.","getUnitName(): Name of the translation unit - it has only descriptive purposes.","getCliFlavour()(optional): By default, Cisco IOS CLI flavour is used. CLI flavour describes formatting of device running / candidate configuration that is used during parsing of configuration into the tree. Non-Cisco devices should override this method and provide custom CLI flavour in order to make native-CLI readers work (see next example with comments that describe CLI flavour parameters).","Example: Implementation of JUNOS 17 native-CLI unit:"]},{"l":"Init units","p":["Rules for implementation of init units are same for native-CLI and classic units - see documentation: \"Implementing CLI Translation Unit\", subsection \"Init Unit\". The only difference is in the extended class - if an init unit must be registered as both native-CLI and classic translation unit (the most usual scenario), then init unit must extend'AbstractUnitWithNativeSupport' and not just 'AbstractUnit' abstract class."]},{"l":"Device-template YANG model","p":["These YANG schemas are used for describing of device-specific patterns that are required for successful communication with remote CLI. Device-template YANG schema doesn't contain any data schema nodes, it consists only from YANG extensions that are declared in the'cli-native-extensions' model. Multiple native-CLI YANG models can import the same device-template model.","Sample device-template model for IOS XR 5.* devices:","Description of the supported extensions that can be used in a device-template:","show-command: Command used for displaying of the whole running / candidate configuration. It is used for initial population of the device configuration tree that is transformed into DOM format in native-CLI readers. The default string is \"show running-config\".","config-pattern: Template used for 'set' commands that apply a new configuration or update an existing configuration. It must contain '#' placeholder that is replaced by actual command that is going to be sent to remote CLI in native-CLI writers. Default string is \"#\" (without any prefix).","delete-pattern: Template used for 'delete' commands that remove some configuration from a device. It must contain '#' placeholder that is replaced by actual delete command that is going to be sent to remote CLI in native-CLI writers. Default string is\"no #\"."]},{"l":"Native-CLI YANG model","p":["These YANG models are used for modelling of device configuration. Currently supported schema nodes include containers, lists, choice nodes, and leaves with different types. Groupings can also be freely used for organization of YANG structure. 
The following subsections explain general structure of the native-CLI YANG model and application of different schema nodes for modelling of device configuration with examples.","Augmentations are currently not supported in native-CLI models (except the augmentations into UniConfig configuration container which is required)."]},{"l":"Structure","p":["The following YANG snippet shows structure that should all native-CLI YANG schemas follow (variable parts are marked by square brackets):","Description of variables:","[device-type]: Type of the device for which this YANG models some part of the configuration. Examples: junos17, xr5 (if it is necessary, more specific versions can be typed).","[entity]: Part of the configuration that is modelled by this YANG. Examples: interfaces, firewall, acl.","[prefix]: Prefix that is usually an abbreviation derived from the name of the model.","[template-model]: Name of the imported device-template module. Only a single device-template module can be imported, otherwise the whole module is marked as invalid and it is skipped. Afterwards, revision-date and prefix must be selected too.","[revision] with [description]: Date of the YANG modification with description, what was changed in the specific revision. Multiple revisions can be added incrementally as YANG schema is modified.","[root-grouping]: Identifier of the root grouping that contains single root container or list. Multiple root groupings are allowed when multiple root containers are lists are required. For each root grouping there must be a separate augmentation into 'configuration' container.","Importing of the device-template model is not necessary at all - in that case, the default device template is applied - Cisco IOS XR template."]},{"l":"Containers","p":["Containers are used for representations of nodes in configuration which can have at least one child node but there is only one instance of configuration that is placed under this node. For example, let assume the following two lines in the XR 5.3.4 configuration of access-lists:","In this snippet, ipv6 is modelled by container schema node with the same identifier, because 'ipv6' command word is a root node and it can contain only single instance of list with identifier access-list:","It is also possible to wrap multiple containers in a chain. For example, the following command line:","can be modelled by following containers:"]},{"l":"Lists","p":["Similarly to containers, lists also represent command words that may have multiple children nodes. However, nodes represented by lists can have multiple instances in the configuration where individual instances are represented by one or multiple keys. Values of keys are represented by command nodes that follow list command word. For example, let consider following configuration of XR 5.3.4 interfaces:","In this case, 'interface' can be modelled as list schema node with'interface' identifier. It has a one key - interface name (possible values, based on the example, are 'MgmtEth0/0/CPU0/0', GigabitEthernet0/0/0/0, GigabitEthernet0/0/0/1, and GigabitEthernet0/0/0/2).","Name of the leaves that represent list keys are not important. 
Only an order of keys, in case of multiple keys, has a significance from the view of association between configuration and YANG model.","The second example presents a scenario in which a list with multiple keys must be used (IOS XR 5.3.4 access-lists):","There are two keys - name of the access-list and sequence number of the access-list entry that must be unique in scope of the single access-list. Because of this, access-list can be modelled by following list schema node:"]},{"l":"Choices","p":["Identifiers of list, container, or leaf schema nodes are always derived from words identifying parts of the command lines. Choice schema nodes are modelled differently - they are only used for modelling of multiple non-overlapping sets of children commands. Both identifier of choice schema node and case nodes are not important (names of the case schema nodes are usually chosen based on logical option they represent). Choices are handy, if it is required to add YANG-based constraint on combinations of entered commands - wrong combinations of command would fail on device anyway.","For example, the JUNOS 17 allows configuration of different interface types:","In this example, 'hold-time' is a configuration that can be applied only on physical interfaces. On the other side, LACP can be configured only on the bundle interfaces. Because of this logical separation, it has a sense to differ between physical and bundle interfaces in YANG (common settings can be placed directly under 'interfaces' list):"]},{"l":"Leaves","p":["Leaves are used for representation of command parts that don't have next children subcommands. Command node can be represented by one or more words depending on the type of the leaf. The following types of leaves are currently supported:","1. Empty: Empty leaf can be used for commands without any value(there is only one command word that identifies leaf). For example, JUNOS 17 interface 'disable' command:","can be modelled as leaf with empty type:","2. Types with primitive value: Supported primitive types include boolean, string, decimal, int8, int16, int32, int64, uint8, uint16, uint32, and uint64. All of these types can be used for commands that has a single string, boolean, or numeric value (types constrained by a range). The following commands can be modelled as one of these types(different JUNOS 17 damping settings):","YANG representation of leaves 'half-life' and 'suppress':","3. Enumeration- If there are multiple but finite set of possible strings assignable to the command, then the enumeration type should be used. Let consider the following variations of the 'mode' command (IOS XR 5.3.4 LACP configuration):","In this example, 'mode' is modelled as leaf with type enumeration with three possible values:","4. Bits: This type of leaf can be used in scenarios in which there are multiple possible values assignable to the command (similarly to enumeration), but they are not mutually exclusive - different values can be combined in a chain of strings. Consider the following options how to configure Unicast Reverse Path Forwarding on IOS XR 5.3.4:","The part of the command line starting by word 'any' can continue with random combination of options 'allow-self-ping' and 'allow-default' with random ordering too. Because of this reason, leaf with identifier 'any' has bits type:","5. Blob-data- It is a special type of leaf defined in'cli-native-extensions' that can be used for the whole command section with a random structure. 
It is handy for the parts of the configuration that are too complicated to be represented by different YANG structures. Internally, 'blob-data' is a type definition derived from string type. For example, JUNOS 17 firewall rules fulfils high complexity:","Commands 'from' and 'then' can be represented by leaves with'blob-type':"]}],[{"l":"Metrics"},{"l":"Monitoring Uniconfig performance","p":["Dropwizard Metrics is the framework of choice to monitor performance."]},{"l":"Registry naming","p":["All the metrics are currently stored in the uniconfig registry. It can be accessed like so:"]},{"l":"Metric types","p":["All the available metric types can be seen in the documentation."]},{"l":"Naming convention","p":["There are various best practice articles on how to name metrics but one thing is common: It should be clear what is measured."]},{"l":"Adding new metrics"},{"l":"Adding a Meter","p":["Obtain a Meter and then mark all the method calls you want to measure."]},{"l":"Adding a Gauge","p":["For Gauge method getValue() needs to be implemented. It can be done less verbously with lambda expressions so that we avoid writing boilerplate code for an anonymous class:","Here we create a Gauge that returns Integer value, access is synchronized in this case to avoid race conditions."]},{"l":"Tags","p":["Tags are currently not available in the version 4.2.x, although support for them is planned for future major release."]},{"l":"Reporters","p":["Current available reporters are reports to CSV files and reporting via Slf4j to log file."]}],[{"l":"Release notes","p":["Release notes for UniConfig 4.2.10","Release notes for UniConfig 4.2.3","Release notes for UniConfig 4.2.4","Release notes for UniConfig 4.2.5","Release notes for UniConfig 4.2.6","Release notes for UniConfig 4.2.7","Release notes for UniConfig 4.2.8","Release notes for UniConfig 4.2.9","Release notes for UniConfig 5.0.1","Release notes for UniConfig 5.0.10","Release notes for UniConfig 5.0.11","Release notes for UniConfig 5.0.12","Release notes for UniConfig 5.0.13","Release notes for UniConfig 5.0.14","Release notes for UniConfig 5.0.15","Release notes for UniConfig 5.0.16","Release notes for UniConfig 5.0.17","Release notes for UniConfig 5.0.18","Release notes for UniConfig 5.0.19","Release notes for UniConfig 5.0.2","Release notes for UniConfig 5.0.20","Release notes for UniConfig 5.0.21","Release notes for UniConfig 5.0.22","Release notes for UniConfig 5.0.23","Release notes for UniConfig 5.0.24","Release notes for UniConfig 5.0.25","Release notes for UniConfig 5.0.3","Release notes for UniConfig 5.0.4","Release notes for UniConfig 5.0.5","Release notes for UniConfig 5.0.6","Release notes for UniConfig 5.0.7","Release notes for UniConfig 5.0.8","Release notes for UniConfig 5.0.9","Release notes for UniConfig 5.1.0","Release notes for UniConfig 5.1.1","Release notes for UniConfig 5.1.10","Release notes for UniConfig 5.1.11","Release notes for UniConfig 5.1.12","Release notes for UniConfig 5.1.13","Release notes for UniConfig 5.1.14","Release notes for UniConfig 5.1.2","Release notes for UniConfig 5.1.3","Release notes for UniConfig 5.1.4","Release notes for UniConfig 5.1.5","Release notes for UniConfig 5.1.6","Release notes for UniConfig 5.1.7","Release notes for UniConfig 5.1.8","Release notes for UniConfig 5.1.9","Release notes for UniConfig 5.2.0","Release notes for UniConfig 5.2.1","Release notes for UniConfig 5.2.2"]}],[{"i":"uniconfig-507-release-notes","l":"Uniconfig 5.0.7 Release Notes"},{"i":"new-features","l":"✅ New 
Features","p":["Implementation of context-match shell operation"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed establishing of NETCONF stream sessions","Fixed NetconfDeviceCommunicatorTest","Fixed Uniconfig client json parser tests","Fix building history output","Fixed execution of YANG action under some list entry from shell","Fixed ordering transaction log by date","Fixed types of the network-instance/interfaces","Making subscription monitoring loop more robust","Cli session closed/disconnected","Fixed removing of data-change-event subscription","Fixed merging template attribute to replaced node","CLI shell: harmonised composite key delimiter input"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Updated netconf-node-topology:concurrent-rpc-limit parameter","Refactored global DOMSchemaService","Optimization of calculate-diff RPC","Swagger and YangPackager improvements","Fix all owasp sec issues level 8","Upgrade sshd libs to version 2.8.0","Upgrade and cleanup usage of jaxb","Upgrade jetty/jersey/jax-rs dependencies","Reorganisation of NETCONF connection parameters"]}],[{"i":"uniconfig-506","l":"UniConfig 5.0.6"},{"i":"new-features","l":"✅ New Features"},{"l":"Expose operational data about transactions","p":["It would improve visibility what transactions are open on uniconfig instance - when these transactions have been open and what nodes have been changed in the transaction.","Transaction data:","identifier (uuid)","trace id / different parameter (once we support tracing)","creation time","last access time","idle timeout, hard timeout","list of changed nodes (incl. topologies)","additional context (random string, text column)"]},{"l":"Implement metric collection and reporting in Uniconfig","p":["Collect and report metrics such as:","TX pre minute","RPC calls per minute","Task execution queue size","Netconf msg sent count","CLI command sent count","…","Reporting part could be just logging the state of metrics for the time being"]},{"i":"collect-open-transactions-data-in-collect_diag_infosh","l":"Collect open transactions data in collect_diag_info.sh","p":["Please enhance debug collection script to collect details of following","Open Transaction , Read or Read-Write and if possible module which has opened the transaction","For example, this is how NCS displays.","This could help in debugging slowness issues caused if there is any transaction leak."]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Set OWASP dependency check plugin to level 9","p":["decrease owasp level to 9 (in distribution/packaging/zip pom)","fix all dependency issues so that uniconfig will successfully build"]},{"i":"cli-uc-shell---show-transaction-log-in-ordered-list--add-brief-option","l":"CLI UC shell - show transaction log in ordered list & add \"brief option\"","p":["Currently we display the transaction log as a json without ordering. We should assume that the transaction log can become very large and should still be manageable to display. Hence we are proposing the following improvements:","always show the transaction log as an ordered list. Order by transaction timestamp. The most recent transaction should be at the bottom of the list.","add a \"brief\" option to that command and display only one line per transaction log. 
Similar like this"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"OpenAPI .yaml file generating incorrectly","p":["Build, Collaborate & Integrate APIs | SwaggerHub","cli-unit-general API yaml seems incorrectly generated, URIs are wrong"]},{"i":"syst_-data-change-subscriptionscontentnonconfig-not-working","l":"SYST_ data-change-subscriptions?content=nonconfig not working","p":["Based on documentation Kafka notifications","test:","test here https://gerrit.frinx.io/c/system-tests/+/13155"]}],[{"i":"uniconfig-505","l":"UniConfig 5.0.5"},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Reconfigure swagger generator for versa to produce desired depths for all APIs","p":["Since we need to create APIs with depth 4, I have noticed APIs are created to the last container when the depth of yang is less than 4. Can you make API is not generated for the last container, for example 2nd API in the below not required as “global” is the leaf container. This change will reduce size of yaml file and number of APIs"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Uniconfig transaction is not thread-safe","p":["It is not safe to use same uniconfig transaction simultaneously by multiple user-side threads because underlying database connection/transaction is not thread-safe in case of PostgreSQL driver and UniConfig is not doing any additional synchronisation.","Read: Chapter 10. Using the Driver in a Multithreaded or a Servlet Environment","Behaviour that was also observed in UniConfig (it is Oracle DB, but symptoms are similar): Working with multiple threads sharing a single connection"]},{"i":"failed-to-find-node--in-the-topology-uniconfig","l":"Failed to find node '' in the topology 'uniconfig'","p":["It happened already a couple of times when a workflow task failed during the execution of getting data from the device. In VFZ we have a specific task for this operation called uniconfig_read_structured_device_data which gets a specific config from the device","During this execution, there were other devices running in the parallel executing the same task and 2 read_and_execute_rpc_cli tasks too.","The device had the records in the node and mounpoint tables in the UC DB.","DONE, MOVE TO 5.0.4"]}],[{"i":"uniconfig-504","l":"UniConfig 5.0.4"},{"i":"new-features","l":"✅ New Features"},{"l":"Adding option to use json-path also for selection of some subtrees","p":["Currently, jsonpath language can be used in UniConfig only for filtering of data. However, the language itself allows also to select some data using provided json-path.","We need to expose this functionality in UniConfig API using some query parameter (the similar way as it is done for filtering) and also expose this functionality in the uniconfig-client."]},{"i":"shell-scrolling-output---more--","l":"Shell: scrolling output (--more--)","p":["Long UniConfig shell output should be displayed using some scrolling mechanism (equivalent to ‘more' or 'less’ linux tools)."]},{"i":"add-show-history-command-to-uniconfig-shell","l":"Add 'show history' command to uniconfig-shell","p":["It should display last N commands that were executed in the shell. Syntax:","Parameter 'max-number-of-output-commands' should be optional.","This command should be available from both operational and configuration modes."]},{"l":"Add support for aliases inside uniconfig shell","p":["There should be some configuration file in the config directory that will contain defined aliases. 
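The transaction thread-safety problem described above (one PostgreSQL connection shared by several user-side threads without additional synchronisation) comes down to basic JDBC rules; the sketch below shows the two usual remedies. The table name and DataSource wiring are illustrative, not UniConfig's persistence code.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.sql.DataSource;

public final class TransactionIsolationSketch {

    // Anti-pattern: several threads sharing one Connection. The PostgreSQL JDBC driver
    // does not make a single connection safe for concurrent statements, so either
    // serialize access yourself or give each thread its own connection.
    private final Connection sharedConnection;
    private final DataSource dataSource;

    public TransactionIsolationSketch(Connection sharedConnection, DataSource dataSource) {
        this.sharedConnection = sharedConnection;
        this.dataSource = dataSource;
    }

    /** Option 1: explicit synchronisation around the shared connection. */
    public void insertSynchronized(String nodeId) throws SQLException {
        synchronized (sharedConnection) {
            try (PreparedStatement ps =
                         sharedConnection.prepareStatement("INSERT INTO nodes(node_id) VALUES (?)")) {
                ps.setString(1, nodeId);
                ps.executeUpdate();
            }
        }
    }

    /** Option 2 (preferred): one connection per thread, obtained from a pool. */
    public void insertPerThreadConnection(String nodeId) throws SQLException {
        try (Connection connection = dataSource.getConnection();
             PreparedStatement ps =
                     connection.prepareStatement("INSERT INTO nodes(node_id) VALUES (?)")) {
            ps.setString(1, nodeId);
            ps.executeUpdate();
        }
    }
}
```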
It should support also place-holders/variables for both values and arrays.","Supporting autocomplete on aliases."]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Improve maven plugin for generation Java classes","p":["yang packager: generate sources only for latest repository","settings: package name - option to change it","setting: disable prefix + javadoc on data-elements"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"NETCONF sessions used for receiving NETCONF notifications stop working","p":["UniConfig does not use keepalive messages for checking, if NETCONF session used for receiving of NETCONF notifications is still alive and triggering reconnection procedure. As a consequence, if connection is dropped without explicit TCP interruption, then UC will not find it out and doesn’t try to re-create connection. However, device is using some form of TCP keepalive messages - device will drop connection after some time.","FIX: Uniconfig is enables and tracks keepalive messages also for NETCONF sessions that are used for NETCONF streams"]},{"l":"Optimisation of jsonb-filtering","p":["It seems that jsonb-filtering causes reading of whole configuration from PG into UC even if it should not be necessary:","checking existence of the node - currently it works by using DOM ‘exists’ operation that reads whole configuration from DB","deriving uniconfig-native prefix / YANG repository - it is derived from configuration, not from DB metadata","FIX: stopping verification of node, if jsonb-filer is used. It required reading of whole config from DB to Uniconfig what making the call much slower."]},{"i":"uc-shell-after-configrequest-commit-the-prompt-was-changed-unexpectedly-from-request-to-config","l":"Uc shell: after config/request commit the prompt was changed unexpectedly from request> to config>","p":["Previous behaviour:","After fix:"]},{"i":"uc-shell-config-mode--show-or-delete---create-object-option-should-be-removed","l":"Uc shell: config mode / show or delete - CREATE OBJECT option should be removed","p":["Previous behaviour:","e.g. here (create new template) should not be present here. Similarly others places in program. Behaviour like this is not expected by user.","After fix:"]},{"i":"uniconfig-503-prints-out-error-on-start","l":"Uniconfig 5.0.3 prints out error on start","p":["Fix: error message in the log was displayed when notifications were set to false in the lighty-uniconfig-config.json file. This error message is no longer displayed."]},{"l":"Flyway migration failed","p":["Migration of data in the database when switching to another version throws error causing that uniconfig is unable to start."]},{"l":"Yang patch operation does not work correctly with leaf list","p":["There are several issues with yang patch when operating on leaf-lists. In particular I have found issues with the insert operation and the merge operation.","Merge operation case:","If the leaf list does not exist, then the merge operation pass without problems. 
However, if the leaf list does already exist, then the merge fails with following error message:","Insert operation case:"]},{"l":"PATCH operation does not work with some paths and target combination","p":["Overview RestConf PATCH operation does not work with certain combination of URL and target","Details","Request URL ( is the UniConfig host, is the id under which the Sonic device is installed):","http:///rests/data/network-topology:network-topology/topology=uniconfig/node=/frinx-uniconfig-topology:configuration/openconfig-interfaces:interfaces/interface=Ethernet52","Method: PATCH Request body:","Produce this response:","Request body:","Produce empty response body with error code 500. UniConfig logs have this record:"]},{"l":"Portchannel trunk-vlans replace","p":["When one trunk vlan is already set on porchannel, then put request to change trunk vlans list returns:","Unsupported type of node …","Current workaround is deleting whole list of trunk-vlans, after that single put request to add removed and new trunk-vlans.","Postman collection is attached. Replace can be also done using this gnmic command:"]}],[{"i":"uniconfig-503","l":"UniConfig 5.0.3"},{"i":"new-features","l":"✅ New Features"},{"l":"Adding failed transactions into transaction log","p":["Description","Previously, only successfully committed transactions have been written into transaction log.","Added state to transaction log entry that determines, if transaction has been successfully or not committed - both successful and failed transactions are part of transaction log.","Documentation","Transaction Log | Frinx Docs","API","Added ‘status' leaf and split ‘commit-time’ into ‘last-commit-time’ and 'failed-commit-time’ (YANG module transaction-log):"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Integrated OWASP dependency check tool into UniConfig","p":["3-rd party libraries are check against security issues during building of UniConfig distribution. If there are some issues with security level higher than configured threshold, built will fail.","Set security threshold level to 10 and fixed corresponding critical errors."]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"i":"fixed-updating-of-leaf-list-content-orderedunordered-on-netconf-device","l":"Fixed updating of leaf-list content (ordered/unordered) on NETCONF device","p":["Unordered leaf-list must be updated using following steps:","removing all items that are not in the updated list","inserting all items that are only in the updated list","There was 1 bug: step 1. 
never happened.","Ordered leaf-list must be updated using following steps:","inserting/moving new/existing items in the correct order (using ordering parameters) [1]","removal of all items that are not in the updated list [2]","There was 1 bug - all items were removed and afterwards re-inserted without usage of special positional attributes - it created conflict on NETCONF layer between edits and thus splitting of NETCONF traffic into 2 edit-config messages."]},{"i":"device-discovery-32-prefix-changed-to-inclusive","l":"Device discovery /32 prefix changed to inclusive","p":["User must be able to ping network with prefix /32 - it must be rendered as single host (special case)."]},{"i":"uniconfig-shell---exit---it-is-expected-to-hit-enter-twice","l":"UniConfig shell - exit - it is expected to hit enter twice","p":["Previous behaviour:","After fix, UniConfig will print user some message, that one more is expected to leave SSH session."]}],[{"i":"uniconfig-502","l":"UniConfig 5.0.2"},{"i":"new-features","l":"✅ New Features"},{"l":"Upgrading templates","p":["Added ‘upgrade-template' RPC into 'template-manager’ YANG module:","API","Both settings are related to auto-upgrading process - they don’t influence execution of RPC which can be still done manually.","Configuration","Description","Documentation","if it fails, we will return standard RESTCONF RFC-8040 error container","Implemented automation of the upgrading process - calling of this RPC automatically at initialisation of UniConfig for all templates present in the DB that don’t use the latest repository.","Implemented RPC for upgrading template to specific YANG repository.","output template name (optional, default value = input template name)","RPC input:","RPC output:","Supplemented template configuration by 2 new settings (lighty-uniconfig-config.json) - enabledTemplatesUpgrading and maxBackupTemplateAge.","template name (mandatory)","Templates Manager| Frinx Docs","without body, just status message","YANG repository name (optional, default value = latest YANG repository)"]},{"l":"Connection notifications","p":["Description","Connection notifications are generated after state of southbound CLI/NETCONF/GNMI node is updated - either status message or connection status.","Notifications are published into dedicated Kafka topic.","They are useful especially for debugging connection issues between UniConfig and network devices.","Documentation","Kafka Notifications | Frinx Docs","API","Structure of notifications are described following YANG module 'connection-notifications':","Added settings used for configuration of Kafka topic and enabled/disabled state (YANG module kafka-brokers):","Configuration","Supplemented corresponding settings into in the lighty-uniconfig-config.json file (by default, these notifications are enabled if globally notification system is enabled):"]},{"l":"Configurable transaction idle-timeout","p":["Description","Introduced new transaction parameter that can be used at creation of new transaction and overrides global idle-timeout.","After inactivity of the transaction, it is automatically closed and an exception will be thrown if user tries to invoke some operation on the transaction.","Documentation","Example request with timeout parameter | Frinx Docs","API","Format of the query parameter 'timeout':","Uniconfig-client","Introduced TransactionParameters class - object of this class can be provided at creation of new transaction. 
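Because the connection notifications above are published to a dedicated Kafka topic, any standard Kafka consumer can watch them; the broker address and topic name below are placeholders (the topic is configurable in lighty-uniconfig-config.json), so this is a generic sketch rather than a documented UniConfig client.

```java
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public final class ConnectionNotificationListener {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholders: broker address and topic name depend on the deployment.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "uniconfig-notification-watcher");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(List.of("connection-notifications"));

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
                for (ConsumerRecord<String, String> record : records) {
                    // Each record carries one notification describing a southbound
                    // CLI/NETCONF/gNMI node's connection-status change.
                    System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
                }
            }
        }
    }
}
```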
{"l":"Added option to disable validation phase at commit","p":["Description","UniConfig uses a 3-phase commit procedure - validation, confirmed-commit, confirming-commit. Validation is currently always executed on nodes that support validation and have been installed with validation enabled.","This feature introduces a flag in the commit RPC that lets the user control execution of the validation phase.","Documentation","RPC commit | Frinx Docs","API","Added a 'do-validate' field into the commit RPC input (checked-commit does not support this feature for now):","Uniconfig-client","In the uniconfig-client, validation is disabled by default (the opposite of the RESTCONF API behaviour).","Exposed a new method in the DOMReadWriteTx interface:"]},{"l":"Modification of connection parameters after the first installation without uninstallation","p":["Description","After a CLI/NETCONF device has been installed, it is possible to update some of its connection/mount parameters (for example, 'host' or 'password').","The user can read and update connection parameters under the 'cli' or 'topology-netconf' topology, under specific network-topology nodes.","Afterwards, UniConfig uses the updated connection parameters the next time it creates a connection to the device.","NETCONF sessions used for receiving NETCONF notifications are also updated at the next monitoring iteration.","Documentation","Updating installation parameters | Frinx Docs","Uniconfig-client","Example:"]},
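For the RESTCONF API, such an update is a plain read/write of the node in the corresponding topology. A minimal sketch is shown below, assuming a NETCONF node installed as 'R1' and the usual netconf-node-topology leaf names; treat the path and leaf names as illustrative and verify them against the Updating installation parameters documentation.

```python
import requests

BASE = "http://127.0.0.1:8181/rests/data"  # assumed UniConfig host and port
AUTH = ("admin", "admin")                  # assumed credentials
NODE = (
    f"{BASE}/network-topology:network-topology/"
    "topology=topology-netconf/node=R1"    # 'R1' is a hypothetical node-id
)

# Read the currently stored connection parameters of the installed node.
print(requests.get(NODE, auth=AUTH).json())

# Update a single mount parameter - here the password leaf (leaf name assumed).
resp = requests.put(
    f"{NODE}/netconf-node-topology:password",
    json={"netconf-node-topology:password": "new-secret"},
    auth=AUTH,
)
resp.raise_for_status()  # the new value is used at the next connection to the device
```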
{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Improved aggregation of NETCONF messages","p":["Non-overlapping edit-config messages are already aggregated into one edit-config message that is sent to the NETCONF server. However, this aggregation was primitive - it just serialised all modified subtrees and stacked them under the root element, without considering that the paths to these subtrees may overlap.","After this improvement, the edit-config message contains compressed subtree structures without duplicated 'wrapper' elements."]},{"i":"added-session-id-to-netconf-logs-netconf-messages","l":"Added session-id to NETCONF logs (netconf-messages)","p":["Description","Previously, only Netty's internal channel-id was displayed in the logs.","After this improvement, the NETCONF-specific session-id, returned from the device during the exchange of capabilities, is used.","Documentation","Logging Framework | Frinx Docs","Making connection-manager unit tests more robust","Preventing random failures caused by the multi-threaded environment.","Improve error message if device/template doesn't exist","If a device/template or another node doesn't exist, UniConfig should return a user-friendly error message stating that the corresponding node doesn't exist, not a YANG-related error.","Creation of a new node with a specified YANG repository is still allowed.","Error message before the fix:","Error message after the fix:"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Fixed YANG packager that does not catch broken submodules","p":["Description","Fixed reporting of two kinds of issues related to YANG submodules:","A submodule contains a “belongs-to” statement pointing to some parent. That parent should contain an “include” statement. When the parent does not contain this statement, UniConfig marks the submodule as broken.","When a submodule contains a “belongs-to” statement, but the parent does not exist.","Improved the error message output from the YANG packager utility.","Documentation","Device Discovery | Frinx Docs"]},{"i":"fixed-device-discovery-behaviour-for-network-with-31-prefix","l":"Fixed device-discovery behaviour for network with /31 prefix","p":["Description","Use cases:","192.168.1.0/32 - returns empty output, there aren't any usable hosts that can be reached","192.168.1.0/31 - special case, the device-discovery component should verify two hosts - .0 and .1","192.168.1.0/30 - returns 192.168.1.1, 192.168.1.2","Documentation","Device Discovery | Frinx Docs"]},
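The address math behind these edge cases (and the /32 change in 5.0.3 above) can be reproduced with Python's standard ipaddress module, which treats a /31 as an RFC 3021 point-to-point network with two usable addresses. This is only an illustration of the expected host expansion, not UniConfig code (Python 3.8+ assumed for the /31 behaviour of hosts()).

```python
import ipaddress

def discovery_hosts(cidr):
    """Return the host addresses a discovery scan is expected to probe for a prefix."""
    net = ipaddress.ip_network(cidr, strict=False)
    if net.prefixlen == 32:
        # Single host - inclusive /32 handling as introduced in UniConfig 5.0.3
        # (in 5.0.1 a /32 still produced an empty result).
        return [str(net.network_address)]
    # /31 yields both addresses; /30 and shorter exclude network and broadcast addresses.
    return [str(host) for host in net.hosts()]

print(discovery_hosts("192.168.1.0/32"))  # ['192.168.1.0']
print(discovery_hosts("192.168.1.0/31"))  # ['192.168.1.0', '192.168.1.1']
print(discovery_hosts("192.168.1.0/30"))  # ['192.168.1.1', '192.168.1.2']
```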
{"i":"fixed-calculate-diff-operation-augmentation-nodes","l":"Fixed calculate-diff operation (augmentation nodes)","p":["Augmentation nodes were skipped and unwrapped while reading data from the device. This resulted in failed or incorrect diff calculation on the UniConfig layer.","After this fix, UniConfig skips only those augmentation nodes that contain only non-config data nodes (YANG 'config false' statement)."]}],[{"i":"uniconfig-501","l":"UniConfig 5.0.1"},{"i":"new-features","l":"✅ New Features"},{"i":"propagation-of-data-change-events-from-uniconfig--unistore-configuration","l":"Propagation of data-change-events from 'uniconfig' / 'unistore' configuration","p":["Description","Implemented propagation of data-change-events into a distinct Kafka topic. Data-change-events are currently supported per node in the 'uniconfig' and 'unistore' network-topologies.","Using a subscription, the user specifies which subtrees are observed for data changes. Afterwards, data-change-events are generated by UniConfig instances whenever a transaction is committed and the committed changes touch the subscribed subtrees.","API","Created a new YANG module that defines the data-change-events structure in the form of YANG notifications and RPC calls for manipulation/reading of subscriptions:","Documentation","Kafka Notifications | Frinx Docs","Configuration","Added settings to the lighty-uniconfig-config.json file:","dataChangeEventsEnabled - turns generation and distribution of data-change-events on/off (enabled by default)","dataChangeEventsTopicName - name of the Kafka topic (the default identifier is 'data-change-events')","Java client","Example of how to use data-change-events as triggers for a callback inside the UniConfig Java client:"]},
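Outside the Java client, the events can also be consumed straight from the Kafka topic. A minimal sketch with the kafka-python package is shown below, assuming a broker on localhost:9092 and the default topic name 'data-change-events'; the payload structure is the one defined by the YANG module mentioned above.

```python
import json
from kafka import KafkaConsumer  # pip install kafka-python

consumer = KafkaConsumer(
    "data-change-events",                # default topic name from the configuration above
    bootstrap_servers="localhost:9092",  # assumed Kafka broker address
    value_deserializer=lambda raw: json.loads(raw.decode("utf-8")),
)

for record in consumer:
    # Each record carries one data-change-event emitted after a committed transaction.
    print(record.key, record.value)
```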
{"l":"Added config option to disable immediate-commit model","p":["Description","The immediate-commit model is dangerous in some cases, because changes are automatically committed to managed network devices.","Added an option to disable the immediate-commit model globally.","Configuration","New setting 'isImmediateCommitEnabled' in the lighty-uniconfig-config.json:","The default value is 'true'."]},{"l":"Calling replace-config-with-oper after sync-from-network in the immediate-commit-model","p":["In the immediate-commit model, if the user called the sync-from-network operation, it behaved like a 'sync-to-network' operation:","reading the configuration from the device","resolving the diff between the actual state (device) and the intended state (the last saved configuration in the database)","sending the resolved diff to the device - reverting changes that had been made on the device side","This is dangerous if the network device is configured manually by the user or by another tool.","Fixed by calling the replace-config-with-oper operation after the sync-from-network operation and before committing the temporary transaction created in the immediate-commit model session. As a result, the loaded configuration is stored in the database without performing any action on managed devices.","This change alters only the immediate-commit model. The build-and-commit model stays unaltered."]},{"l":"Making default CLI connection parameters configurable","p":["Description","There are a couple of CLI connection parameters with default values defined in the cli-topology YANG module that can be specified at installation of a device.","This feature allows the user to adjust these defaults without repeatedly setting them in the install-node RPC request.","Priority of install parameters (highest first):","Parameter set in the install RPC request","Default parameter set in the database","Default parameter from the YANG model","Documentation","Device installation | Frinx Docs","API","Exposed the default CLI settings in a distinct container that is accessible using the RESTCONF API (module cli-topology):","Exposed the settings in the UniConfig shell - configuration mode / settings container."]},{"l":"Making default NETCONF connection parameters configurable","p":["Description","There are a couple of NETCONF connection parameters with default values defined in the netconf-topology YANG module that can be specified at installation of a device.","This feature allows the user to adjust these defaults without repeatedly setting them in the install-node RPC request.","Priority of install parameters (highest first, as sketched below):","Parameter set in the install RPC request","Default parameter set in the database","Default parameter from the YANG model","Documentation","Device installation | Frinx Docs","API","Exposed the default NETCONF settings in a distinct container that is accessible using the RESTCONF API (module netconf-node-topology):","Exposed the settings in the UniConfig shell - configuration mode / settings container."]},
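The precedence of these parameters can be summarised in a few lines. This is a conceptual sketch of the documented lookup order (install request, then database default, then YANG default), not UniConfig's actual implementation.

```python
def resolve_parameter(name, install_request, db_defaults, yang_defaults):
    """Pick the effective connection parameter using the documented priority order."""
    for source in (install_request, db_defaults, yang_defaults):
        if source.get(name) is not None:
            return source[name]
    return None

# Example: the parameter is missing from the install request,
# so the database default wins over the YANG default.
effective = resolve_parameter(
    "max-connection-attempts",
    install_request={"host": "192.168.1.10"},
    db_defaults={"max-connection-attempts": 3},
    yang_defaults={"max-connection-attempts": 1},
)
print(effective)  # 3
```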
{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Improved displaying of children nodes of DataNode in Java client","p":["Child nodes were organised under multiple levels of Map objects - this was hard to read, especially when the user was debugging code.","Now, child nodes are displayed directly under a simple List collection:"]},{"l":"Added YANG-based documentation to Java client","p":["Added JavaDoc descriptions to the DataNode and DataNodePath sub-classes explaining how they map to the YANG schema tree.","Example:"]},{"l":"Removed redundant module-name prefixes from built paths in Java client","p":["RFC-8040 specifies that the module-name prefix must be added only to the first augmented element (transition to a different namespace).","Previously this worked sub-optimally - the module name was added to all elements of the path:","After the improvement:"]},{"l":"Added option to enable PostgreSQL driver logs in UniConfig","p":["Description","Logging connections and communication between UniConfig and PostgreSQL can be handy when debugging some errors.","Configuration","To log detailed information about executed queries and PG connections, set the org.postgresql logger level to DEBUG or TRACE."]},{"l":"Added transaction-id also to both RESTCONF requests and responses","p":["Description","The UniConfig transaction-id simplifies debugging of executed RESTCONF operations.","Example (added 'Uniconfig transaction' property):","Documentation","Logging Framework | Frinx Docs"]},{"l":"Hiding sensitive data in logs","p":["The UniConfig logs showed sensitive data such as PostgreSQL DB credentials. This is a potential security hole.","Example:","Fixed by hiding JSON configuration parsing details from the logs."]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Fixed invocation of device-discovery RPC multiple times","p":["The RPC response also contained results from the previous RPC invocation.","Fixed by isolating RPC results."]},{"i":"fixed-setting-of-max-connection-attempts-during-device-installation-clinetconf","l":"Fixed setting of max-connection attempts during device installation (CLI/NETCONF)","p":["Description","Removed the max-connection-attempts parameter from the install-node RPC. It was clashing with parameters from the southbound layers and introducing confusion.","Fixed the swapped loading of max-connection-attempts and max-reconnection-attempts on the NETCONF layer. It resulted in an infinite number of initial connection attempts (by default, there should be 1 attempt).","Set the default max-connection-attempts to 1 in the YANG model (both CLI and NETCONF layers).","Documentation","Updated document: Device installation | Frinx Docs","API","connection-manager - removed leaf max-connection-attempts:","cli-topology - setting the max-connection-attempts default value to 1:","netconf-node-topology - setting the max-connection-attempts default value to 1:"]},{"l":"Fixed stuck UniConfig API because of interrupted SQL operation","p":["Description","The default socket-read-timeout for the PostgreSQL driver is 0 - UniConfig waits forever for the result of a query. This blocks other UniConfig operations on the specific node if the connection between UniConfig and PG is dropped during execution of a query.","Fixed by exposing the socketReadTimeout parameter and setting its default value to 20 seconds.","Configuration","Added socketReadTimeout to the database connection settings (lighty-uniconfig-config.json):"]},{"l":"Fixed propagation of error on disabled templates","p":["If templates are disabled, the user now gets a direct error message with a 400 status code. Previously it failed with a parsing error or didn't fail at all and UniConfig just ignored the unknown data.","Example:"]},{"i":"fixed-pki-authentication-to-netconf-device-negative-cases","l":"Fixed PKI authentication to NETCONF device (negative cases)","p":["PKI authentication on a device - attempt to install a device with a reference to a non-existent private key","Previously it failed with the error:","After the fix, it fails with an error message that the private key with the specified identifier doesn't exist.","PKI authentication on a device - registering a password-protected key with the RPC netconf-keystore:add-keystore-entry but providing a bad password","Fixed by validating the input password against the keystore.","If it is invalid, UniConfig returns an error immediately and does not try to register such a private key and use it later during the mounting process."]},{"i":"netconf-edit-config-operation-with-insert-attribute-failed-because-of-aggregated--messages","l":"NETCONF edit-config operation with insert attribute failed because of aggregated messages","p":["When the insert attribute was used with the value before/after, there was a problem with the ordering of NETCONF messages in the aggregated message.","Fixed by ensuring that the list entry specified by the insert attribute is placed before the actual list entry in the edit-config message sent to the NETCONF server."]}],[{"i":"uniconfig-4210","l":"UniConfig 4.2.10"},{"i":"new-features","l":"✅ New Features"},{"l":"Aggregation of all edit-config NETCONF messages into one edit-config message","p":["Each modification in the transaction was expressed using one edit-config message on the southbound layer.","This approach was not optimal:","it generated more network traffic than needed","it could introduce errors if the device checks some references before committing the configuration","After this patch, all NETCONF edit-config RPCs in the transaction are aggregated into a single edit-config RPC with a common parent element."]},{"i":"capturing-changes-in-ordered-listleaf-list-using-calculate-diff-rpc","l":"Capturing changes in ordered list/leaf-list using calculate-diff RPC","p":["Previously, a changed order of list entries inside an ordered list/leaf-list was displayed as an update of the whole list with all its entries - not an optimal solution.","Added a new list to the calculate-diff RPC output that captures changes in the ordering of list or leaf-list elements. Such changes are not displayed under the created/removed/updated containers."]},{"l":"Validation of leaf-refs","p":["Validation of leaf-ref YANG constraints that are affected by some create/delete/update operation:","Supported leaf-ref paths:","absolute paths","relative paths","paths with 'current()' XPATH function","Added a new RESTCONF query parameter to PUT/PATCH/DELETE operations - checkForReferences.","The implementation conforms to RFC 7950 - The YANG 1.1 Data Modeling Language"]},{"l":"Encryption of leaves selected by paths","p":["UniConfig uses asymmetric encryption for ensuring confidentiality of selected leaf and leaf-list values. Currently, only RSA ciphers are supported (both global UniConfig and device-level key-pairs). Encryption is supported in the 'uniconfig', 'unistore', and 'templates' topologies.","Global-device encryption architecture - both UniConfig and the device use PKI for encryption of data:","Global-device encryption architecture","In comparison to the Global-device encryption architecture, this model uses only the global key-pair for encryption of data. 
Devices contain only plaintext data."]},{"i":"implementation-of-crypt-hash-type-from-iana-crypt-hash-yang-module","l":"Implementation of ‘crypt-hash' type from 'iana-crypt-hash’ YANG module","p":["UniConfig supports 'iana-crypt-hash' YANG model for specification of hashed values in data-tree using type definition 'crypt-hash'. Hashing works in the 'uniconfig' and 'unistore' topologies. Only NETCONF devices are currently supported because CLI cannot be natively used for reporting of device capabilities that would contain supported hashing function.","Hashing is done only in the RESTCONF layer after writing some data that contains leaves/leaf-lists with 'crypt-hash' type. Afterwards, UniConfig stores, uses, and writes to device only hashed representation of these values.","All 3 hash functions are implemented - 'MD5', 'SHA-256', 'SHA-512'. In case of 'uniconfig' topology, hashing function is selected based on reported feature in the NETCONF capability, in case of 'unistore' topology, UniConfig enforces 'SHA-512' hashing function.","Hashing model"]},{"l":"Using the latest schema at creation of template","p":["Adding configuration into UniConfig that tracks identifier of the UniConfig repository that must be used at creation of new template, if user doesn’t explicitly specify identifier of this repository using ‘schema-cache-directory’ query parameter."]},{"l":"Rebalancing of notifications cluster at runtime","p":["Random distribution of subscriptions to NETCONF notifications streams and turning on/off UniConfig instances may lead to scenario when one of the UniConfig instances in the cluster contain most of the subscriptions while others unequally smaller number.","Fixed by automatic redistribution of already created subscriptions on UniConfig instances and introduction of limits, how many subscriptions can be allocated on the one UniConfig instance in the cluster.","Cluster rebalancing"]},{"l":"Configuration","p":["Added new parameters under “notifications“ element in the lighty-uniconfig-config.json file:"]},{"l":"Implementation of RFC-8072 PATCH operation","p":["Invocation of PATCH that may contain multiple edits.","All edits are invoked sequentially and atomically as single operation.","Supported sub-operations per edit: create, delete, insert, merge, move, replace, remove.","More detailed description: RFC 8072 - YANG Patch Media Type"]},{"i":"added-missing-protocols-to-l2-for-ios-xe-cli-units","l":"Added missing protocols to L2 for IOS XE (cli-units)","p":["Parsing of following protocols:","elmi","pagp","udld","ptppd"]},{"l":"UniConfig whitelist","p":["specification of top-level containers/lists which configuration is synced from device (no other configuration is read from device)","opposite of existing blacklist functionality","either blacklist or whitelist can be specified, not both","API","updated YANG model that defines whitelist/blacklist:","Install-node RPC example (input body):"]},{"l":"UniConfig client thread model","p":["make uniconfig-client thread safe (using client from multiple threads)","making HTTP connection pools configurable (max connections, …)","API:","Introduced connection pool settings:","Introduced UniConfig server settings:","Example:"]},{"l":"Distribution of NETCONF notifications to Kafka","p":["NETCONF devices are capable of generating NETCONF notifications. UniConfig is able to collect these notifications and creates its own UniConfig notifications about specific events. Kafka is used for publishing of these notifications from NETCONF devices and UniConfig. 
Currently there are these types of notifications: - NETCONF notifications - notifications about transactions - audit logs (RESTCONF notifications).","NETCONF notifications - Kafka","API","Added subscription API to install-node request - 'stream' container. Example (subscription to 2 NETCONF streams - ‘NETCONF' and 'system’):","Added root list 'netconf-subscription' which contains all active subscriptions.","Corresponding YANG model:","Configuration","Provided initial configuration that can put into lighty-uniconfig-config.json:","Uniconfig-client","Example:"]},{"l":"Dynamic configuration of Kafka brokers","p":["Location of Kafka brokers and other Kafka settings must be configurable using RESTCONF API.","Persistence of this configuration in the database. All UniConfig instances must use same settings.","Option to change/read these settings using CRUD RESTCONF operations.","Configuration that is placed in the configuration file must be used only as initial configuration.","API","RESTCONF API used for reading and modification of all Kafka settings is described by following YANG model:"]},{"i":"installationuninstallation-of-multiple-devices-in-one-rpc","l":"Installation/Uninstallation of multiple devices in one RPC","p":["Added RPCs for installation or uninstallation of multiple devices in the single RPC call. The advantage of this approach in comparison to install-node/uninstall-node RPC is that UniConfig can schedule installation tasks in parallel.","Up to 20 devices can be installed at once.","API","Added RPCs into connection-manager YANG module:"]},{"l":"Added list of node-ids into snapshot-metadata","p":["Added list of node-ids, that are inside particular snapshot, into snapshot-metadata.","API","Added ‘nodes' leaf-list (’snapshot-manager.yang'):"]},{"i":"api","l":"\uD83D\uDCBB API","p":["Added following element into calculate-diff RPC output:","Added checkForReferences query parameter.","Default value is false - if it is set to 'true', then validation is done before application of modification into data-tree."]},{"l":"Introduction of transaction idle-timeout","p":["Idle timeout is more useful/practical than existing ‘absolute’ timeout, especially for long-running workflows - it will minimise the chance that transaction will be dropped after some operation started.","Transaction idle timer is refreshed after transaction is retrieved from registry (-> at invocation of some operation from RESTCONF).","Timed-out transaction is cleaned using existing cleaner.","Idle timeout is configurable only globally (config file).","Absolute timeout is not removed - it coexist with added idle-timeout."]},{"i":"configuration-1","l":"Configuration","p":["Updated configuration section in lighty-uniconfig-config.json- added 'transactionIdleTimeout’ property:"]},{"l":"Install-node RPC","p":["Added new parameters (uniconfig-config:crypto) into install-node RPC:","'uniconfig-config:crypto' - It allows to specify path to public key on device - ‘public-key-path’ (leaf with RFC-8040 path) and cipher type (by default, RSA is used) - ‘public-key-cipher-type’. If path to public key is specified and it exists on device, then Global-device encryption model is used. Otherwise, Global-only encryption model is selected.","'netconf-node-topology:yang-module-capabilities' - If auto-loading of YANG module with encrypted paths is not used and device itself doesn’t specify encrypted leaves, then it is necessary to side-load YANG module with encrypted paths. This parameter is relevant only on NETCONF nodes. 
Side-loaded modules must be expressed in the format of NETCONF capabilities."]},{"i":"configuration-2","l":"Configuration","p":["Global RSA key-pair is stored inside PEM-encoded files in the ‘rsa’ directory under UniConfig root. Name of the private key must be ‘encrypt_key’ and name of the public key must be ‘encrypt_key.pub’. If user doesn’t provide these files, UniConfig will automatically generate its own key-pair with length of 2048 bits. All UniConfig instances in the cluster must use the same key-pair.","Encryption settings are stored in the ‘config/lighty-uniconfig-config.json’ file under ‘crypto’ root object.","'encryptExtensionId' - If this setting is not defined, then encryption is disabled despite of other settings or install-node parameters. The value must have the format [module-name]:[extension-name] and specifies extension used for marking of encrypted leaves/leaf-lists in YANG modules. Corresponding YANG module, that contain this extension, can be part of device/unistore YANG schemas or it can be side-loaded during installation of NETCONF device as imported module from ‘default’ repository.","'netconfReferenceModuleName' - Name of the module for which NETCONF client looks for during mounting process. If UniConfig finds module with this name in the list of received capabilities, then it uses its revision in the lookup process for correct YANG module with encrypted paths (using deviations).","'netconfEncryptedPathsModuleName' - Name of the module which contains deviations with paths to encrypted leaves/leaf-lists. There could be multiple revisions of this file prepared in the ‘default’ NETCONF repository. NETCONF client in the UniConfig chooses the correct revision based on ‘netconfReferenceModuleName’ setting. Together, ‘netconfReferenceModuleName’ and ‘netconfEncryptedPathsModuleName’ can be used for auto-loading of encrypted paths for different versions of devices."]},{"l":"Uniconfig-client API","p":["Added InstallDeviceWithEnabledEncryption example:"]},{"i":"supported-ordered-listleaf-list-operations-restconf--netconf","l":"Supported ordered list/leaf-list operations (RESTCONF & NETCONF)","p":["RESTCONF RFC-8040 supports 2 additional query parameters for PUT and POST methods - ‘insert' and 'point’, see:","RFC 8040 - section 4.8.5","RFC 8040 - section 4.8.6","Using these parameters, it is possible to place list entry to specific position in the list. The 'insert' query parameter can be used to specify how an item should be inserted within an list or leaf-list. The 'point' query parameter is used to specify the insertion point for an item that is being created or moved within an 'ordered-by user' list or leaf-list. Like the 'insert' query parameter.","In the NETCONF client, UniConfig uses edit-config 'insert' attribute to put list entry to the specific position, see:","RFC 6020 - YANG"]},{"l":"API","p":["Introduction of schema for keeping information about the latest YANG repository identifier.","It is configurable using RESTCONF."]},{"i":"introduction-of-rename-patch-operation","l":"Introduction of 'rename' patch operation","p":["This PATCH operation can be used for changing values of one/multiple keys that identify some list entry. 
In the RESTCONF API it was not possible to directly update values of keys.","New PATCH operation with identifier 'rename'.","‘target’: identifier of original list entry","'point': new identifier of list entry"]},{"l":"Separate UniConfig errors to more type","p":["Updated 'frinx-type' YANG module (previously there were processing-error and no-connection error types)."]},{"i":"implementation-of-rfc-8072-patch-operation-1","l":"Implementation of RFC-8072 PATCH operation","p":["Example:"]},{"i":"added-missing-protocols-to-l2-for-ios-xe-cli-units-1","l":"Added missing protocols to L2 for IOS XE (cli-units)","p":["Added enumerations into 'frinx-cisco-if-extension' YANG module (openconfig):"]},{"l":"YANG packager","p":["implemented tool for validation and loading of YANG repository","API:","User can find corresponding script it in the utils/ directory (part of distribution).","Script './convertYangsToUniconfigSchema' contains four arguments. Each one has its own identifier so user can use any order of arguments.","Two arguments are required, namely the path to resources that contain YANG files and the path to the output directory where user wants to copy all valid YANG files. Other three arguments are optional. First one is the path to the \"default\" directory which contains some default YANG files, second one is the path to the \"skip-list\" and last one is a \"-to-file\" flag, which user can use when he wants to write a debug output to file.","-i /path/to/sources - required argument. User has two options for where the path can be directed:","to the directory that contains YANG files and other sub-directories with YANG files","to the text-file that contains defined names of directories. These defined directories have to be stored on the same path as text-file.","-o /path/to/output-directory - required argument. User can define path where he wants to save valid YANG files. Output directory must not exist.","-d /path/to/default - optional argument. Sometimes some YANG files need additional dependencies that are not provided in source directories. In this case it is possible to use path to the 'default' directory which contains additional YANG files. If there is this missing YANG file, YANG packager will use it.","-s /path/to/skip-list - optional argument. User can define YANG file names in text file that he does not want to include in conversion process. This file must only contain module names without revision and .yang suffix.","-to-file - optional argument. When user uses this flag, then YANG packager also saves the debug output to a file. This file can be found on a same path as 'output-directory'. It will contain suffix '-info' in its name. 
If the output directory is called 'output-directory', then the file will be called 'output-directory-info'."]},{"l":"UniConfig notifications about RESTCONF requests","p":["Publishing all RESTCONF traffic into PostgreSQL ‘notification' relation and Kafka 'restconf-notifications’ topic.","API","Created YANG model for RESTCONF notifications:","Configuration:"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Fixed UniConfig rollback for CLI devices","p":["Rollback operation after failed commit, that included some CLI devices, was not working at all.","Fixed by re-implementation of the rollback process."]},{"l":"Filtering operational data from read NETCONF device configuration","p":["There are some devices that report both configuration and operational data via gRPC even if UniConfig reads only configuration data.","Fixed by explicit removal of operational data elements from read configuration before writing this configuration into database."]},{"l":"Fixed capturing of command response from Telnet session","p":["The size of internal buffer was hard-coded - now it is flexible based on number of received bytes from Telnet session. It caused trimming of command output in the execute-and-read RPC response."]},{"l":"Fixed deadlocks caused by superfluous synchronisation in transaction manager","p":["Synchronisation of component that is responsible for loading/creation/closing of transactions was unnecessary constrained - it resulted in dead-locks, especially when one UniConfig transaction was accessed asynchronously from different threads."]},{"l":"Fixed lost ordering of list elements after reading of some data","p":["If user read both ‘configuration' and ‘operational’ list elements using RESTCONF API (’content=all' query parameter), order of elements was lost during merging of these two sets.","After fix, configuration elements are displayed first, then operational-only elements are displayed."]},{"l":"Fixed interrupted ping command executed by Device Discovery service","p":["If user executed device discovery RPC with more IP addresses than the capacity of internal thread pool, some scheduled ping tasks were cancelled by timeout process.","Removed timeout from thread pool - tasks wait in the queue without time limit."]},{"l":"Fixed deadlock between transaction closing and UniConfig operation","p":["Procedure for closing transaction is called either explicitly using close-transaction RPC or automatically from transaction cleaner.","If at the same time some transaction is used in the invoked UniConfig operation, then it may lead to the deadlock - using transaction that was expired and is being closed.","Fixed by synchronisation of there events in the transaction manager."]},{"i":"get-template-info-operation-must-be-part-of-read-only-transaction-uniconfig-client","l":"Get-template-info operation must be part of read-only transaction (uniconfig-client)","p":["This operation was only part of read-write transaction."]},{"i":"when-notifications-are-enabled-uniconfig-log-is-getting-filled-with-psqlexception-continuously","l":"When notifications are enabled, uniconfig log is getting filled with PSQLException continuously","p":["Subscription table was not locked in the loop used for acquiring free subscription to NETCONF streams. Instead, pg_locks system view was locked. 
It led to various issues with permissions.","Fixed by not locking instances in the pg_locks view, but only instances in the subscription table."]},{"l":"Installation of device with bad password getting wrong behavior","p":["Error message was not correctly propagated into RPC install-node output.","Fixed - it will contain error message “mountpoint was not succesfully created“."]},{"l":"Fixed ignoring of unknown elements received from NETCONF device","p":["Even if ‘strict-parsing' was set ‘false’, sometimes NETCONF client didn’t ignore unknown elements that were placed under parent node of type 'list'."]},{"l":"Fixed downloading of schemas from NETCONF server running on netconf-testtool","p":["Downloading of schemas from simulated device (netconf-testtool) didn't work at all. User had to provide YANG schemas of simulated device manually to UniConfig ‘cache’ directory."]},{"l":"Fixed JSONB filtering for UniStore topology","p":["JSONB filtering feature didn’t work on configuration under unistore nodes"]},{"l":"Fixed calculate-diff RPC with updated root leaves","p":["Calculate-diff RPC failed if there were some updated/created/removed root leaves."]},{"l":"Fixed disconnecting CLI because of invalid characters in the prompt","p":["If the commands that are executed are too long, an incorrect character will appear which prevents the CLI from processing the prompt and causes the application to hang.","Fixed by ignoring of such characters during parsing of returned command prompts from device."]},{"l":"Fixed closing of UniConfig transaction after failed commit operation","p":["If commit RPC failed unexpectedly (500 status code), then UniConfig transaction was not closed and stayed hanging and blocking other transactions that would do modifications on the same nodes.","Fixed by closing UniConfig transaction always at the end of commit RPC if it was not closed by operation itself."]},{"l":"Fixed handling of incorrect input pagination parameters","p":["Returning 400 error message if input is not correctly formatted.","Example:"]},{"l":"Fixed providing of multiple slf4j bindings on classpath","p":["Keeping only one slf4j implementation on classpath, so there aren’t any conflicts."]},{"l":"Stop closing of configuration mode in the UniConfig shell after each commit operation","p":["State before:","State after:"]},{"l":"Fixed writing of augmentation data at commit operation to southbound layer","p":["This is a regression introduced during implementation of “validation” and “confirmed commit” features. Fixed by wrapping of augmentation nodes to non-mixin parent containers."]},{"l":"Fixed validate RPC output with empty input","p":["After modification of multiple nodes in the transaction, validate RPC with empty input:","Returns back only:","But it must contain all modified nodes."]},{"l":"Fixed ordering of entries in the transaction-log","p":["Committed transactions must be sorted by time when transaction was committed. 
Previously, the order was random."]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Removed old draft-02 RESTCONF implementation","p":["We stopped using old RESTCONF implementation.","Only new RESTCONF RFC-8040 is supported."]},{"i":"configuration-3","l":"Configuration","p":["Removed “jsonRestconfServiceType“ setting from “lighty-uniconfig-config.json”:"]},{"l":"Removed option to turn off transactions","p":["This setting was confusing, because turned on transactions still support both immediate-commit-model and build-and-commit models."]},{"i":"configuration-4","l":"Configuration","p":["Removed “uniconfigTransactionEnabled“ from configuration file:"]},{"i":"improved-invalid-nesting-of-data-error-message","l":"Improved 'Invalid nesting of data' error message","p":["This error occurred without and descriptive message, if user put some list without specification of correct brackets in the input JSON body.","Improved error message - it points to the place/element at which error occurred (parent element)."]},{"l":"Removed AutoSyncService","p":["This component was responsible for automatic reading of some configuration after pushing configuration to device.","However such process was not very visible to user, it could cause issues - we decided to remove it, so similar functionality must be implemented on application layer."]},{"l":"Specification of default directory in the YANG packager utility","p":["The packager script expected to have ‘default’ as the name of the default directory. It must be able to accept any file name after -d parameter."]},{"i":"separate-uniconfig-errors-to-more-type-1","l":"Separate UniConfig errors to more type","p":["Introduction of more granular error types that are returned in the response messages of UniConfig RPC operations.","User should be able to identify in what component/layer of UniConfig, the error occurred."]},{"i":"enabledisable-notifications-per-topic","l":"Enable/disable notifications per topic","p":["Previously it was only possible to enable/disable notifications globally (all topics).","Added option per topic to enable/disable notifications.","Added 3 new leaves that are placed under “kafka-settings“ container","API","Confiruration","Initial configuration can be specified from lighty-uniconfig-config.json file:"]},{"l":"Renamed elements in notification system","p":["Goal - improved readability.","subscription list → netconf-subscription","topic name restconf-notifications → audit-logs","API","Updated subscription list and YANG module name:","Renamed restconf-notifications module:","Updated topic name for RESTCONF notifications:","Configuration","Updated topic name and corresponding field name:"]},{"l":"Removed AAA","p":["Removed AAA code from UniConfig.","AAA was used for:","RESTCONF authentication (basic) - not needed, it can be provided by application gateway","encryption in NETCONF - moved corresponding functionality to NETCONF module","user identification - not needed, this functionality will be covered by tracing logs","API","Removed “user-id“ from “audit-logs“ module:","Removed “username” from “transaction-log” module:"]},{"i":"uniconfig-shell-ability-to-configure-multiple-leafs-with-single-set-operation","l":"UniConfig shell: Ability to configure multiple leafs with single SET operation","p":["If there are multiple leaves under same container/list, user should be able to configure them in the single command line.","API:","Sample YANG model:","Commands for setting client-alive-interval and client-alive-count-max:","New 
approach:"]},{"l":"Removing unused UniConfig monitoring system","p":["Removing of following field from UniConfig instance DB relation - backup-instance.","Removing periodical monitoring of UniConfig instances (component in the UniConfig layer) and taking leadership over nodes in the cluster.","Removing unused DB business API services that were used in the [1] and [2].","Configuration","Before changes:","After changes (removed multiple settings):"]},{"l":"Removed old UniStore implementation","p":["UniStore was previously implemented separately from UniConfig. Now it is integrated into UniConfig with distinct topology identifier 'unistore'."]},{"l":"Using cached thread-pool in the device-discovery service","p":["There was a fixed thread-pool that kept all the threads open all the time.","Using cached thread-pool with a small initial thread amount and higher max thread amount e.g. CPU_COUNT * 8."]},{"i":"configuration-5","l":"Configuration","p":["Added “maxPoolSize“ setting to configuration file:"]},{"i":"display-only-sub-structure-with-show-command-in-uniconfig-shell","l":"Display only sub-structure with \"show\" command in UniConfig shell","p":["Before patch:","After patch (just displaying what's there inside settings/system accordingly):"]},{"l":"Providing default UniStore node id in the UniConfig shell","p":["When we create a new UniStore node we manually had to give it a node-id. Say, we are configuring ssh now, it needs to be a generic command which doesn't expect the node-id to be given by the user.","Before patch ('new' is the node identifier):","After patch:","Configuration:","Default UniStore node identifier can be configured in the lighty-uniconfig-config.json (default value is 'system'):"]},{"l":"Removed unused Maven plugins","p":["Removed unused Maven plugins that are executed during build process and thus making building longer."]},{"l":"Removed AspectJ from UniConfig","p":["AspectJ makes code more error-prone and complex for debugging - removed usage of this library in the RESTCONF and dependencies."]},{"i":"documentation-additions","l":"\uD83D\uDCDC Documentation additions"},{"i":"validation-of-leaf-refs-1","l":"Validation of leaf-refs","p":["Validation of leaf-ref YANG constraints that are affected by some create/delete/update operation:","leafref-validation"]},{"l":"idle-timeout","p":["Introduced transaction idle-timeout","Updated configuration section in ‘“lighty-uniconfig-config.json” - added 'transactionIdleTimeout’ property:"]},{"l":"Encryption","p":["UniConfig uses asymmetric encryption for ensuring confidentiality of selected leaf and leaf-list values."]},{"i":"insert--point","l":"Insert & Point","p":["RESTCONF RFC-8040 supports 2 additional query parameters for PUT and POST methods - ‘insert' and 'point’"]},{"l":"Hashing","p":["UniConfig supports 'iana-crypt-hash' YANG model for specification of hashed values in data-tree using type definition 'crypt-hash'."]},{"l":"Templates","p":["Added information about usage of the templates"]},{"i":"rename-patch-oper","l":"Rename patch oper.","p":["This PATCH operation can be used for changing values of one/multiple keys that identify some list entry.","Rename"]},{"l":"Kafka clustering","p":["Random distribution of subscriptions to NETCONF notifications streams and turning on/off UniConfig instances may lead to scenario when one of the UniConfig instances in the cluster contain most of the subscriptions while others unequally smaller number."]},{"l":"YANG Patch","p":["Invocation of PATCH that may contain multiple 
edits."]},{"i":"uniconfig-whitelist-1","l":"UniConfig whitelist","p":["List of root YANG entities that should be read. This parameter has effect only on NETCONF nodes.","Whitelist"]},{"i":"yang-packager-1","l":"YANG Packager","p":["Implemented tool for validation and loading of YANG repository"]},{"l":"Install multiple nodes","p":["Added RPCs for installation or uninstallation of multiple devices in the single RPC call. The advantage of this approach in comparison to install-node/uninstall-node RPC is that UniConfig can schedule installation tasks in parallel.","Uninstall multiple nodes"]},{"l":"Snapshot-metadata","p":["Added list of node-ids, that are inside particular snapshot, into snapshot-metadata."]}],[{"i":"uniconfig-429","l":"UniConfig 4.2.9"},{"l":"UniConfig","p":["[BUG FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","added GNMi southbound protocol","added node list into snapshot-metadata - it contains information about nodes that are captured using snapshot - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/snapshot-manager/obtain_snapshot_metadata/obtain-snapshot-metadata.html","don't fail dry-run commit if there aren't any changed nodes","fixed behaviour of validate RPC","fixed calculate-diff with changed root leaf","fixed calculate-diff: uniconfig-native branch didn't work fine with updated leaf nodes under choice nodes","fixed comparison and updating of configuration fingerprints(synchronization issues between DB and UniConfig cache)","fixed DeviceDiscovery: parsing of NULL hostname","fixed displaying whole list content using UniConfig shell","fixed dry-run commit - it closed transaction if list of target nodes was empty","fixed replace-conf-with-oper - NullPointerException","fixed transaction leak (CLI shell)","fixed using of UniConfig on machines with less than 4 CPU cores","get-template-info RPC: showing information about all variables in specified template","implementation of git-like diff that shows diff output with git-like marks - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-git-like-diff/calculate-git-like-diff.html","implemented RPC to verify install status for a set of node-ids - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_check_installed_devices/check-installed-devices.html","improved apply-template RPC: added type safety - application of value to variable with specified type","install-multiple-nodes / uninstall-multiple-nodes (RPC) - option to install/uninstall multiple devices using one request - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_install_multiple_nodes/install-multiple-nodes.html","introduced unistore topology for storing settings / 'dummy' device configuration - supported commit (persistence of unistore nodes), replace-config-with-oper, and calculate-diff operations - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/unistore-api/unistore.html","logging of transaction ID","UniConfig shell - prompt user for commit if they leave config mode after changes were made"]},{"l":"CLI","p":["[NEW FEATURES]","logging CLI request and responses (logging broker) - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/operational-procedures/logging/logging.html#cli-messages","[BUG FIXES]","fixed closing of CLI 
mountpoint created using lazy CLI strategy","fixed propagation of error message from mount process into install-node RPC output"]},{"l":"RESTCONF","p":["[NEW FEATURES]","immediate commit model - automatic creation of new transaction per user request - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/immediate-commit-model/immediate-commit-model.html","support HTTP2 on server side","[BUG FIXES]","fixed displaying of candidate nodes from non-existing augmentations","fixed unclosed/leaked UniConfig transaction","fixed parsing of multi-level fields query parameter","[IMPROVEMENTS]","making module-name prefix optional in value of fields query parameter"]},{"l":"NETCONF","p":["[NEW FEATURES]","exposed strictParsing parameter into NETCONF mountpoint - ignoring unknown elements received from NETCONF server - documentation: https://gerrit.frinx.io/c/Frinx-docs/+/11724","sorting of list elements by one or multiple fields - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/restconf/restconf.html#sorting","[IMPROVEMENTS]","reducing logs generated by NETCONF cache loader","updated naming of pagination query parameter"]},{"l":"TRANSLATION-UNITS-FRAMEWORK","p":["[IMPROVEMENTS]","sending list size hint to translation unit writers"]},{"l":"CONTROLLER","p":["[IMPROVEMENTS]","logging creation/closing of UniConfig transaction","removed transaction-log limit from database","[BUG FIXES]","handling of errors that occur in readers/writers","fixed reading snapshot-metadata from database","fixed JSONB filtering: parsing of embedded paths"]},{"l":"SWAGGER","p":["[NEW FEATURES]","added option to ignore config nodes in order to produce oper only documentation","added range constraints to leaves","enable Maven swagger generator for uniconfig native models","[IMPROVEMENTS]","removed swagger path generator for old restconf","[BUG FIXES]","fixed description generator for leaves"]},{"l":"NETCONF TRANSLATION UNITS","p":["[BUG FIXES]","re-enabled XR-6 models","fixed XR-6 interface configuration writer (MTU)","[IMPROVEMENTS]","decreased surefire heap to 2G","optimization: stop recreation of NetconfAccessHelper","set max heap to 4G when running unit-tests to avoid outOfMem exception when running tests"]},{"l":"CLI TRANSLATION UNITS","p":["[Huawei]","created units: login banner, HTTP commands, sysname command, VLAN, telnet and ssh, user-interfaces, RADIUS, QoS, ipv6 and traffic-filter commands","fixed: mounting Huawei device","[SAOS6]","created units: local/remote interfaces, deleting VLAN and physical interface","fixed: reading metadata, ordering of commands for adding network instances","improved: the way to determine if the ring is major or sub ring","[SAOS8]","fixed: reading interface sub_ports, reading metadata","[IOS/IOS-XE]","fixed: deleting all service instances, reading metadata, prefix-lists with 0 entries not reconciled ipvpn, handling invalid MTU value, parsing ACL set"]}],[{"i":"uniconfig-428","l":"UniConfig 4.2.8"},{"l":"UniConfig","p":["[NEW FEATURES]","UniConfig shell: basic CRUD operations (configuration/operations mode), RPC calls, YANG actions.","Validate RPC: validation of NETCONF configuration by target device.","Device discovery RPC: searching for open TCP/UDP ports on target hosts ICMP reachability.","[IMPROVEMENTS]","Simplification of UniConfig RPCs in the transaction: RPCs(is-in-sync, commit, checked-commit, replace-config-with-operational, calculate-diff, sync-from-network, dryrun-commit) should work now with 
empty input. If the input is empty, operation will be invoked on all touched nodes.","[FIXES]","Unified representation of empty snapshot metadata - it will return 404.","Propagation of southbound error message to Uniconfig layer after failed installation."]},{"l":"CONTROLLER","p":["[NEW FEATURES]","Auto-generation of local UniConfig instance name, if it is not set in the configuration file.","[FIXES]","Fixed persistence of templates: fixed extraction of node-id from path.","Fixed omitting of module-name from URI: skip openconfig/native-CLI augmentations from created UniConfig-native schema.","Fixed parent module lookup when resolving leafrefs- parent module was mapped not to parent, but the submodule itself.","Fixed parsing of source-ids from YANG files- don't inherit revision from parent module.","[IMPROVEMENTS]","Improved error message on failed building of schema context.","Optimized YANG schema cache: Removed in-memory schema cache listener that was caching bulky AST form of all sources. Caching of them is not valuable anymore because there is only 1 schema context per device-type."]},{"l":"SWAGGER","p":["[FIXES]","Removed trailing slash from generated URIs (conforming RFC-8040 format).","Fixed importing of 4.0.0-alpha-1-SNAPSHOT (maven-core).","[IMPROVEMENTS]","Stop emitting operational nodes in swagger.","Adding snapshots-metadata and tx-log to generated swagger-api."]},{"l":"CLI","p":["[FIXES]","Fixed initialization of SSH session: Enforced following order of messages in SSH client - Protocol (SSH-2.0-APACHE-SSHD-2.4.0), Protocol (SSH-2.0-Cisco-1.25), Key Exchange Init, Key Exchange Init(some devices don't accept switching Protocol and Key Exchange Init messages).","Fixed setting infinite number of reconnection attempts."]},{"l":"NETCONF","p":["[NEW FEATURES]","NETCONF PKI data persistence: persistence of crypto information in the file-system.","[FIXES]","Capturing error message from SSH session initialization process.","Fixed setting infinite number of reconnection attempts.","Fixed self-reconnection of NETCONF session (issue with keepalive timer).","Fixed netconf testtool in mdsal-persistent-mode - do not share Datastore across all devices.","Fixed overwriting IETF schemas by UniConfig shcemas in netconf-testtool.","[IMPROVEMENTS]","Removed unused netconf-ssh classes.","Improving the way of printing NETCONF reconnection attempts.","Testtool: Enable manipulation of operational data over NETCONF."]},{"l":"RESTCONF","p":["[NEW FEATURES]","Pagination: get-count, limit, and start-index query parameters.","[FIXES]","Fixed adding schema-respoitory parameter to PATCH operation.","Fixed serialization of identityref key value."]},{"l":"CLI TRANSLATION UNITS","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Huawei: Add caching for \"display current-configuration\" command.","Huawei: created TU for AAA.","Huawei: created TU for ACL.","Huawei: created TU for physical, VLAN interfaces, sub-interfaces.","Huawei: created TU for trunk and access VLANs.","Huawei: Read interfaces of Huawei devices with \"display interface brief\".","Huawei: Updated parsing of output for L3-VRF.","IOS XE: Fixed missing some information about route maps for IOS.","IOS XE: Fixed sending \"dot1q 1-4094\" to IOS XE devices.","SAOS6: All interfaces cannot be marked as Ethernet.","SAOS6: Changed name for l2vlan interface to \"cpu_subintf_\" l2vlan name.","SAOS6: Fixed creation of sub-port on EthernetCsmacd interfaces.","SAOS6: Reading all interfaces from ciena devices using command\"interface show\"."]},{"l":"NETCONF 
TRANSLATION UNITS","p":["[FIXES]","Fixed importing ietf-inet-types - there are multiple revisions available in the UniConfig.","[IMPROVEMENTS]","Speed up device model build by disabling various maven plugins."]},{"l":"OPENCONFIG","p":["frinx-huawei-network-instance-extension - added network-instance extension.","frinx-saos-if-extension - added ipv4 and ipv6 address extension.","frinx-cisco-if-extension - the dot1q value type is changed from int to string and the range is saved as a string.","frinx-acl-extension - ACL for huawei devices","frinx-openconfig-aaa, frinx-openconfig-aaa-radius, frinx-openconfig-aaa-tacacs, frinx-openconfig-aaa-types, frinx-huawei-aaa-extension - added aaa and radius modules from openconfig.","frinx-huawei-if-extension - added yang for huawei interface and sub-interface extensions.","frinx-openconfig-bgp-types, frinx-openconfig-extensions -fixed bug with community set values."]}],[{"i":"uniconfig-427","l":"UniConfig 4.2.7"},{"l":"Uniconfig","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Added UniConfig transaction-id as fingerprint for devices not supporting it.","Adjusted persistence of mount information - node with the same ID may be present in both CLI/NETCONF topologies - and node only from one topology at the same time can be used for installation on UniConfig layer (configuration is synced and parsed).","Changed native-CLI architecture - UniConfig calls native-CLI readers/writers directly using BI API - BA translation layer provided by Honeycomb is redundant.","Fixed calculate-diff - Removing the whole list node with all list entries.","Fixed commit output: if the configuration of one of the nodes fails at any phase, then the outputs for all nodes will always contain a rollback flag.","Fixed creation/removal of dry-run Unified mountpoint - synchronization problems.","Fixed dry-run commit - Dry-run commit should trash journal of nodes that haven't been 'touched'.","Fixed losing of some tags in DOM nodes (application of template)","Fixed reading of uniconfig-native flag - unboxing of null Boolean to boolean.","Fixed rollback operation after commit/checked-commit.","Fixed sync-from-network for unavailable nodes - Comparison of config fingerprints failed for nodes that are unavailable because reading of fingerprint failed.","Fixed transfering of template tag from template to uniconfig topology at apply-template RPC (it should not happen).","Fixed version-drop in copy RPC.","Fixed writing ordered-map nodes during string substitution process(application of template).","Handling reordering of list entries in the calculate-diff - instead of sending delete+replace operations to the southbound layer.","Implementation of get-installed-nodes RPC: used for listing installed UniConfig nodes.","Implementation of revert-changes RPC: reverting transaction that is stored in transaction-log and identified by unique UUID.","Implementation of transaction-tracker (transaction-log): tracking of successfully committed data.","Improved error messages - using serialized form of YangInstanceIdentifier in logs or error messages, if possible.","Improved error messages during application of template.","Improving the existing algorithm that collapses diff from honeycomb(parallel streams).","Integration of fingerprint validation to templates - writing of fingerprint of modified templates to database and verification of fingerprint before commit.","Introduction of install-node, uninstall-node, mount-node, and unmount-node RPCs - a new way to install nodes into UniConfig with split 
concepts of installation and mounting. Mounting is always done on demand and the mountpoint is alive as long as some transaction is using this mountpoint.","Introduction of UniConfig transactions - dedicated/shared transactions concept: multiple users can use UniConfig safely from isolated transactions. UniConfig RPCs are part of UniConfig transactions - information about transaction-id is passed from the RESTCONF layer into the UniConfig layer.","Making UniConfig instance stateless - data is separated from UniConfig (PostgreSQL database) and UniConfig doesn't keep persistent connection to devices. Data and connection recovery is not done by UniConfig instances anymore (coordination, monitoring, and recovery process is not orchestrated by UniConfig). From the view of data-tree, UniConfig is used only as a cache layer on top of PostgreSQL database and caching is done only in the scope of transaction.","Mark sync operation failed on empty config.","Removed data-tree cache layers on CLI and NETCONF layers - UniConfig directly writes data to CLI/NETCONF mountpoints - it simplifies syncing process too.","Removed snapshot limit - it is not used anymore since snapshots are stored in the database and this database should manage its storage limits.","Removed unused Karaf features.","UniConfig shell prototype: SSH server, RPC operations, simple read operation.","Using commit RPC for committing snapshots and templates.","Using distributed advisory locks provided by PostgreSQL for locking of UniConfig nodes during commit/checked-commit operation. If another transaction perfors commit at the same time, it will fail before execution of the second commit.","Validation of conflicts between different transactions: added data-tree and config fingerprint validation before commit / checked-commit RPC invocation."]},{"l":"Controller","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Added synchronization when generating BA->BI codecs.","Added workaround for 'metadata not available' data-tree bug.","Allow positional information in YangInstanceIdentifier (useful for operations under ordered lists).","Allow users to specify attributes without module-name (template tags).","Breaking PUT modifications to specific modifications in the data-tree: improving 'optimistic lock' granularity.","Ensuring parents by merge: avoiding ridiculous errors when data-tree allows to write data to nodes which parent is missing.","Exposed simple container merge utility.","Extending RPC service by custom parameters that can be passed from RPC caller to RPC implementation.","Fixed creation of DocumentedException from XML (document may include redundant namespaces).","Fixed data-tree modifications: merge->put->delete operation chain.","Fixed disappeared tag from template data-tree.","Fixed leaked DB connection on health-check operation.","Fixed order in which database writers are called (adding priority to DatabaseWriter API).","Fixed race-conditions in 3-phase datastore commit.","Fixed searching for fallback context on nodes that were not mounted(uniconfig-native).","Fixed storing of the default schema repository into PostgreSQL.","Generalisation of NETCONF repository into YANG repository.","Implemented standalone DOM broker - stopping to use clustered/distributed DOM brokers.","Integration of Flyway library to Uniconfig: easier upgrading of database schema and migration of data.","Integration of JSONB filtering of configuration on the level of DAOs.","Integration of UniConfig transaction manager with database and datastore transactions 
- used for management of shared/dedicated transactions.","Introduction of embedded PostgreSQL for testing purposes - it can be enabled from the UniConfig configuration file.","Making the database layer more thread safe (using 'SELECT FOR UPDATE' in some queries).","Optimized creation of uniconfig-native schemas.","Persistence of logging configuration in PostgreSQL.","Persistence of snapshots in PostgreSQL.","Persistence of templates in PostgreSQL.","Persistence of transaction-log in PostgreSQL.","Preserving order of list/leaf-leaf elements in the data-tree.","Removed unnecessary dependencies of xtend maven plugin.","Removed unused Karaf features.","Replaced asynchronous DB API by synchronous DB API - JDBC connections are synchronous.","Separated persistence of UniConfig nodes and representing mountpoints.","Stop submitting datastore transactions - it must be closed - datastore is used only as cache.","Validation and locking of templates and UniConfig nodes on the level of UniConfig transaction."]},{"l":"Swagger","p":["[FIXES]","Fixed bug caused by swagger-uniconfig-go.","[IMPROVEMENTS]","Make openAPI generated for uniconfig more useful.","Added Unified layer models to swagger dependencies."]},{"l":"Translation units framework","p":["[NEW FEATURES]","Added native-CLI binding-independent API.","[IMPROVEMENTS]","Removed unused artifacts.","Optimized chunk cache - do not store entire writer in chunk cache, so GC can take care of writers as soon as possible.","Detection of complex reordering of list entries in diff output.","[FIXES]","Fixed commit rollback failing: the bug was caused by an attempt to execute an inverse command of an unsuccessful command."]},{"l":"CLI","p":["[IMPROVEMENTS]","Removed unused Karaf features.","Exposed binding-independent data support to native-CLI API.","Exposed services for direct device access to MP.","[FIXES]","Replace maxConnectionAttempts with maxReconnectionAttempts when reconnecting to the device after the first connection attempt is successful.","Replaced transactionChain (not working correctly) with direct dataBroker transactions.","Fixed device type checking - when a device was mounted with the wrong type, the generic symbol (\"\") was implicitly used as the type. The device was installed on all layers, but uniconfig/configuration was empty. 
Now the correct device type must be used.","Fixed disabled CLI journaling (default value)."]},{"l":"NETCONF","p":["[NEW FEATURES]","Added maxReconnectionAttempts functionality into NETCONF client.","[IMPROVEMENTS]","Removed unused Karaf features.","Improved error message from parsing of NETCONF RPC response.","Removed akka actor dependency from NetconfCacheLoader.","Enable md-sal persistence across sessions in NETCONF testtool.","[FIXES]","Fixed writing of netconf namespace prefix ('Namespace urn:ietf:params:xml:ns:netconf:base:1.0 was not bound, please fix the caller').","Fixed reading of the whole list/leaf-list from the device - it was reading the whole parent structure, not only the dedicated list.","Moving state to unable-to-connect after failed schema context building from device YANGs - status is written to datastore, because mount-node RPC relies on OPER information only.","Fixed deadlock that may occur on removal of Unified MP."]},{"l":"RESTCONF","p":["[NEW FEATURES]","Added support for RESTCONF PATCH method that includes tags.","Integration of UniConfig dedicated/shared transaction to RESTCONF - cookie with transaction-id property, create-transaction RPC, and close-transaction RPC.","Introduction of jsonb-filter query parameter used for filtering of data committed to database.","[IMPROVEMENTS]","Removed unused Karaf features.","Using RFC8040 format for errors thrown from the transaction system.","[FIXES]","Fixed RESTCONF response/request logging.","Fixed reading of all available RPC operations.","Fixed NPE that is caused by Subject.getPrincipal() - extraction of authentication data from AAA.","Fixed serialization of ordered leaf list with attributes.","Fixed connection leak - read-only transaction was not always closed.","Fixed parsing of elements without module name: If there are some conflicts between child elements - multiple elements with the same name but in different modules exist - then we should return a proper error message.","Fixed use of fields query parameters with uniconfig-native nodes."]},{"l":"NETCONF translation units","p":["[IMPROVEMENTS]","Removed unused Karaf features.","[FIXES]","Fixed writer dependency in XR623 ISIS translation unit.","Ignored 'ios-xr lacp period 200' command - only 'lacp period short' is supported."]},{"l":"CLI translation units","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Huawei: additions - global config reader and writer for bgp, neighbor config reader and writer, new augmentation fields for global and neighbor configurations.","Huawei: translation units - interfaces.","IOS XE: added ios-xe 15 and 17 to ios-xe module.","IOS XE: additions - media-type command, port-security commands, BDI type recognition, ethernet cfm mip command, cft commands, commands for bgp, prefix-list command, fhrp delay, bfd-template, split-horizon group in bridge-domain, added fallOverMode for vrf neighbor, IPv6 prefix-lists with prefix lengths, routing-policy, ipv6 vrrp, added synchronization and moved default-information in BGP, table-map, ip community-list command, redistribute command, bgp and interface commands, ipv6 commands, rewrite command, snmp trap, support for multiple l2protocols.","IOS XE: created a distinct module for IOS-XE in cli-units.","IOS XE: fixed writing interface config, fixed unwanted lldp/cdp/switchport vlan commands, fixed IPv6 config writer template, fixed mounting of IOS XE (configuration metadata), fixed bridge-domain regex, fixed reading VLANs, fixed storm-control regex, fixed NPE in GlobalAfiSafiConfigWriter, 
fixed BgpAfiSafiChecks, fixed CommunityListConfigReader and L3VrfReader, fixed IndexOutOfBoundsException in BgpActionsConfigReader.","IOS XE: make sure all 'GigabitEthernet' interfaces are treated as physical, don't send unnecessary commands in interface unit, only send storm-control commands when needed, moved service instances and encapsulation in service instance in ios-xe/interface, edit readers and writers for bridge-domain, edited LLDP to not parse when default is set, speed up mounting","IOS XE: translation units - SNMP, LACP, privilege command, interfaces, l2protocol, evc, route-map, bgp and network-instance modules, vrf definition, fhrp version, ip commands, neighbor, ethernet cfm mip, negotiation auto.","IOS-XR: delete methods should always be readBefore, fixed calling get on a null value, fixed delete of mpls-te.","Moved service-policy from IOS/interface to IOS/QoS.","Removed unused Karaf features.","SAOS6: fixed virtual-circuit ethernet delete, fixed reading Virtual Ring data, fixed reading the range of vlans in virtual ring commands, reading default interface.","SAOS6: translation units: Ingress ACL.","SAOS6: use the same template for service as for profile schedulers.","SAOS6/8: added quotes into description.","SAOS6: additions - commands for delete untagged attributes, unset description command, parsing ranges in ring protection.","SONiC: created init and interfaces unit."]},{"l":"Openconfig","p":["created frinx-openconfig-evc module","created frinx-privilege module","fixed Openconfig bug with nested augmentations (fixed resolving augmentations path)","frinx-bfd-extension: bfd-template-config","frinx-bgp-extension: added bgp extension for Huawei device, local-as-group, route-maps in redistribute commands, BGP neighbor, table-map in BGP, synchronization and moved default-information in BGP, added bgp fall-over mode, neighbor as-override, default-information originate","frinx-cisco-if-extension: added negotiation auto, added support for multiple l2protocols, added support for rewrite commands, vrf forwarding, ip commands, fixed L2protocol description, split-horizon group in bridge-domain, changed bridge-domain type to string, fhrp delay, fixed bad order of augmentation in frinx-cisco-if-extension.yang, bridge-domain, added grouping for L2protocol for Service instance, move encapsulation in service instance, move service instances, created augmentation for service instances, cft cisco specific commands, added port-security","frinx-cisco-ipvsix-extension: added yang extension for global ipv6 commands.","frinx-cisco-routing-policy-extension: prefix lengths in prefix-list, sequence-id, forwarding-action, route-map","frinx-cisco-vrrp-extension: added ipv6 vrrp augmentation, added vrrp-group augmentation","frinx-oam: added ethernet cfm mip","frinx-openconfig-bgp-policy-extension: added community-list type","frinx-openconfig-bgp-types: extracted typedefs for community union type.","frinx-openconfig-fhrp: fhrp version","frinx-openconfig-lacp: added ON lacp mode","frinx-qos-extension: moved service-policy from IOS/interface to IOS/QoS","frinx-snmp: added snmp-view config","removed unused Karaf features from openconfig"]}],[{"i":"uniconfig-426","l":"UniConfig 4.2.6"},{"i":"uniconfig","l":"UniConfig:","p":["new feature: introduced 3-phase commit - integration of validation and confirmed-commit features - here","new feature: templates can be used to reuse configuration and afterwards apply it more easily 
to target UniConfig nodes - storing of templates in UniConfig, modification of templates including tags using RESTCONF operations, and application of templates to target UniConfig nodes using apply-template RPC","new feature: added copy-subtrees RPCs - merge or replace whole subtrees: copy-one-to-one, copy-one-to-many, copy-many-to-one","new feature: added calculate-subtree-diff RPC - calculation of diff between two subtrees in the datastore","new feature: implemented uniconfig healthcheck - RPC checks UniConfig and database connection","fixed auto-sync service","fixed creation of Unified mountpoint for CLI device without available translation units - using only 'generic' units in this case"]},{"i":"controller","l":"CONTROLLER:","p":["improvement: removed 'native_prefix' from 'node' database relation - it is replaced by NETCONF repository name","fixed MDSAL union codec - it didn't work with boolean subtype"]},{"i":"cli","l":"CLI:","p":["fixed unmounting of CLI device: the case when the mounting process hasn't successfully finished yet"]},{"i":"netconf","l":"NETCONF:","p":["new feature: NETCONF validate RPC and confirmed-commit RPC exposed by extension of DOM transaction","improvement: mounting NETCONF device with explicitly set NETCONF repository name that must be used - using this approach, it is not necessary to explicitly override/merge capabilities in the mount request - here","improvement: replacing uniconfig-native fingerprint by 'schema-cache-directory' in NETCONF operational data","fixed mounting SROS device with specified ignoreNodes/namespaceBlacklist - here","fixed: unmounting of NETCONF device whose mounting process hasn't finished yet","fixed: increased maximum NETCONF chunk size to 32*1024*1024"]},{"i":"restconf","l":"RESTCONF:","p":["new feature: introduced 'uniconfig-schema-repository' query parameter - explicitly set the name of the schema against which input/output data is validated","new feature: JSON attributes - option to encode XML-like attributes into JSON structure - here"]},{"i":"cli-translation-units","l":"CLI TRANSLATION UNITS:","p":["IOS: fix - QoS translation unit, added port-channel into interface type","IOS: added translation units - storm-control, standard ACL","IOS: refactoring - allowed vlans on trunk interface","SAOS: fixed translation units - statistics augmentation, command ordering, ethernet config reader/writer, ordering of VLAN and VC, order of CPE commands","SAOS: fixed initialization - committing configuration during initialization"]},{"i":"openconfig","l":"OPENCONFIG:","p":["frinx-acl-extension: added support for standard ACL","moved statistics from frinx-saos-vlan-extension to frinx-saos-vc-extension","frinx-cisco-if-extension: added storm control","frinx-qos-extension: extended and fixed support for IOS QoS"]},{"i":"known-issues","l":"Known Issues:","p":["The error message needs to be fixed to inform the user about the name clash and how to fix it","ODL did not start if the cache folder for the SROS16 device is applied","BGP: NullPointerException occurs when configuring network instances for XE","NETCONF: Junos 18 can't be mounted by netconf. Xrv6.2.3 device has been locked and the session went down after a specific set of commands","CLI: Performance issues when more than 400 devices are connected","RPC: Commit and checked-commit issues when invalid configuration has been applied to one router. Transaction has been locked during checked commit and there is no rollback when invalid configuration has been configured on one router"]}],[{"i":"uniconfig-425","l":"UniConfig 
4.2.5"},{"i":"uniconfig","l":"UniConfig:","p":["new feature: show-connection-status RPC: it can be used for verification of status of selected nodes on CLI, NETCONF, Unified, and Uniconfig layers - here","new feature: filtering of data that is read from NETCONF mountpoint based on YANG extension that can be placed in the mount request ('uniconfig-config:extension' parameter) https://docs.frinx.io/frinx-odl-distribution/oxygen/user-guide/network-management-protocols/uniconfig_mounting/mounting-process.html#example-mounting-of-uniconfig-native-netconf-device","new feature: is-in-sync RPC: verification if UniConfig Operation datastore is in sync with device - here","new feature: introduced 'install-uniconfig-node-enabled' mount request parameter - option to not install node in the Unified and UniConfig layers - node would be installed only in the southbound layer - here","new feature: introduced uniconfig-native translation units used for reading and parsing of only configuration fingerprint","improvement: calculate diff for uniconfig-native nodes diff output shows difference also on the level of leaves and leaf-lists(better granularity)","fixed setting of maximum snapshot limit (passing 0 in input)","fixed uniconfig-native - mounting node using CLI and afterwards using NETCONF uniconfig-native didn't work as expected","fixed caching of read operational data: improved performance for nodes that are mounted via NETCONF translation units"]},{"i":"cli","l":"CLI:","p":["new component: creation of CLI flavour for SAOS devices for successfull reading and parsing of device configuration","new component: \"one-line-parser\" CLI parsing engine that uses grep function for parsing running-configuration","fixed synchronization of UniConfig operations (for example, commit RPC) and CLI RPCs (for example, execute-and-read)"]},{"i":"netconf","l":"NETCONF:","p":["new feature: added support for invocation of YANG 1.1 actions and TAILF actions - here","new feature: NETCONF edit-config test option - controlling validation of sent edit-config messages on NETCONF server - here","new feature: introduced 'default' NETCONF cache repository that can be used for side-loading of missing/fixed YANG schemas that are invalid/not provided by NETCONF device - here","new feature: introduced logging of whole NETCONF communcation - per-device NETCONF messages, notifications, and system events - here","improvement: added NETCONF cache directory (NETCONF repostory) into Operational datastore of NETCONF node","fixed authentication in NETCONF testtool (key-pair provider)","fixed parsing of NETCONF replies that contains multiple RPC errors(severity of error was not correctly considered)","fixed creation of NETCONF mountpoint - it was not blocking, so higher layers haven't caught events in the correct order","fixed loading of NETCONF cache repository into Operational datastore","synchronization issues","fixed propagation of user-friendly error messages from NETCONF layer into UniConfig RPC output"]},{"i":"restconf","l":"RESTCONF:","p":["new feature: subscription to NETCONF device notifications via websockets - here","new feature: invocation of YANG 1.1 actions and TAILF actions - here","new feature: invocation of PLAIN PATCH operation - here","new feature: schema filtering based on YANG extensions and deprecated YANG statement - reading and modification of data - here","new feature: introduced logging of whole RESTCONF communcation with option to hide fields with selected YANG type - here","improvement: improved RESTCONF error 
messages in case of invalid URI - displaying possible children nodes","fixed reading of whole list under augmentation/choice node"]},{"i":"controller","l":"CONTROLLER:","p":["new feature: introduced PostgreSQL persistence system for UniConfig nodes: persisting node configuration and NETCONF repositories into DBS with recovery system in the cluster - here","upgrade: using TrieMap dependency for data-tree implementation"]},{"i":"distribution","l":"DISTRIBUTION:","p":["added support for Java 11: compilation of all projects using JDK 11 and also running of UniConfig distribution using JRE 11","fixed invocation of UniConfig with \"--help\" argument","changed logging framework from log4j to logback","added \"--debug\" parameter for opening debug session"]},{"i":"translation-units","l":"TRANSLATION UNITS:","p":["fixed invocation of subtree writers based on wildcard path"]},{"i":"netconf-translation-units","l":"NETCONF TRANSLATION UNITS:","p":["XR6: added L3VPNIPV4UNICAST afi-safi type","XR6: fixed BGP neighbor reader","JUNOS17: fixed LACP units"]},{"i":"cli-translation-units","l":"CLI TRANSLATION UNITS:","p":["SAOS: create readers and writers for logical-ring","SAOS: fixed sending of commit command, parsing of port range, dependencies between writers, parsing of connection point key, interface subport writer, registering of interface writer, hardening update commands, L2VSICP writer, getAllIds in PortReader","IOS: added translation units: QoS, interface statistics, service-policy, VLAN, routing-policy","IOS: modified translation units: added next parameters into BGP, switchport mode options: dot1q && access, BGP neighbor version, SPEED parameter, ICMP type into ACL entry","IOS-XR: fixed LACP bugs: 'mode on' configuration is now explicit, subinterfaces were wrongly added to list of LAG interfaces","Arista: added init unit","Cubro: added CLI flavour"]},{"i":"openconfig","l":"OPENCONFIG:","p":["frinx-qos-extension: added support for CoS and DSCP in QoS","frinx-cisco-if-extension: added switchport mode options: dot1q, access","frinx-bgp-extension: added BGP neighbor version support","frinx-if-ethernet-extension: added interface SPEED parameter","frinx-cisco-if-extension: added port-type, snmp-trap-link-status, switchport-mode, switchport-access-vlan, switchport-trunk-allowed-vlan-add, ip-redirects, ip-unreachables, ip-proxy-arp, service-policy","created SAOS model extension (frinx-saos-virtual-ring-extension)","created Cisco BGP model extension (frinx-cisco-bgp-extension)","fixed frinx-bgp-extension YANG","fixed auto-generated yang docs"]},{"i":"known-issues","l":"Known Issues:","p":["The error message needs to be fixed to inform user about the name clash and how to fix it. 
ODL does not start if the cache folder for the SROS16 device is applied","BGP: - NullPointerException occurs when configuring network instances for XE","NETCONF: - Junos 18 can't be mounted by netconf - Xrv6.2.3 device has been locked after a specific set of commands","CLI: - Performance issues when more than 400 devices are connected"]}],[{"i":"uniconfig-424","l":"UniConfig 4.2.4"},{"i":"uniconfig","l":"UniConfig:","p":["Added uniconfig node status - each node is in one of these states: installing, installed, failed","Added unified node status - each node is in one of these states: installing, installed, failed","bugfixing"]},{"l":"UniConfig Native","p":["UniConfig Native for CLI - new experimental feature allowing communication with devices in a native way using hand-written YANG models","Added sequence-read-active param - this forces UniConfig to read root configuration elements sequentially."]},{"l":"CLI","p":["Introduced RPC execute-and-expect - it is a form of the 'execute-and-read' RPC that additionally may contain 'expect(..)' patterns used for waiting for specific outputs/prompts. It can be used for execution of interactive commands that require multiple subsequent inputs with different preceding prompts.","Introduced Tree-parser as CLI parsing strategy - device configuration is parsed into a tree. It provides faster lookup operations for reads.","Introduced native CLI - this feature allows defining YANG models instead of translation units. YANG models need to be created based on device-specific CLI commands"]},{"l":"OpenConfig","p":["added various extensions for Ciena TUs"]},{"l":"NETCONF","p":["bugfixing"]},{"l":"Translation units","p":["Added CLI translation units for Ciena SAOS6 and SAOS8","bugfixing"]}],[{"i":"uniconfig-423","l":"UniConfig 4.2.3"},{"i":"uniconfig","l":"UniConfig:","p":["create Lighty-based distribution - removal of Apache Karaf altogether; this distribution is based on lighty.io","RPC input/output rework","Unification of RPC inputs/outputs","Prevent any network-wide operations if no node-id has been passed - all RPCs MUST specify the node-id of nodes they are affecting","new UniConfig transactions - create-transaction and cancel-transaction are used in HA deployments","bugfixing"]},{"l":"UniConfig Native","p":["separate schema contexts based on device type - this allows mounting devices with the same YANG models but different revisions"]},{"l":"Lighty","p":["adding of AAA support","adding of TLS support"]},{"l":"RESTCONF","p":["update to RFC-8040 based RESTCONF - only this version runs by default","usage of schema context based on device type for data parsing","creation of custom UniConfig JSON/XML parsers/serializers"]},{"l":"OpenConfig","p":["added models: ipsec, frinx-if-ethernet-extension","added various extensions for Brocade TUs"]},{"l":"NETCONF","p":["run-time loading of netconf cache repositories","division of netconf cache based on device type","creation of schema context from netconf-cache","bugfixing"]},{"l":"Translation Units","p":["bugfixing"]},{"l":"Known Issues","p":["JSON response for GET snapshots of UniConfig-native nodes contains a generated prefix \"uniconfig--\" (e.g. native-529687306-Cisco-IOS-XR-ifmgr-cfg:interface-configurations). 
This issue does not have an impact on RPC replace-config-with-snapshot."]}],[{"i":"uniconfig-508-release-notes","l":"Uniconfig 5.0.8 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Created TU for Arris(CER) device","Install-node without mounting/syncing configuration from device","Option to divide OpenAPI files into modules"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Uniconfig-client: increased default HTTP response read timeout","Fixed NETCONF connection timeout","Fixed number of NETCONF reconnection attempts","Fixed waiting for NETCONF dry-run mountpoint","Fixed reading of default NETCONF parameters","Added 'get-template-info' RPC to oper mode (shell)","Huawei install DB parsing issue","Fixed memory visibility issues in MountpointRegistry","Fixed parsing junos xml configuration","Fixed parsing xml configuration with reordered list items","Fixed list of available RPCs in UniConfig Shell"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Optimization of calc-diff RPC after replace-config-with-oper RPC","Install-node without mounting/syncing configuration from device improvements","Added missing attributes to SAOS6 Interface","Remove OSS index checks from owasp","Generate release notes during merge job"]}],[{"i":"uniconfig-509-release-notes","l":"Uniconfig 5.0.9 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implemented dedicated device sessions","Implementation of device locking states","Implementation of speed attribute for saos6 and saos8 (#21)","Expose kafka producer settings into java client","Added option to use list key delimiter in URI","Implementation of pm instances for port queue groups in saos8 (#11)","Expose kafka producer settings","Implementation of default vlans for saos6 (#10)","Implementation of auto-neg attribute for both saos6 and saos8 (#9)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed ordering of data inside transaction on SONiC device","Fix ignoring empty key","Fix serialization of keyDefinitions","Cleaned and fixed locking of nodes in uniconfig RPCs","Fixed generation of NETCONF message-id","Fixed JSONB filtering - creation of jsonpath and parsing output"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Updated writer template for saos6 and saos8 (#32)","Implementation of speed attribute for saos6 and saos8 (#21)","fix showing list entries in cli suggestions"]}],[{"i":"uniconfig-5010-release-notes","l":"Uniconfig 5.0.10 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed removing native prefix from snapshots (#57)","Fixed parsing GNMi GET response (augmentation content)","Fixed parsing result from immediate-commit model","Fixed lost list ordering after apply-template RPC"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Add a parameter so empty GET response returns 204"]}],[{"i":"uniconfig-5011-release-notes","l":"Uniconfig 5.0.11 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Remove namespace from response (#77)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["uc shell: update simple value and list value simultaneously","uc shell: transaction log ordering","VHD-162 Fixed Issue with With-Defaults param. 
(#68)","Serialization of int64, uint64, decimal types as string type","Changed order of executing remove and add vlans for saos6 (#75)","Fixed NETCONF reconnection attempts after connection timeout","Removed parent-node-id from NETCONF layer","Fixed synchronization of NETCONF session timeout"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Fixed ordering of data inside transactions for SONiC device","Implementation of common commands of relay agent for saos8 (#78)","Implementation of common commands of relay agent for saos6 (#70)","UC shell: autocompletion of nodes (#36)","Fix parallelism in apply-template RPC (#73)","Implementation of relay-agent sub-port command for saos8 (#47)","Changed default value of content query parameter to 'config'","Add sshd package to logback.xml with INFO level (#67)"]}],[{"i":"uniconfig-5012-release-notes","l":"Uniconfig 5.0.12 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Inclusion of unhide query parameter in PUT/POST/PATCH requests","Sync-to-network RPC"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger - remove input container (#113)","Fix unhide param for write operations","Fixed default value of speed in ios (#95)","Changed isEmpty to null check (#96)","Fixed default value of speed in ios-xe (#88)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger - Fix authorizations (#114)","change behavior of execute-and-expect RPC (#112)","Swagger - configurable description and toggle servers","Enabled TCP keepalive mechanism in JDBC connection","Added 'database-connection-client-port' to 'transactions-data'","Swagger - generating adjustmens","Using hideEmptyDataNodes parameter per request (#94)","updated write template of interface config for ios xe devices for use-cases where no changes are requested from the user (#91)","Improve schema context caching for gnmi devices"]}],[{"i":"uniconfig-5013-release-notes","l":"Uniconfig 5.0.13 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Add jsonb-filter in UC client","Automation of adding release notes to documetation","API for bulk addition of templates","Implementation of callbacks","Implementation of publishing shell notifications to kafka","JSONB filtering core","Upgrade-from-network as part of sync-from-network"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Cancellation of initial NETCONF RPCs after request timeout","Fixed parsing XML-endoded leaf with instance-identifier to list","Fixed synchronization of notification listeners","Releasing subscription that is bound to tangling mountpoint","Fixed construction of output with set with-defaults param","Fix gnmi unknown augmentations"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Implementing HideAttributes query-parameter per request. - Introduced query parameter HidesAttribute. Default value is 'false'. 
Hides all composite data-tree nodes attributes to the GET response.","Stop acquiring subscription that was released in the same iteration","Fixed and refactored DOMMountPointService implementation","Add support for template leaf hashing","Improved code and API of create-multiple-templates RPC","Implemented frinx-types:json-element in the JSON deserializer","Swagger - Grouping requests","Swagger - Remove patch operation","Bump Mockito and get rid of Powermock","Swagger: inclusion of action endpoints","YangPackager does not catch broken submodules","Refresh schema context for netconf southbound if device was upgraded","Make mountpoint service call listeners from different thread"]}],[{"i":"uniconfig-5014-release-notes","l":"Uniconfig 5.0.14 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Added a flag to disable confirmed-commit phase in commit RPC (#181)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Adding 'rsa_' prefix to encrypted data","Changed the way to get config metadata for ios xe devices","Disable html escaping in callbacks output","Fix adding release notes to documentation repository","Fix Flyway when using SSL encryption","Fix of incorrect UC behavior when on limit with DB connections","Fix parsing of sslPassword parameter","Fix setting of DbConnectionConfig parameters","Fixed creation of aug with admin-state leaf","Fixed detection and recovery from cyclic dependency error in YANGs (#161)","Fixed duplicate module lookup in path deserializer (RESTCONF) (#150)","Fixed encryption (#170)","Fixed parsing NETCONF action response","Fixed recovery of Cipher object","Fixed SAOS Qos TU writer","Jsonb-filter multiple schemas bugfix","Make tailf:info revision independent","Updated config metadata pattern in reader for ios xe devices (#174)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Internal delete for trunk-vlans","JSONB-filter improvement"]}],[{"i":"uniconfig-5015-release-notes","l":"Uniconfig 5.0.15 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of bulk-edit RPC"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixes for trunk-vlans handling"]},{"i":"api","l":"\uD83D\uDCBB API","p":["Defined API for bulk-edit RPC"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Do not use common fork join pool in DOMMountpointService"]}],[{"i":"uniconfig-5016-release-notes","l":"Uniconfig 5.0.16 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Added create-multiple-templates RPC to uniconfig-client","Added option to specify tags in create-multiple-templates RPC","Swagger: Grouping of RPCs and tailf:actions","Encrypt/Decrypt of password for gnmi/netconf/cli topologies"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Data-change-events: make node-id optional","Displaying the default unistore node in shell for callbacks","Fix Guava dependency in POM","Fix replace-list ordering","Fix using ignoredNamespaces from swagger-config","Fixed merging template tags into merge node in the same TX","Fixed parsing nodes with attributes","Fixed regex for trimming path in TransactionTrackerUtils (#241)","Fixed submitting changes to database if there are failed nodes and do-rollback is false (#227)","Fixed writing of unkeyed list entry node","Remove duplicate yang models","Removed non-working option to create unistore node in request and show states","Revert \"Install-node without mounting/syncing configuration from device\"","Swagger - fix list container wrapping","Swagger: Fix behavior of basePath and server generation","Swagger: Fix top level containers not 
generating","Wrap requests to make them RFC 8040 compliant"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Adjusted logging of keepalive messages (stream sessions)","Made amount of max parallel installs customizable (#204)","Made error message user-friendly when user enter nonexistent node-id","Make create-multiple-templates RPC 'atomic'","Mapped \"gig\" to SpeedType.Gigabit","Netconf rpc timeout (#257)","Optimization of callbacks","Refactored stream writers used by RESTCONF","Removed last pieces of CheckedFuture and old unused MDSAL-API","Removed unused jettison dependency","Replaced CheckedFuture by FluentFuture - DOM read transaction","Replaced CheckedFuture by FluentFuture - DOM store read transaction","Replaced CheckedFuture by FluentFuture - DOMRpcImplementation","Replaced CheckedFuture by FluentFuture - DOMRpcService","Replaced CheckedFuture by FluentFuture: TX submit()","Resolving CVE security issues between level 6 and 0","Set UNICONFIGTX cookie to entire domain not just /rests/","Specified create/close-transaction RPCs in YANG","Support upgrading of YANG repository content (#233)","Suppress CVE-2022-38752","Suppressing logs generated by received unknown requests (SSH)","Used FluentFuture in binding ReadTransaction"]}],[{"i":"uniconfig-5017-release-notes","l":"Uniconfig 5.0.17 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Swagger - filter path CRUD customization","Implemented invalid schema repository cleaner","Units-coverage RPC"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Commit fails if one of the touched node was uninstalled","Swagger - fix CRUD filter generation","Triggered commit hook if revert is successful","Fixed decryption of passwords in CLI layer (#301)","Add support for fetch=count with jsonpath filtering (#282)","Fix suppressed Jackson CVEs","Bump Apache commons-text to 1.10.0","Swagger - Fix GET operation generation for list nodes","Bump protobuf-java to 3.21.7","Changed order of executed commands for saos8 (#280)","Additional fix for dce global subscription in client.","Improve JSON parser error message","Changed the way of getting vlan name and egress-tpid","Localhost throws 500","Swagger: Fix operational API generation"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Replacing parallelStreams with more predictable alternatives","Swagger - add option to disable GET request generation for concrete list nodes","Add request timeout to gnmi session","Create modulesWithIgnoredNamespace list","Support install-multiple-nodes for gnmi","Swagger: Add content query parameter"]}],[{"i":"uniconfig-5018-release-notes","l":"Uniconfig 5.0.18 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implemented TU for collecting all the information in NTP MCS for Saos6 and Saos8 (#420)","Implemented low priority fields for collecting inventory for SAOS6 CEN(ring) (#341)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger: Fix generation of arguments in path","Fixed cleanup of existing MP during mount-node RPC","Swagger: fix generation of choice nodes","Fixed encryption of sensitive info passed via template variables (#326)","fix Jsonb filter element filtering","Swagger - Fix request generation","Fixed passing leaf-list in shell callbacks"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Fix Logback and cleanup POMs","Swagger: Custom operational path","Improved reporting of parsing issues in RESTCONF","Swagger - Improve start file","Changed order of some commands for SAOS8 (#348)","Swagger: generate augmentations in 
respective modules","Expose request-timeout parameter","Bump dependency-check to 7.3.0","readEntireConfig toString returns plain content","Reading mount configuration in the uniconfig-client"]}],[{"i":"uniconfig-5019-release-notes","l":"Uniconfig 5.0.19 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Fixed hardcoded part and added one more if condition to receiver transceiver data for IOS-XE (#493)","Stable XR6 XR66 devices (#471)","Implementation of storing failed installations into DB (stable 5.0.x)","Implemented TU for adding/removing users for Saos6 (#438)","Implemented TU for collecting transceiver information for IOS-XE (#439)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Downgraded sshd to 2.8.0","Fix uninstall -> install transition","Swagger: fix generation of operational APIs (5.0.X)","Fix CVEs","Fixed reading public key from NETCONF device (NPE)","Fixed distribution of mount failure from GNMi layer","Swagger: Fix path filtering"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["expose gnmi parameters","add overallStatus to multiple-nodes-rpc-output","Improved error message when connection cannot be created","Exposed DOMMountPointService configuration","Optimization of mountpoint notifications","Added logs into DOM Mountpoint Service"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Configurable re-sending cli commands","Fixed mount point creation for CLI topology","Removed unified-topology.yang","Refactored unified layer and mounting/unmounting process - updates","Refactored unified layer and mounting/unmounting process","Added logging level for shell to the logback.xml"]}],[{"i":"uniconfig-5020-release-notes","l":"Uniconfig 5.0.20 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Support for GetInstalledNodes in UC client - 5.0.x","Add getJSONOutput to UniConfig client","Added some commands for collecting data for IOS-XE (#515)","Skip unreachable nodes at commit"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed leafref version-drop","Suppress Netty FP CVEs","Fixed quit command in the shell","Fixed pattern handling and XPath extension parsing","Fixed parsing of seconds in XR native metadata unit","Swagger: fix generation of action nodes (5.0.X)","Swagger: fix no key lists generation (5.0.X)","Fixed locking of nodes from TX with enabled dedicated sessions (#523)","Fix bug in bulk edit operation","Swagger: fix generation of operation children from config container"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Change overallStatus when skiping unreachable nodes","handle user parameters input in GnmiDefaultParametersService","Removed 'reconcile' mountpoint parameter (#572)","Disable verification of supported query parameters (5.0.x) (#549)","Swagger: toggle generation of POST apis for containers","Bulk-edit rpc improvements"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Swagger: fix npe in custom operational path (5.0.X)","Added only-vlan parser, upgraded trunk-vlans for huawei (#528)","Callbacks authentication"]}],[{"i":"uniconfig-5021-release-notes","l":"Uniconfig 5.0.21 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Add migration for removing node-extension:reconcile"]}],[{"i":"uniconfig-5022-release-notes","l":"Uniconfig 5.0.22 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["make replace request on correct node for gnmi","Add GetTemplateNodes RPC and add support to Client","Implemented some new commands for Huawei TU (#580)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed encryption of leaves marked using 
deviations","Fix vulnerabilities by changing base docker image","Stop cleaning YANG repos associated to persisted nodes","Swagger: fix actions using custom operational path","Suppressed CVE-2021-4277 (#612)","Fix CVE-2021-37533"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: mandatory indicator","Improved registration of unexpected YANGs downloaded from device","Add compare-config RPC.","Changed logging level: Unable to map identifier to capability","Skipping unknown fields in GET request","Added an RPC input to enable error handling for execute-and-read RPC (#662)","Fetch Kafka settings to client.","add gnmi protocol to get-installed-nodes RPC","Swagger: add drop-down for topology-id parameter"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["PUT and DELETE operations for callbacks"]}],[{"i":"uniconfig-5023-release-notes","l":"Uniconfig 5.0.23 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of RPD TUs for CER(Arris)","Implementation of Cable-Mac Oper TUs for CER(Arris)","Replace paths feature","Implementation of cable-upstream TUs for CER(Arris)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Added new control to check if cached prompt is invalid (#809)","Fixed writer of cable upstream interface","Added new control to check if cached prompt is invalid (#633)","Fix delete request in replace-paths","Fixed schema context building","Java based migration for huawei config","Bulk-edit - removed the version comparison before version drop procedure","Bump dependencycheck.version, update suppress for CVE-2022-41915, CVE-2022-41881"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: regular expressions with example values","Extended TreeConfigParser to handle arris device's behavior for cable-upstreams","Change isInstalled method implementation in uniconfig-client","JRE-17 compatibility","Added read option to bulk-edit RPC","Bump sshd to 2.9.2","Prefer 'latest' repository in latest repository update process","Change status code if transaction is not valid. 
(#711)"]}],[{"i":"uniconfig-5024-release-notes","l":"Uniconfig 5.0.24 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implemented RPD related commands to fiber-node TU for Arris Commscope","Swagger: difference between OpenAPI specifications"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed RPD related writers for Arris Commscope","Fixed update templates in CableInterfaceUpstreamConfigWriter for Arris Commscope","Fixed callback leaf-list input parameter (#948)","Added a verification to check if lineIndex is lower than total number of parsed lines for multiline commands","Fixed CLI SSH KEX initialization","Fixed data decryption during apply-template RPC","Fixed regex for \"show cable modem\" command (#897)","Generate action names in java constants"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: shorter operational path"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Implementation of RPD TUs for CER(Arris) (#819)"]}],[{"i":"uniconfig-5025-release-notes","l":"Uniconfig 5.0.25 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Subtree-based resolution of conflicts between committed nodes (#989)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix: Duplicate module name in Yang schemas","Fixed update template to update fiber node for Arris Commscope","Fixed uninstall node rpc","Fixed parsing leaf-list into JSONObject","Fixed construction of Tree (callbacks system test)","Fixed problem with re-write data of transaction by other transaction (#1032)","Fixed method to add unistore FP","Fix deriving of DB reader path","fix read only lock in uniconfig task executor (#981)","Fixed overriding of default mount settings by uniconfig-client"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["cli-shell set callback... 
suggest only commands that contain input body","Setting default spin/park time in notification router config","Optimised sending of internal notifications","Optimised lookup in modified uniconfig-topology & network-topology modules","Optimise detection of updated mount data in notification monitoring system","Add additional logs to precondition checks in SchemaContextUtil","Subtree-based resolution of conflicts between committed nodes (#989)","Improved the processing time of sync RPC for ios devices","Optimisation of single transaction-log entry reading","Added dedicated reader for single transaction-log entry","Add batching process for parallel reading of config"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Optimalization of handling saos devices"]}],[{"i":"uniconfig-510-release-notes","l":"Uniconfig 5.1.0 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Support for GetInstalledNodes in UC client","Add getJSONOutput to UniConfig client","Added some commands for collecting data for IOS-XE (#527)","Skip unreachable nodes at commit","Fixed hardcoded part and added one more if condition to receiver transceiver data for IOS-XE (#490)","added implementation of XR6 and XR6.6 devices as native units","Implementation of storing failed installations into DB (main)","Implemented TU for adding/removing users for Saos6 (#360)","Implemented TU for collecting transceiver information for IOS-XE (#437)","Implemented TU for collecting all the information in NTP MCS for Saos6 and Saos8 (#352)","Implemented low priority fields for collecting inventory for SAOS6 CEN(ring) (#341)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Add migration for removing node-extension:reconcile","Bump units versions to 5.1.0-SNAPSHOT","Downgraded sshd to 2.8.0","Fix : bad package for GnmiDefaultParametersService","Fix bug in bulk edit operation","Fix CVE-2021-37533","Fix CVEs","fix Jsonb filter element filtering","Fix Logback and cleanup POMs","Fix uninstall -> install transition","Fixed cleanup of existing MP during mount-node RPC","Fixed distribution of mount failure from GNMi layer","Fixed encryption of sensitive info passed via template variables (#326)","Fixed hardcoded part and added one more if condition to receiver transceiver data for IOS-XE (#490)","Fixed leafref version-drop","Fixed locking of nodes from TX with enabled dedicated sessions (#522)","Fixed parsing of seconds in XR native metadata unit","Fixed passing leaf-list in shell callbacks","Fixed pattern handling and XPath extension parsing","Fixed quit command in the shell","Fixed reading public key from NETCONF device (NPE)","Fixed reconcile SQL migration file","Fixed synchronization in datastore transaction","Stop reporting metrics into log/logs.log and stdout (#598)","Suppress Netty FP CVEs","Swagger - Fix request generation","Swagger: fix generation of action nodes","Swagger: Fix generation of arguments in path","Swagger: fix generation of choice nodes","Swagger: fix generation of operation children from config container","Swagger: fix generation of operational APIs","Swagger: fix no key lists generation","Swagger: Fix path filtering"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["add overallStatus to multiple-nodes-rpc-output","Added logs into DOM Mountpoint Service","Bulk-edit rpc improvements","Bump dependency-check to 7.3.0","Bump logback","Change overallStatus when skiping unreachable nodes","Changed order of some commands for SAOS8 (#348)","Disable verification of supported query parameters (5.1.x) (#550)","expose gnmi 
parameters","Expose request-timeout parameter","Exposed DOMMountPointService configuration","handle user parameters input in GnmiDefaultParametersService","Improved error message when connection cannot be created","Improved reporting of parsing issues in RESTCONF","Optimization of mountpoint notifications","readEntireConfig toString returns plain content","Reading mount configuration in the uniconfig-client","Removed 'reconcile' mountpoint parameter (#573)","Swagger - Improve start file","Swagger: Custom operational path","Swagger: generate augmentations in respective modules","Swagger: toggle generation of POST apis for containers"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Swagger: fix npe in custom operational path","Added only-vlan parser, upgraded trunk-vlans for huawei (#444)","Callbacks authentication","Configurable re-sending cli commands","Fixed mount point creation for CLI topology","Removed unified-topology.yang","Refactored unified layer and mounting/unmounting process - updates","Refactored unified layer and mounting/unmounting process","Added logging level for shell to the logback.xml"]}],[{"i":"uniconfig-511-release-notes","l":"Uniconfig 5.1.1 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["make replace request on correct node for gnmi","Implemented some new commands for Huawei TU (#580) (#639)","Add GetTemplateNodes RPC and add support to Client","Implementation of MIB parser using ANTLR grammar"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed NPE for JSONCodecFactoryLoader","Fixed deserialization of uniconfig instance record read from DB (#700)","Fixed encryption of leaves marked using deviations","Fix vulnerabilities by changing base docker image","Stop cleaning YANG repos associated to persisted nodes","Swagger: fix actions using custom operational path","Suppressed CVE-2021-4277 (#613)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: mandatory indicator","Improved registration of unexpected YANGs downloaded from device","Add compare-config RPC.","Changed logging level: Unable to map identifier to capability","Skipping unknown fields in GET request","Added an RPC input to enable error handling for execute-and-read RPC (#668)","Fetch Kafka settings to client.","gnmi support for upgrade-from-network RPC","add gnmi protocol to get-installed-nodes RPC","Swagger: add drop-down for topology-id parameter"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["PUT and DELETE operations for callbacks"]}],[{"i":"uniconfig-512-release-notes","l":"Uniconfig 5.1.2 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of RPD TUs for CER(Arris)","Implementation of Cable-Mac Oper TUs for CER(Arris)","Replace paths feature","Implementation of cable-upstream TUs for CER(Arris)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Added new control to check if cached prompt is invalid (#812)","Fixed writer of cable upstream interface","Fix delete request in replace-paths","Fixed schema context building","Changed log level in mockito-configuration to INFO","Fix wrong groupIds","Java based migration for huawei config","Bulk-edit - removed the version comparison before version drop procedure","Bump dependencycheck.version, update suppress for CVE-2022-41915, CVE-2022-41881"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Add Dependency Upgrades to release notes","Added calc-diff result to audit logs","Added read option to bulk-edit RPC","Adopt Mockito 5","Bump dependency-check, cleanup unused suppressions","Bump sshd to 2.9.2","Change 
isInstalled method implementation in uniconfig-client","Change status code if transaction is not valid. (#699)","Enable dependabot updates","Extended TreeConfigParser to handle arris device's behavior for cable-upstreams","Migrate codebase to Java 17 & bump dependencies & clean maven structure","Prefer 'latest' repository in latest repository update process","README - Update Running from IDE section","Remove license server","Remove license token from README.md and run_uniconfig.sh script.","Replace com.google.common.base.Optional with java.util.Optional","Swagger: regular expressions with example values","Unify antlr4 version","Update README.md running uniconfig from IDE","Updated list of supported Unicode blocks (RegexUtils)"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["Bump antlr4-maven-plugin from 4.10.1 to 4.11.1","Bump byte-buddy.version from 1.12.22 to 1.13.0","Bump commons-dbcp2 from 2.7.0 to 2.9.0","Bump commons-lang3 from 3.7 to 3.12.0","Bump disruptor from 3.3.10 to 3.4.4","Bump embedded-postgres from 1.2.10 to 2.0.3","Bump grpc.version from 1.51.1 to 1.53.0","Bump httpclient from 4.5.13 to 4.5.14","Bump jackson-bom from 2.14.1 to 2.14.2","Bump jna.version from 4.5.0 to 5.13.0","Bump maven-enforcer-plugin from 3.1.0 to 3.2.1","Bump netty.version from 4.1.86.Final to 4.1.89.Final","Bump objenesis from 2.1 to 3.3","Bump okhttp.version from 4.9.1 to 4.10.0","Bump org.eclipse.jdt.annotation from 2.1.0 to 2.2.700","Bump perfmark-api from 0.25.0 to 0.26.0","Bump properties-maven-plugin from 1.0.0 to 1.1.0"]}],[{"i":"uniconfig-513-release-notes","l":"Uniconfig 5.1.3 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Swagger: difference between OpenAPI specifications"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix UC not starting when using standalone database - binding","Fix UC not starting when using standalone database","Fix & rewrite calc-diff to new format","Fixed regex for \"show cable modem\" command (#898)","Generate action names in java constants"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Migrate to JUnit5","Removed unused JMX classes"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Implementation of RPD TUs for CER(Arris) (#862)"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["Bump actions/setup-python from 4.3.0 to 4.5.0 (#760)","Bump actions/upload-artifact from 3.1.1 to 3.1.2","Bump annotations from 3.0.1 to 3.0.1u2","Bump antlr4.version from 4.11.1 to 4.12.0","Bump async-http-client from 1.9.24 to 1.9.40","Bump AutoModality/action-clean from 1.1.0 to 1.1.1","Bump byte-buddy.version from 1.13.0 to 1.14.0","Bump commons-cli from 1.4 to 1.5.0","Bump commons-compress from 1.21 to 1.22","Bump commons-fileupload from 1.3.3 to 1.5","Bump crypt4j from 1.0.0 to 1.0.1","Bump docker/build-push-action from 2 to 4 (#761)","Bump docker/login-action from 2.0.0 to 2.1.0 (#762)","Bump dokka-maven-plugin from 1.5.30 to 1.7.20","Bump embedded-postgres-binaries-linux-amd64 from 13.2.0 to 13.10.0","Bump embedded-postgres-binaries-linux-amd64 from 13.2.0 to 15.2.0","Bump exec-maven-plugin from 1.5.0 to 3.1.0","Bump flyway-core from 7.8.1 to 9.15.0","Bump flyway-core from 9.15.0 to 9.15.1","Bump future-converter-java8-guava from 0.3.0 to 1.2.0","Bump gson from 2.9.0 to 2.10.1","Bump jackson-databind from 2.14.1 to 2.14.2","Bump jakarta.servlet-api from 5.0.0 to 6.0.0","Bump jakarta.ws.rs-api from 3.0.0 to 3.1.0","Bump janino from 2.6.1 to 3.1.9","Bump jaxb-impl from 3.0.2 to 4.0.2","Bump jaxen from 1.1.6 to 
2.0.0","Bump jersey.version from 3.0.8 to 3.1.1","Bump jetty-bom from 11.0.11 to 11.0.13","Bump jline.version from 3.21.0 to 3.22.0","Bump jmh-core.version from 1.21 to 1.36","Bump joelwmale/webhook-action from 2.1.0 to 2.3.2 (#759)","Bump jsonassert from 1.5.0 to 1.5.1","Bump junit-jupiter-api from 5.9.1 to 5.9.2","Bump ktlint from 0.24.0 to 0.31.0","Bump maven-assembly-plugin from 3.4.2 to 3.5.0","Bump maven-deploy-plugin from 3.0.0 to 3.1.0","Bump maven-failsafe-plugin from 3.0.0-M8 to 3.0.0-M9","Bump maven-invoker-plugin from 3.4.0 to 3.5.0","Bump maven-jar-plugin from 3.0.2 to 3.3.0","Bump maven-javadoc-plugin from 3.4.1 to 3.5.0","Bump maven-resources-plugin from 3.0.1 to 3.3.0","Bump maven.surefire.version from 3.0.0-M8 to 3.0.0-M9","Bump metrics-core from 4.2.12 to 4.2.16","Bump opentelemetry-api from 1.9.0 to 1.23.1","Bump postgresql from 42.5.1 to 42.5.4","Bump protobuf-maven-plugin from 0.5.1 to 0.6.1","Bump protobuf.version from 3.21.7 to 3.22.0","Bump sevntu-checks from 1.43.0 to 1.44.1","Bump spring-jdbc from 5.3.24 to 5.3.25 (#855)","Bump spring.boot.version from 2.7.6 to 2.7.8","Bump spring.boot.version from 2.7.8 to 2.7.9","Bump stax2-api from 3.1.4 to 4.2.1","Bump stCarolas/setup-maven from 4.3 to 4.5","Bump swagger-core from 2.2.4 to 2.2.8","Bump swagger-parser from 1.0.31 to 1.0.64","Bump triemap from 1.1.0 to 1.2.0","Bump truth.version from 0.36 to 1.1.3","Bump value from 2.9.2 to 2.9.3"]}],[{"i":"uniconfig-514-release-notes","l":"Uniconfig 5.1.4 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of MIB repository & context","Implemented RPD related commands to fiber-node TU for Arris Commscope"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed RPD related writers for Arris Commscope","Fixed update templates in CableInterfaceUpstreamConfigWriter for Arris Commscope","Fix calc-diff when data is in LeafNode","Fix subtree calc-diff in audit log when data has not changed","Fixed callback leaf-list input parameter (#949)","Added a verification to check if lineIndex is lower than total number of parsed lines for multiline commands","Fixed CLI SSH KEX initialization","Fixed data decryption during apply-template RPC"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: shorter operational path"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump dokka-maven-plugin from 1.7.20 to 1.8.10","build(deps-dev): bump maven-plugin-annotations from 3.7.1 to 3.8.1","build(deps): bump json from 20220924 to 20230227","build(deps): bump dependency-check-maven from 8.1.0 to 8.1.2","Bump reflections from 0.9.11 to 0.10.2","Bump maven-compiler-plugin from 3.10.1 to 3.11.0","Bump jetty-bom from 11.0.13 to 11.0.14","Bump maven-plugin-plugin from 3.7.1 to 3.8.1","Bump metrics-core from 4.2.16 to 4.2.17","Bump spotbugs-maven-plugin from 4.7.3.0 to 4.7.3.2","Bump maven-dependency-plugin from 3.1.1 to 3.5.0","Bump checkstyle from 10.7.0 to 10.8.0","Bump maven-antrun-plugin from 1.8 to 3.1.0"]}],[{"i":"uniconfig-515-release-notes","l":"Uniconfig 5.1.5 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implement rate limiting (#1061)","Add DOMRpcService for gNOI","Subtree-based resolution of conflicts between committed nodes (#1008)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix audit-log diff feature (#1056)","Fixed update template to update fiber node for Arris Commscope","Fix: Duplicate module name in Yang schemas","Fixed reading config until timeout (#1067)","Fixed uninstall node rpc","Fixed parsing 
leaf-list into JSONObject","Fixed construction of Tree (callbacks system test)","Fixed problem with re-write data of transaction by other transaction (#1031)","Fixed method to add unistore FP","Fix bug with audit log while calling commit RPC (#1007)","Fix deriving of DB reader path","Add missing sslpassword configuration parameter (#990)","fix read only lock in uniconfig task executor (#982)","Fixed overriding of default mount settings by uniconfig-client"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: Migrate unit tests to v3","Added topology-id to DCE notification","cli-shell set callback... suggest only commands that contain input body","Setting default spin/park time in notification router config","Optimised sending of internal notifications","Optimised lookup in modified uniconfig-topology & network-topology modules","Optimise detection of updated mount data in notification monitoring system","Add additional logs to precondition checks in SchemaContextUtil","Subtree-based resolution of conflicts between committed nodes (#1008)","Improved the processing time of sync RPC for ios devices","Optimisation of single transaction-log entry reading","Added dedicated reader for single transaction-log entry","Add batching process for parallel reading of config"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Arris CER interface bugfix (#1086)","Optimalization of handling saos devices"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.15.1 to 9.15.2","build(deps-dev): bump flyway-core from 9.15.2 to 9.16.0 (#1012)","build(deps-dev): bump flyway-core from 9.16.0 to 9.16.1 (#1052)","build(deps-dev): bump swagger-parser from 1.0.64 to 1.0.65 (#1074)","build(deps): bump byte-buddy.version from 1.14.0 to 1.14.1","build(deps): bump byte-buddy.version from 1.14.1 to 1.14.2 (#1003)","build(deps): bump checkstyle from 10.8.0 to 10.8.1 (#988)","build(deps): bump checkstyle from 10.8.1 to 10.9.2 (#1028)","build(deps): bump checkstyle from 10.9.2 to 10.9.3 (#1073)","build(deps): bump commons-compress from 1.22 to 1.23.0 (#1059)","build(deps): bump dependency-check-maven from 8.1.2 to 8.2.1 (#1062)","build(deps): bump grpc.version from 1.53.0 to 1.54.0 (#1071)","build(deps): bump jline.version from 3.22.0 to 3.23.0","build(deps): bump maven-deploy-plugin from 3.1.0 to 3.1.1 (#1072)","build(deps): bump maven-failsafe-plugin from 3.0.0-M9 to 3.0.0 (#1011)","build(deps): bump maven-help-plugin from 3.3.0 to 3.4.0 (#1025)","build(deps): bump maven-install-plugin from 3.1.0 to 3.1.1 (#1075)","build(deps): bump maven-release-plugin from 3.0.0-M7 to 3.0.0 (#1037)","build(deps): bump maven.core.version from 3.9.0 to 3.9.1 (#1027)","build(deps): bump maven.surefire.version from 3.0.0-M9 to 3.0.0 (#1013)","build(deps): bump metrics-core from 4.2.17 to 4.2.18 (#1038)","build(deps): bump mockito-core from 5.1.1 to 5.2.0 (#987)","build(deps): bump netty.version from 4.1.89.Final to 4.1.90.Final (#1010)","build(deps): bump opentelemetry-api from 1.23.1 to 1.24.0 (#999)","build(deps): bump postgresql from 42.5.4 to 42.6.0 (#1026)","build(deps): bump protobuf.version from 3.22.0 to 3.22.1","build(deps): bump protobuf.version from 3.22.1 to 3.22.2 (#1000)","build(deps): bump spotbugs-maven-plugin from 4.7.3.2 to 4.7.3.3 (#1070)","build(deps): bump spring-jdbc from 5.3.25 to 5.3.26 (#1036)","build(deps): bump spring.boot.version from 2.7.9 to 2.7.10 (#1069)","build(deps): bump swagger-core from 2.2.8 to 2.2.9 
(#1039)"]}],[{"i":"uniconfig-516-release-notes","l":"Uniconfig 5.1.6 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Saos 8 command order fix.","Fixed execution order of commands for sub-port creation","Fixed a bug that causes cli closed error if config output and prompt has same length","Fix dryrun mount node task.","diff improvements (#1107)","Swagger: fix union type with patterns","Disable NETCONF level keepalive mechanism in streaming session","Fixed onEmpty section in templates for rpd ds and us conns for Arris Commscope","Fixed a bug that causes cli closed error for saos devices when commit or execute RPCs are triggered"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Refactoring ServiceInstanceWriter","Adjusted log levels of common logs","Swagger: filterPath improvement","diff improvements (#1107)","Rewrite kafka configs (#1105)"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["JSON input in Uniconfig shell problem fix","Shell logger","Create and publish Netconf test tool image to DockerHub"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.16.1 to 9.16.3 (#1139)","build(deps): bump actions/upload-artifact from 3.1.1 to 3.1.2 (#1131)","build(deps): bump bouncycastle.version from 1.72 to 1.73 (#1138)","build(deps): bump byte-buddy.version from 1.14.2 to 1.14.3","build(deps): bump byte-buddy.version from 1.14.3 to 1.14.4","build(deps): bump grpc.version from 1.54.0 to 1.54.1 (#1134)","build(deps): bump jetty-bom from 11.0.14 to 11.0.15 (#1127)","build(deps): bump jline.version from 3.22.0 to 3.23.0 (#998)","build(deps): bump json-path from 2.7.0 to 2.8.0 (#1093)","build(deps): bump kotlin.version from 1.8.10 to 1.8.20","build(deps): bump maven-enforcer-plugin from 3.2.1 to 3.3.0 (#1114)","build(deps): bump maven-invoker-plugin from 3.5.0 to 3.5.1 (#1115)","build(deps): bump maven-resources-plugin from 3.3.0 to 3.3.1","build(deps): bump netty.version from 4.1.90.Final to 4.1.91.Final (#1113)","build(deps): bump opentelemetry-api from 1.24.0 to 1.25.0 (#1135)","build(deps): bump protobuf.version from 3.22.2 to 3.22.3","build(deps): bump spotbugs-maven-plugin from 4.7.3.3 to 4.7.3.4 (#1126)","build(deps): bump triemap from 1.2.0 to 1.3.0"]}],[{"i":"uniconfig-517-release-notes","l":"Uniconfig 5.1.7 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Lazy loading/unloading of native schema contexts (#1171)","Creation of MIB context to SchemaContext adapter (#1169)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed lazy apply-template","Fixed a bug that causes cli closed error if config output's length is less than prompt's length after config output is trimmed","Fix Swagger regex example generation","Fixed loading of schema context from swagger directory","Make request max size configurable for gnmi devices","Integrate encryption in create-multiple-templates RPC.","Fixed UniconfigTransactionsMediator initialization (#1181)","Remove TestNG (#1159)","Fix UC stuck when CPU is full and queue is empty (#1168)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Support for gnmi in shell","Implementation of idle timeout print in CLI (#1172)","Lazy loading/unloading of native schema contexts (#1171)","Improved the processing time of sync RPC for ios/iosxe devices","Unify movement in shell (#1005)","Enabled cable-upstream writer for interfaces that has number/number/number pattern as name","Improved apply-template RPC (#1111)"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Add lazy loading to 
shell"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.16.3 to 9.17.0 (#1177)","build(deps-dev): bump maven-plugin-annotations from 3.8.1 to 3.8.2 (#1152)","build(deps): bump checkstyle from 10.9.3 to 10.10.0 (#1176)","build(deps): bump jackson-bom from 2.14.2 to 2.15.0 (#1158)","build(deps): bump jackson-databind from 2.14.2 to 2.15.0 (#1153)","build(deps): bump jakarta.activation-api from 2.1.1 to 2.1.2 (#1178)","build(deps): bump jgrapht.version from 1.5.1 to 1.5.2","build(deps): bump junit.jupiter.version from 5.9.2 to 5.9.3 (#1175)","build(deps): bump kotlin.version from 1.8.20 to 1.8.21 (#1174)","build(deps): bump maven-checkstyle-plugin from 3.2.1 to 3.2.2 (#1157)","build(deps): bump maven-plugin-plugin from 3.8.1 to 3.8.2 (#1154)","build(deps): bump maven-project-info-reports-plugin from 3.4.2 to 3.4.3 (#1145)","build(deps): bump mockito.core.version from 5.2.0 to 5.3.1 (#1156)","build(deps): bump netty.version from 4.1.91.Final to 4.1.92.Final (#1173)","build(deps): bump okhttp.version from 4.10.0 to 4.11.0 (#1155)","build(deps): bump protobuf.version from 3.22.3 to 3.22.4"]}],[{"i":"uniconfig-518-release-notes","l":"Uniconfig 5.1.8 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Add change-encryption-status rpc (#1259) - UNIC-1090","Added some commands for collecting slot data for IOS-XE - VZ-734"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger: Fix path filtering ignoring cruds in lists - UNIC-1315","Fix failing shell tests (#1285)","Compare decrypted strings in calculate-diff procedure (#1266) - UNIC-1173","Changed execution order of commands for CPE ZTP provision for SAOS8"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Improved error output when two transactions want to update same data … (#1238)","Swagger: Improve OpenAPI difference calculation - UNIC-1298","Provide option for reading mount-point info - UNIC-1097","Refactored InstanceIdentifierContext (#1249) - UNIC-1211","Add option to gnmi-topology to read specific data - PANT-72","Add unsuported keys for cli connection (#1203)","Add JIRA tag to release notes - UNIC-1243","Optimize LeafRef context build. - UNIC-988","Build LeafRef Tree in background. 
- UNIC-988","Separated reader/writer for IUCs from main classes for Arris Commscope","Unify annotation usage in uniconfig codebase","support error info (#1201) - UNIC-1136","Rewrite RestConf module DI (#1202) - UNIC-1101"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Create a new set of installation parameters"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.17.0 to 9.19.1","build(deps-dev): bump maven-plugin-annotations from 3.8.2 to 3.9.0 (#1265)","build(deps-dev): bump swagger-parser from 1.0.65 to 1.0.66","build(deps): bump antlr4.version from 4.12.0 to 4.13.0 (#1256)","build(deps): bump build-helper-maven-plugin from 3.3.0 to 3.4.0","build(deps): bump checkstyle from 10.10.0 to 10.11.0 (#1254)","build(deps): bump checkstyle from 10.11.0 to 10.12.0","build(deps): bump embedded-postgres from 2.0.3 to 2.0.4 (#1270)","build(deps): bump embedded-postgres-binaries-linux-amd64 from 13.10.0 to 13.11.0 (#1274)","build(deps): bump git-commit-id-maven-plugin from 5.0.0 to 6.0.0","build(deps): bump grpc.version from 1.54.1 to 1.55.1 (#1199)","build(deps): bump guice.version from 5.1.0 to 7.0.0 (#1253)","build(deps): bump jackson-bom from 2.15.0 to 2.15.1","build(deps): bump jackson-databind from 2.15.0 to 2.15.1 (#1264)","build(deps): bump jersey.version from 3.1.1 to 3.1.2","build(deps): bump json-smart from 2.4.10 to 2.4.11","build(deps): bump kotlinx-coroutines-core from 1.6.4 to 1.7.0 (#1198)","build(deps): bump kotlinx-coroutines-core from 1.7.0 to 1.7.1","build(deps): bump maven-assembly-plugin from 3.5.0 to 3.6.0 (#1272)","build(deps): bump maven-checkstyle-plugin from 3.2.2 to 3.3.0","build(deps): bump maven-failsafe-plugin from 3.0.0 to 3.1.0 (#1196)","build(deps): bump maven-plugin-plugin from 3.8.2 to 3.9.0","build(deps): bump maven-remote-resources-plugin from 3.0.0 to 3.1.0","build(deps): bump maven-source-plugin from 3.2.1 to 3.3.0 (#1271)","build(deps): bump maven-surefire-plugin from 3.0.0 to 3.1.0 (#1197)","build(deps): bump maven.core.version from 3.9.1 to 3.9.2 (#1215)","build(deps): bump netty.version from 4.1.92.Final to 4.1.93.Final","build(deps): bump opentelemetry-api from 1.25.0 to 1.26.0 (#1195)","build(deps): bump protobuf.version from 3.22.4 to 3.23.0","build(deps): bump protobuf.version from 3.23.0 to 3.23.1 (#1273)","build(deps): bump spring-jdbc from 6.0.8 to 6.0.9","build(deps): bump spring.boot.version from 3.0.6 to 3.0.7 (#1269)","build(deps): bump sshd.version from 2.9.2 to 2.10.0","build(deps): bump swagger-core from 2.2.9 to 2.2.10","build(deps): bump triemap from 1.3.0 to 1.3.1"]}],[{"i":"uniconfig-519-release-notes","l":"Uniconfig 5.1.9 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["DOMDataBroker for SNMP (#1299) - UNIC-1200","UNIC-1200","Integration of southbound RESTCONF RPC service to UniConfig shell (#1310) - UNIC-1310"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix unreachable ports for device discovery RPC - UNIC-1322","Certificate Manager servers not getting created in appliance context - fixed output - UNIC-1309","Fix removing of exception from error-info/error-message - PANT-78","Add MapNode serialization to gNMI Update - UNIC-1323"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Changeable thread parameters - UNIC-1250","Swagger: Added html output to diff generation - UNIC-1324"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.19.1 to 9.19.4","build(deps-dev): bump swagger-parser from 
1.0.66 to 1.0.67","build(deps): bump byte-buddy.version from 1.14.4 to 1.14.5","build(deps): bump commons-io from 2.11.0 to 2.12.0","build(deps): bump commons-io from 2.12.0 to 2.13.0","build(deps): bump dependency-check-maven from 8.2.1 to 8.3.1","build(deps): bump dokka-maven-plugin from 1.8.10 to 1.8.20","build(deps): bump grpc.version from 1.55.1 to 1.56.0","build(deps): bump guava.version from 31.1-jre to 32.0.0-jre","build(deps): bump guava.version from 32.0.0-jre to 32.0.1-jre","build(deps): bump jackson-bom from 2.15.1 to 2.15.2","build(deps): bump jackson-databind from 2.15.1 to 2.15.2","build(deps): bump jaxb-runtime from 4.0.2 to 4.0.3","build(deps): bump kafka-clients from 3.4.0 to 3.4.1","build(deps): bump kafka-clients from 3.4.1 to 3.5.0","build(deps): bump kotlin.version from 1.8.21 to 1.8.22","build(deps): bump maven-dependency-plugin from 3.5.0 to 3.6.0","build(deps): bump maven-failsafe-plugin from 3.1.0 to 3.1.2","build(deps): bump maven-project-info-reports-plugin from 3.4.3 to 3.4.4","build(deps): bump maven-project-info-reports-plugin from 3.4.4 to 3.4.5","build(deps): bump maven-release-plugin from 3.0.0 to 3.0.1","build(deps): bump maven-surefire-plugin from 3.1.0 to 3.1.2","build(deps): bump metrics-core from 4.2.18 to 4.2.19","build(deps): bump opentelemetry-api from 1.26.0 to 1.27.0","build(deps): bump protobuf.version from 3.23.1 to 3.23.2","build(deps): bump swagger-core from 2.2.10 to 2.2.11","build(deps): bump swagger-core from 2.2.11 to 2.2.12","build(deps): bump truth.version from 1.1.3 to 1.1.4"]}],[{"i":"uniconfig-5110-release-notes","l":"Uniconfig 5.1.10 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Get API returns 3 response for single request (#1396) - VHD-324","Fixed too long error output in NETCONF LOG message - UNIC-649","UNIC-1319 Issue with Netconf install WorkFlow - fix logs for testing workflows - UNIC-1319","Upgrade Template didn't load repository - UNIC-1334","Save yang repository in transaction - VHD-324"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["store netconf keys in db (#1380) - VHD-326","maven cleanup (#1379) - UNIC-1291","Removed unused code from sal-dom-spi and dependencies","Extract shell actions to use RestconfDOMActionService (#1346) - UNIC-1313"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Parse and fill datastore with initial JSON file in MDSAL mode"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.19.4 to 9.20.0","build(deps): bump actions/setup-python from 4.5.0 to 4.6.1","build(deps): bump bouncycastle.version from 1.73 to 1.74","build(deps): bump bouncycastle.version from 1.74 to 1.75","build(deps): bump checkstyle from 10.12.0 to 10.12.1","build(deps): bump commons-codec from 1.15 to 1.16.0","build(deps): bump docker/login-action from 2.1.0 to 2.2.0","build(deps): bump grpc.version from 1.56.0 to 1.56.1 (#1390)","build(deps): bump guava.version from 32.0.1-jre to 32.1.1-jre (#1388)","build(deps): bump janino from 3.1.9 to 3.1.10 (#1389)","build(deps): bump json from 20230227 to 20230618","build(deps): bump maven-clean-plugin from 3.2.0 to 3.3.1","build(deps): bump maven-invoker-plugin from 3.5.1 to 3.6.0","build(deps): bump maven-shade-plugin from 3.4.1 to 3.5.0","build(deps): bump metainf-services from 1.9 to 1.11 (#1375)","build(deps): bump mockito.core.version from 5.3.1 to 5.4.0","build(deps): bump netty-handler in /commons/parents/odlparent","build(deps): bump spotbugs-maven-plugin from 4.7.3.4 to 4.7.3.5","build(deps): bump 
spring-jdbc from 6.0.9 to 6.0.10","build(deps): bump spring.boot.version from 3.0.7 to 3.0.8","build(deps): bump sshd.version from 2.9.2 to 2.10.0 (#1251)","build(deps): bump swagger-core from 2.2.12 to 2.2.14","build(deps): bump truth.version from 1.1.4 to 1.1.5"]}],[{"i":"uniconfig-5111-release-notes","l":"Uniconfig 5.1.11 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger: Fix generation of operational data from Uniconfig schemas (#1444) - UNIC-1280","Fixed unmounting of node that is in connecting state - UNIC-1281"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Changing use case of prompt stylization - UNIC-977","Enabling and cleaning up SHELL checkstyles (#1451) - UNIC-1345","Unifying, renaming and increasing readability of UC-shell - UNIC-977","Add mutable transaction to Shell (#1399) - UNIC-1312","Swagger: Unit tests - UNIC-1186","Removed sal-common-impl module","Merged mdsal-dom-spi to sal-dom-spi module (5.1.x) (#1417)","add ValueCase to gnmi codec - UNIC-1113"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Read All data type on specific paths"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump org.apache.kafka:kafka-clients from 3.5.0 to 3.5.1 (#1476)","build(deps): bump maven.core.version from 3.9.2 to 3.9.3 (#1358)"]}],[{"i":"uniconfig-5112-release-notes","l":"Uniconfig 5.1.12 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["fix_regex_matching_of_identity - UNIC-1375"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Shell explicit show in config state (#1484) (#1503) - UNIC-1325","Shell caching data - 5.1.x-stable (#1496) - UNIC-1357"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Swagger: Code cleanup - 5.1.x-stable (#1489)"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump org.codehaus.mojo:properties-maven-plugin from 1.1.0 to 1.2.0 (#1522)"]}],[{"i":"uniconfig-5113","l":"Uniconfig 5.1.13"},{"i":"whats-changed","l":"What's Changed"},{"i":"bug-fixes","l":"\uD83D\uDC1E Bug Fixes","p":["Fix key delimiter in URI","Swagger: Fix RPCs placed after mountpoint (#1582)","[UNIC-1410] Fix tx cleanup when request fails","[UNIC-1413] Fixed updating snapshot in immediate-commit model (#1629)","[UNIC-1420] Fix cli ssh session reconnect","[UNIC-1425] Fix crypto bug (#1664)","[UNIC-1352, UNIC-1254] Fix cluster issues (#1670)","[UNIC-1340] Fixed releasing of used YANG modules from memory (#1667)","[UNIC-1429] Fix replace is sent using delete operation","[UNIC-1432] Swagger: Fix generation of post list endpoints (#1674)","[UNIC-1430] - fix replace yang-patch for gnmi mountpoint","[UNIC-1404] UniConfig Shell - fix system augmentation","Fixed loading of YANG from path in client diff tool"]},{"i":"new-features","l":"✅ New Features","p":["[UNIC-1394] Client side diff","[UNIC-1402] UC Shell - default callbacks repository (#1701)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["[UNIC-981] UniconfigShell: Remove explicit show submode from root mod…","[UNIC-1411] Add mapping to request/response log message","[UNIC-1394] Add overloaded build methods to client side diff","[UNIC-1394] exclude gnmi depenendecies from java client","[UNIC-1401] UniConfig Shell - one line SET / DELETE command (#1621)","[UNIC-1403] Unified format of shell audit logs (#1658)","[PANT-83] add logs for pant83 - STABLE"]},{"i":"api-changes","l":"\uD83D\uDDA5️ API Changes","p":["add gnmi-messages logging broker"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["Fix Jetty 
CVEs","5.1.x-stable - Maven 3.9.5"]},{"i":"other-changes","l":"\uD83D\uDD27 Other Changes","p":["5.1.13-SNAPSHOT","UniconfigShell: Improving suggestions menu","[FI-1693] Remove Jenkins-test from merge workflow","Release 5.1.13"]}],[{"i":"uniconfig-5114","l":"Uniconfig 5.1.14"},{"i":"whats-changed","l":"What's Changed"},{"i":"bug-fixes","l":"\uD83D\uDC1E Bug Fixes","p":["[UNIC-1429] Fix replace operation in GNMI set","[UNIC-1471] : Fix sync fail after failed installation was stored STABLE","[UNIC-1474] Improve performance of YANG repository loading process during mounting process (#1785)","UniConfig Shell - fix prompt callbacks bug","[UNIC-1492] - Fix rate-limiting","Fixed loading of gNMI YANG repository during MountNodeTask","[UNIC-1471] Add schema-cache storing into sync impl","Prevented sending no description command if there is no change for rpd description","Gnmi sb netconf cache loader stable","[UNIC-1494] - add migration for replace-paths","Fix get fallback schema context in cli shell.","Fix settings / callbacks cache"]},{"i":"new-features","l":"✅ New Features","p":["[UNIC-1075] Uniconfig shell hide / unhide command implementation"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["[UNIC-1408] UniConfig Shell - adjust cached data (#1745)","[UNIC-1405] UniConfig shell: set nested JSON data (#1752)","Improved logging (#1798)"]},{"i":"other-changes","l":"\uD83D\uDD27 Other Changes","p":["Suppress CVEs","[UNIC-1475] changing information about expired transaction in root mode","Release 5.1.14"]}],[{"i":"uniconfig-520-release-notes","l":"Uniconfig 5.2.0 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Parse NOTIFICATION-TYPE from MIB schemas. (#1545) - UNIC-1382","Add rpc change-encryption-keys (#1441) - UNIC-1239","support for gnmi-notifications - Notification + Subscription service (#1109) - UNIC-1184","UNIC-1308","SNMP topology (#1438) - UNIC-1202","UNIC-1202"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["fix_regex_matching_of_identity - UNIC-1375","Fixed generation of commit diff notifications","Fix identityRef parsing (#1502) - UNIC-1356","Swagger: Fix generation of operational data from Uniconfig schemas - UNIC-1280","Fixed unmounting of node that is in connecting state - UNIC-1281"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Add gnmi subscription parameters (#1505) - UNIC-1369","Add mutable transaction to Shell (#1399) - UNIC-1312","add ValueCase to gnmi codec - UNIC-1113","Caching of calculate-diff response in transaction","Changing use case of prompt stylization - UNIC-977","Cleanup dependencies in uniconfig module (#1542) - UNIC-1381","Constants refactoring (#1511) - UNIC-1257","Enabling and cleaning up SHELL checkstyles (#1451) - UNIC-1345","fix connection manager (#1532)","Merged mdsal-dom-api into sal-dom-api module (#1543)","Merged mdsal-dom-spi to sal-dom-spi module (#1415)","Prevented sending command for rpd-index, ucam and dcam attributes if there is no change for CER rpd interfaces","Removed sal-common-impl module (#1426)","Shell caching data (#1480) - UNIC-1357","Shell explicit show in config state (#1484) - UNIC-1325","SNMP refactoring of snmp-topology (#1472) - UNIC-1343","Swagger: Unit tests - UNIC-1186","UNIC-1369","Unifying, renaming and increasing readability of UC-shell - UNIC-977"]},{"i":"api","l":"\uD83D\uDCBB API","p":["Refactor connection-manager RPCs (#1423) - UNIC-1283"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Read All data type on specific paths"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency 
Upgrades","p":["build(deps-dev): bump org.apache.commons:commons-lang3 from 3.12.0 to 3.13.0 (#1523)","build(deps-dev): bump org.flywaydb:flyway-core from 9.20.0 to 9.21.0 (#1474)","build(deps-dev): bump org.flywaydb:flyway-core from 9.21.0 to 9.21.1 (#1520)","build(deps): bump bouncycastle.version from 1.75 to 1.76 (#1521)","build(deps): bump ch.qos.logback:logback-classic from 1.4.6 to 1.4.8 (#1483)","build(deps): bump ch.qos.logback:logback-classic from 1.4.8 to 1.4.9 (#1562)","build(deps): bump com.google.guava:guava from 32.1.1-jre to 32.1.2-jre (#1557)","build(deps): bump com.puppycrawl.tools:checkstyle from 10.12.1 to 10.12.2 (#1561)","build(deps): bump grpc.version from 1.56.1 to 1.57.0 (#1517)","build(deps): bump grpc.version from 1.57.0 to 1.57.1 (#1559)","build(deps): bump jersey.version from 3.1.2 to 3.1.3 (#1518)","build(deps): bump jmh-core.version from 1.36 to 1.37 (#1558)","build(deps): bump json-smart from 2.4.11 to 2.5.0 (#1404)","build(deps): bump kotlin.version from 1.8.22 to 1.9.0 (#1391)","build(deps): bump kotlinx-coroutines-core from 1.7.1 to 1.7.2 (#1392)","build(deps): bump maven.core.version from 3.9.2 to 3.9.3 (#1358)","build(deps): bump maven.core.version from 3.9.3 to 3.9.4 (#1560)","build(deps): bump netty.version from 4.1.94.Final to 4.1.95.Final (#1477)","build(deps): bump netty.version from 4.1.95.Final to 4.1.96.Final (#1516)","build(deps): bump opentelemetry-api from 1.27.0 to 1.28.0 (#1406)","build(deps): bump org.apache.kafka:kafka-clients from 3.5.0 to 3.5.1 (#1476)","build(deps): bump org.codehaus.mojo:properties-maven-plugin from 1.1.0 to 1.2.0 (#1522)","build(deps): bump org.jetbrains.kotlinx:kotlinx-coroutines-core from 1.7.2 to 1.7.3 (#1519)","build(deps): bump org.junit.jupiter:junit-jupiter from 5.9.3 to 5.10.0 (#1479)","build(deps): bump org.xmlunit:xmlunit-legacy from 2.6.1 to 2.9.1 (#1478)","build(deps): bump protobuf.version from 3.23.2 to 3.23.4 (#1395)","build(deps): bump spring-jdbc from 6.0.10 to 6.0.11 (#1434)","build(deps): bump spring.boot.version from 3.1.1 to 3.1.2 (#1475)","build(deps): bump swagger-core from 2.2.14 to 2.2.15 (#1405)"]}],[{"i":"uniconfig-521","l":"Uniconfig 5.2.1"},{"i":"whats-changed","l":"What's Changed"},{"i":"bug-fixes","l":"\uD83D\uDC1E Bug Fixes","p":["[UNIC-1273] Use Jetty embedded server","[UNIC-1340] Fixed releasing of used YANG modules from memory","[UNIC-1352, UNIC-1254] Fix cluster issues","[UNIC-1365] Gnmi stream fixes","[UNIC-1390] Swagger: Fix RPCs placed after mountpoint","[UNIC-1395] Fix key delimiter in URI","[UNIC-1399] Fix issues with shell","[UNIC-1399] Switch shell terminal back to JNA","[UNIC-1404] UniConfig Shell - fix system augmentation","[UNIC-1410] Fix tx cleanup when request fails","[UNIC-1410] Fix_tx_closing","[UNIC-1413] Fixed updating snapshot in immediate-commit model","[UNIC-1420] Fix cli ssh session reconnect","[UNIC-1423] Fix identityRef as listEntry key in templatesg","[UNIC-1425] Fix crypto bug","[UNIC-1429] Fix replace is sent using delete operation","[UNIC-1430] Fix replace yang-patch for gnmi mountpoint","[UNIC-1432] Swagger: Fix generation of post list endpoints","[UNIC-1446] Fix SpotBugs violations - Reliance on default encoding","[UNIC-1447] Fix SpotBugs violations - Multithreaded correctness","[UNIC-1448] Fix SpotBugs violations - Use a localized version of String.toUpperCase() and String.toLowerCase()","[UNIC-1451] Fix SpotBugs violations - Correctness","[UNIC-1463] Remove duplicates from Set","Add git registry to dependabot.yml","Additional fix to calculate diff 
rpc","Cleanup test resources properly","Data-change-events publisher fix","Fix calculate diff rpc","Fix immediate commit model and submit successfull nodes.","Fix mapEntryNodes in gnmi notifications","Fix reading of actual YANG repository from mountpoint data","Fix show UC status script","Fix show_uniconfig_status script.","Fix skip of unreachable-nodes.","Fix SNMP Notification bean creation","Fixed DateTime format in the transaction-log","Fixed loading of YANG from path in client diff tool (#1747)","Prevented sending no description command if there is no change for rpd description","Registry attempt no.2","SNMP adjust exception","SNMP Node id is incorrectly parsed"]},{"i":"new-features","l":"✅ New Features","p":["[UNIC-1394] Client side diff","[UNIC-1373] Implemented dryrun-commit for GNMI topology","[UNIC-1398] SNMP notifications","[UNIC-1402] UC Shell - default callbacks repository","[UNIC-1218] Add dynamic property module"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["[PANT-83] add logs for pant83","[UNIC-1154] Integrate JOOQ into database access layer","[UNIC-1218] Rewrite database connection pool and connection properties","[UNIC-1223] ODL parent cleanup","[UNIC-1242] Remove PR links from the uploaded release notes","[UNIC-1242] Unify generation of release notes","[UNIC-1245] Replacing Guava future by CompletableFuture","[UNIC-1258] Fix issues reported by SpotBugs","[UNIC-1273] Adjust bootstrapping of web containers","[UNIC-1370] Adjust notification result parsing","[UNIC-1386] Map correct ObjectTypes to NotificationTypes","[UNIC-1391] Add SNMP notifications to SchemaContext","[UNIC-1394] Add overloaded build methods to client side diff","[UNIC-1394] Remove gnmi dependencies from java client","[UNIC-1401] UniConfig Shell - one line SET / DELETE command","[UNIC-1403] Unified format of shell audit logs","[UNIC-1411] Add mapping to request/response log message (#1630)","[UNIC-1412] change gnmi packaging","[UNIC-1435] Refactor transaction-log to JOOQ style","[UNIC-1441] SNMP config classes","[UNIC-1449] Fix SpotBugs violations - Performance","[UNIC-1463] Fix SpotBugs violations - Code vulnerabilities","[UNIC-980] UniconfigShell: Improving suggestions menu","[UNIC-981] UniconfigShell: Remove explicit show submode from root mode"]},{"i":"api-changes","l":"\uD83D\uDDA5️ API Changes","p":["[UNIC-1289] Refactor RPCs: revert-changes, query-config, device-discovery","[UNIC-1380] Add gnmi-messages logging broker","[UNIC-1287] Refactor snapshot-manager RPCs","[UNIC-1282] Refactoring uniconfig manager RPCs"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["[UNIC-1223] Align an embedded kafka version with the clients provided by Spring","build(deps): bump actions/checkout from 3 to 4","build(deps): bump actions/setup-python from 4.6.1 to 4.7.1","build(deps): bump actions/upload-artifact from 3.1.2 to 3.1.3","build(deps): bump antlr4.version from 4.13.0 to 4.13.1","build(deps): bump com.github.gantsign.maven:ktlint-maven-plugin from 1.16.0 to 2.0.0","build(deps): bump com.github.gantsign.maven:ktlint-maven-plugin from 2.0.0 to 3.0.0","build(deps): bump com.github.spotbugs:spotbugs-maven-plugin from 4.7.3.5 to 4.7.3.6","build(deps): bump com.puppycrawl.tools:checkstyle from 10.12.2 to 10.12.3","build(deps): bump com.puppycrawl.tools:checkstyle from 10.12.3 to 10.12.4","build(deps): bump commons-io:commons-io from 2.13.0 to 2.14.0","build(deps): bump commons-net:commons-net from 3.9.0 to 3.10.0","build(deps): bump docker/build-push-action from 4 to 
5","build(deps): bump docker/login-action from 2.2.0 to 3.0.0","build(deps): bump grpc.version from 1.57.1 to 1.57.2","build(deps): bump grpc.version from 1.57.2 to 1.58.0","build(deps): bump io.swagger.core.v3:swagger-core from 2.2.15 to 2.2.16","build(deps): bump io.zonky.test.postgres:embedded-postgres-binaries-linux-amd64 from 13.11.0 to 13.12.0","build(deps): bump kotlin.version from 1.9.0 to 1.9.10","build(deps): bump org.apache.commons:commons-compress from 1.23.0 to 1.24.0","build(deps): bump org.apache.maven.plugins:maven-enforcer-plugin from 3.3.0 to 3.4.0","build(deps): bump org.apache.maven.plugins:maven-enforcer-plugin from 3.4.0 to 3.4.1","build(deps): bump org.apache.maven.plugins:maven-javadoc-plugin from 3.5.0 to 3.6.0","build(deps): bump org.apache.maven.plugins:maven-shade-plugin from 3.5.0 to 3.5.1","build(deps): bump org.immutables:value from 2.9.3 to 2.10.0","build(deps): bump org.jetbrains.dokka:dokka-maven-plugin from 1.8.20 to 1.9.0","build(deps): bump org.json:json from 20230618 to 20231013","build(deps): bump org.owasp:dependency-check-maven from 8.3.1 to 8.4.0","build(deps): bump org.springframework.cloud:spring-cloud-dependencies from 2022.0.3 to 2022.0.4","build(deps): bump protobuf.version from 3.23.4 to 3.24.0","build(deps): bump protobuf.version from 3.24.0 to 3.24.1","build(deps): bump protobuf.version from 3.24.1 to 3.24.2","build(deps): bump protobuf.version from 3.24.2 to 3.24.3","build(deps): bump protobuf.version from 3.24.3 to 3.24.4","build(deps): bump spring.boot.version from 3.1.2 to 3.1.3","build(deps): bump spring.boot.version from 3.1.3 to 3.1.4","Maven 3.9.5"]},{"i":"other-changes","l":"\uD83D\uDD27 Other Changes","p":["5.2.1-SNAPSHOT","[FI-1693] Remove Jenkins-test from merge workflow","Workflows: update cluster IP from 10.19.0.67 to 10.19.0.242 and","Workflows: remove VPN to FRINX for postgresDB.","Workflows: update path to new VM of postgresDB.","Workflows: update path and remove FRINX VPN for embeded tests.","Removed forgotten LOG","Release 5.2.1"]}],[{"i":"uniconfig-522","l":"Uniconfig 5.2.2"},{"i":"whats-changed","l":"What's Changed"},{"i":"bug-fixes","l":"\uD83D\uDC1E Bug Fixes","p":["[UNIC-1405] UniConfig shell: set nested JSON data","[UNIC-1429] Fix replace operation in GNMI set (MAIN)","[UNIC-1450] Spotbugs fixes - Bad practice","[UNIC-1471] : Fix sync fails after failed installation was stored in DB","[UNIC-1471] Add schema-cache storing into sync impl","[UNIC-1474] Improve performance of YANG repository loading process during mounting process","[UNIC-1475]: generalizing information about expired transaction","[UNIC-1494] - add migration for replace-paths","Add fix for reading duplicate properties","Caching request body copier","Fix bad migration embedded kafka properties from old UC version to new","Fix exception in loading yang schemas","Fix get fallback schema context in cli shell.","Fix update property value to null bug","Removed the forgotten callbacks-models dependencies","Set forgotten crypto properties in creation crypto config.","UniConfig Shell - fix prompt callbacks bug","Use a Set instead of a List in MibRepository"]},{"i":"new-features","l":"✅ New Features","p":["[UNIC-1075] Uniconfig shell hide / unhide command implementation","[UNIC-1028] Connect/Disconnect node RPC"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["[UNIC-1408] UniConfig Shell - adjust cached data","[UNIC-1374] Fixed sending install-node RPC request without mandatory fields","[UNIC-1445] Refactor yang-repo to JOOQ style","Refactoring 
Properties","Add spotbugs-maven-plugin configuration","Optimize DB read-only transaction","Improved logging","Add Google ErrorProne plugin","[UNIC-1487] return to the same mode when transaction expires"]},{"i":"api-changes","l":"\uD83D\uDDA5️ API Changes","p":["[UNIC-1290] Refactor data-change-events RPCs"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump com.github.spotbugs:spotbugs-annotations from 4.7.3 to 4.8.0","build(deps): bump com.google.guava:guava from 32.1.2-jre to 32.1.3-jre","build(deps): bump org.codehaus.woodstox:stax2-api from 4.2.1 to 4.2.2","build(deps): bump io.swagger.core.v3:swagger-core from 2.2.16 to 2.2.17","build(deps): bump com.fasterxml.jackson.core:jackson-databind from 2.15.2 to 2.15.3","build(deps): bump org.apache.maven.plugins:maven-plugin-plugin from 3.9.0 to 3.10.1","build(deps): bump org.jetbrains.dokka:dokka-maven-plugin from 1.9.0 to 1.9.10","build(deps): bump io.github.git-commit-id:git-commit-id-maven-plugin from 6.0.0 to 7.0.0","build(deps-dev): bump org.apache.maven.plugin-tools:maven-plugin-annotations from 3.9.0 to 3.10.1","build(deps): bump grpc.version from 1.58.0 to 1.59.0","build(deps): bump spring.boot.version from 3.1.4 to 3.1.5","build(deps): bump sshd.version from 2.10.0 to 2.11.0","build(deps): bump org.owasp:dependency-check-maven from 8.4.0 to 8.4.2"]},{"i":"other-changes","l":"\uD83D\uDD27 Other Changes","p":["Rename distro/uniconfig-modules/uniconfig to main","Prepared sample docker compose file","Move main.jar into the root","Set kafka enabled to false","Properties overhaul","Suppress CVEs","Use NetconfCacheLoader in gnmi-sb instead of custom yang parsing","Fix binding empty properties","Rewrite client to new properties RPC","Fix dependency management for starting UniConfig","Release 5.2.2"]}],[{"l":"Translation Units","p":["This repository contains documentation for all available translation units for the FRINX ODL CLI service module. A translation unit is a piece of code that includes handlers to read from or write to a specific device (e.g. Cisco IOS classic router) and facilitates the translation in OpenConfig models. The purpose of this documentation is to see which commands can be read and set and how they map to the respective YANG models. Every section has a README file that provides an overview of all show and configuration commands that are supported. Multiple translation units are finally packaged together and made available as a karaf feature that can be installed at runtime."]},{"l":"Table of Contents","p":["URL","URL Operations","GET","PUT","DELETE","OPERATIONAL datasets","OPENCONFIG YANG","OS COMMANDS","DEVICE YANG","UNIT","CONFIGURATION datasets"]},{"l":"URL","p":["can be either cli or unified","each list item argument MUST be followed by list key. Usually the key is mapped to just one leaf (identifier, name, etc.), but in some cases, the key is created using more leafs. In this case, in the URL, the keys follow each other in order specified by YANG.","Each URL has a base format:","Example:","for each container or list in YANG model, there MUST be an argument in the URL","if the URL is tied with a body, the top-level element in the body must be the last element in the URL","Let's say you want to be even more specific and list details just about one particular interface. You can view the data by adding 'interface=' to the URL.","Let's say you want to list all areas in a specific OSPF. 
To obtain this data, you can trim the part: '/area=/interfaces' from the URL.","mountpoint name","network-instances argument is a list. We want to specify one item from that list (specific network instance), therefore the URL continues with ‘network-instance'. The key in network-instance is the identifier '' (e.g. vrf1) which follows the list item argument. Complex key is needed for protocol argument. The key is protocol-type followed by process-id. (frinx-openconfig-policy-types:OSPF, )","same for 'area='","Simplified example:","The general steps in creating the URL are following:","the top level argument must contain the name of the model. The name of the model must also be specified for YANG identities.","the URL contains an identity which is a part of a key for protocol list. This identity is prefixed by model name: ‘frinx-openconfig-policy-types:OSPF’","The URL will always point to either operational or config datastore and to the node we want to get the information from. You can always check if the particular device is registered by issuing GET on :","top-level argument contains also YANG model name: ‘frinx-openconfig-network-instance’","URLs are modular. By changing the URL you can move along the YANG data tree.","very specific URL listing interfaces under one specific area in OSPF under specific VRF","You can create a minimalistic YANG tree out of the URL:"]},{"l":"URL Operations","p":["Each show command supports only one http operation: GET ."]},{"l":"GET","p":["GET operation can be issued on both config/operational datastore. Config datastore reflects how the device is configured. Operational datastore reflects the state of the device. In most cases the information is the same.","Example of a case where the information is not the same (the only difference in requests is config vs operational):","Configuration commands support PUT for create/replace data. This operation requires HTTP body, which contains openconfig YANG model of the configuration you want to send to the router. Another operation supported by configuration commands is DELETE, which removes data from the device. Both operations need to be issued on config datastore.","For modifications of the data, you can use also PATCH method, that does not replace the entire data structure, only the parts that are different.","Example:","We want to create a new BGP neighbor:","The IOS command is:"]},{"l":"PUT","p":["BODY:","WARNING: PUT operation does not merge data. In this example if you have already configured some BGP neighbors, this request will REMOVE all of them and create just the one described in the PUT body. The solution is to first issue GET, copy existing configuration and add/change items there, or use PATCH method.","If we want to DELETE a BGP neighbor, the body is not needed, the URL needs to be specific to the neighbor we want to delete:"]},{"l":"DELETE","p":["This operation will issue following command:","DELETE operation always removes the last argument of the URL."]},{"l":"OPERATIONAL datasets","p":["Go to operational datasets","Show commands are commands that usually on Cisco device start with 'show'. 
The aim is to obtain data from the router."]},{"i":"url-1","l":"URL","p":["GET operation issued on operational datastore"]},{"l":"OPENCONFIG YANG","p":["In case of show commands this section is a sample output of a particular show command."]},{"l":"OS COMMANDS","p":["In this section we list the actual router commands with sample outputs, where the data obtained and transformed into Openconfig YANG is marked as bold. We list show commands and outputs for each supported device OS.","IOS XR | IOS Classic/XE | Junos | SAOS"]},{"l":"DEVICE YANG","p":["In case of CLI units, the unit parses the output of the CLI command directly into OC YANG. In case of Netconf units, the output is mapped to OC YANG through Device YANG (YANG model supported by the device). In case of Netconf units, the YANG is also written in documentation. This section is a link to XML unit test input testing this operation."]},{"l":"UNIT","p":["Link to github code where this show commmand is implemented along with unit version range."]},{"l":"CONFIGURATION datasets","p":["Go to configuration datasets"]},{"i":"url-2","l":"URL","p":["PUT operation with given URL will result in creating of data in config datastore DELETE operation with given URL will result in removing data in config datastore"]},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG","p":["In case of configuration commands, this section represents the HTTP body in PUT operation"]},{"i":"os-commands-1","l":"OS COMMANDS","p":["In this section we list the actual router commands that are mapped to the Openconfig YANG model. Data transformed into Openconfig YANG is marked as bold. We list commands for each supported device OS.","IOS XR | IOS Classic/XE | Junos | SAOS"]},{"i":"device-yang-1","l":"DEVICE YANG","p":["In case od Netconf units, the device yang represents command sent to the device in device YANG model. 
This section is a link to XML unit test input testing this configuration."]},{"i":"unit-1","l":"UNIT","p":["Link to github code where this config commmand is implemented along with unit version range."]}],[{"l":"IETF L2VPN YANG"},{"l":"Scenario"},{"i":"l2p2pvpws","l":"L2P2P/VPWS","p":["l2vpn-instance/type == vpws-instance-type only two endpoints"]},{"l":"Local-Local","p":["connection between two local ports on a host (pe-node-id`s of endpoints match)"]},{"i":"ietf--yang","l":"IETF YANG"},{"l":"OPENCONFIG YANG"},{"l":"pe01"}],[{"l":"IETF L2VPN YANG"},{"l":"Scenario"},{"i":"l2p2pvpws","l":"L2P2P/VPWS","p":["l2vpn-instance/type == vpws-instance-type only two endpoints"]},{"l":"Local-Remote","p":["connection between local and remote hosts (pe-node-id`s of endpoints do not match)"]},{"i":"ietf--yang","l":"IETF YANG"},{"l":"OPENCONFIG YANG"},{"l":"pe01"},{"l":"PE2"}],[{"l":"IETF L2VPN YANG"},{"l":"Scenario"},{"i":"l2p2pvpls","l":"L2P2P/VPLS","p":["l2vpn-instance/type == vpls-instance-type Two or more endpoints"]},{"i":"ietf--yang","l":"IETF YANG"},{"l":"OPENCONFIG YANG"},{"l":"pe01"},{"l":"pe02"},{"l":"pe03"}],[{"l":"IETF L3VPN YANG"},{"l":"IETF YANG"},{"l":"OPENCONFIG YANG"}],[{"i":"#","p":["Access control","ACL","ACL interfaces","BGP","CDP","connection point","connection point l2vpn","Discovery protocols","Ethernet interface","Ethernet OAM","Ethernet Virtual Circuit","Ethernet Virtual Private Network","EVC","EVPN","FDP","Hot Standby Router Protocol","HSRP","Interfaces","Internet Protocol Security","IPsec","IS-IS","L2P2P","L2VPN","L3 VLAN interface","L3VPN","l3vpn with BGP","l3vpn with OSPF","LAG interface","Monitoring","MPLS","MPLS LDP","MPLS TE","MPLS TE RSVP","MPLS Tunnel","NetFlow","NetFlow interfaces","Network Instance","Network Instances","OSPF","OSPFv3","PF interfaces","Policy Forwarding","Probes","Protocols","Quality of Service","Routing Policy","SNMP","Spanning Tree Protocol","STP","SYSLOG"]},{"l":"Interfaces"},{"l":"Ethernet interface"},{"l":"LAG interface"},{"l":"L3 VLAN interface"},{"l":"Network Instances"},{"l":"Network Instance"},{"l":"Protocols"},{"l":"BGP"},{"l":"OSPF"},{"l":"OSPFv3"},{"l":"IS-IS"},{"l":"MPLS"},{"l":"MPLS TE"},{"l":"MPLS Tunnel"},{"l":"MPLS TE RSVP"},{"l":"MPLS LDP"},{"l":"Policy Forwarding"},{"l":"PF interfaces"},{"l":"L2P2P"},{"l":"connection point"},{"l":"L2VPN"},{"l":"connection point l2vpn"},{"l":"L3VPN"},{"l":"l3vpn with BGP"},{"l":"l3vpn with OSPF"},{"l":"Discovery protocols"},{"l":"CDP"},{"l":"FDP"},{"l":"Monitoring"},{"l":"SNMP"},{"l":"SYSLOG"},{"l":"Probes"},{"l":"Ethernet OAM"},{"l":"Hot Standby Router Protocol"},{"l":"HSRP"},{"l":"Access control"},{"l":"ACL"},{"l":"ACL interfaces"},{"l":"Spanning Tree Protocol"},{"l":"STP"},{"l":"Routing Policy"},{"i":"routing-policy-1","l":"Routing Policy"},{"l":"NetFlow"},{"l":"NetFlow interfaces"},{"l":"Quality of Service"},{"i":"quality-of-service-1","l":"Quality of Service"},{"l":"Ethernet Virtual Private Network"},{"i":"ethernet-virtual-private-network-1","l":"Ethernet Virtual Private Network"},{"l":"Internet Protocol Security"},{"l":"IPsec"}],[{"l":"Access Control List"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534-ios-xr-662","l":"Cisco IOS XR 5.3.4, IOS XR 6.6.2"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xe-1542s","l":"Cisco IOS XE 15.4(2)S"},{"i":"cli-1","l":"CLI"},{"l":"Examples"},{"i":"unit-1","l":"Unit","p":["Link to github : xe-unit"]},{"i":"junos-141x53-d408","l":"Junos 
14.1X53-D40.8"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-3","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-4","l":"CLI","p":["iacl_intf_index, iacl_subintf_index is a conversion of set ."]},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"Access Control List"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"l":"Cisco IOS Classic"},{"l":"CLI","p":["ipv4|ipv6 is a conversion of* eq|neq|range * is a conversion of or , operation is selected by entered port range *eq|neq|range * is a conversion of or , operatioons is selected by entered port range | acl option could be defined by enumeration named options or by number in range 0-255 ** is a conversion of , when true, value is \"established\", when false, there is empty value \"\""]},{"l":"Examples"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-1","l":"CLI","p":["ipv4|ipv6 is a conversion of"]},{"i":"examples-1","l":"Examples"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-2","l":"CLI","p":["ipv4|ipv6 is a conversion of"]},{"i":"examples-2","l":"Examples"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xe-1542s","l":"Cisco IOS XE 15.4(2)S"},{"i":"cli-3","l":"CLI","p":["** is a conversion of , when true, value is \"established\", when false, there is empty value \"\""]},{"i":"examples-3","l":"Examples"},{"i":"unit-2","l":"Unit","p":["Link to github : xe-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-4","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"i":"cli-5","l":"CLI","p":["conversion is ACCEPT = allow, DROP = deny* access-list disable profile * is a conversion of frinx-acl-extension:enabled set to false. 
Default value is true."]},{"i":"unit-4","l":"Unit","p":["Link to github : [saos-unit]"]}],[{"l":"cable DOWNSTREAM CONTROLLER-PROFILE"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["can be either a single number or a list of numbers (0 31 which represents all the values from 0 to 31)"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"cable FIBER-NODE"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["and are in commands input with whitespace dividing name and number (Downstream-Cable 1/0/16)"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"cable RPD"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["and are in commands input with whitespace dividing name and number (Downstream-Cable 1/0/16)","no principal is a conversion of set false principal is a conversion of set true"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"BRIDGE interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS 12"},{"l":"CLI","p":["is parsed from example is BDI10 -> is 10","no shutdown is a conversion of set true shutdown is a conversion of set false no snmp trap link-status is a conversion of set false snmp trap link-status is a conversion of set true"]},{"l":"Unit","p":["Link to github : ios-unit"]}],[{"l":"CABLE interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["are read from all lines and input into one attribute \"rf-channels\" and are in commands input with whitespace dividing name and number (Downstream-Cable 1/0/16)"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"Ethernet interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["is a conversion of no shutdown is a conversion of set true shutdown is a conversion of set false switchport port-security is a conversion of set true no switchport port-security is a conversion of set false can be \"protect\", \"restrict\" or \"shutdown\" can be \"absolute\" or \"inactivity\" switchport port-security aging static is a conversion of set true no switchport port-security aging static is a conversion of set false lldp transmit is a conversion of set true no lldp transmit is a conversion of set false lldp receive is a conversion of set true no lldp receive is a conversion of set false negotiation auto is a conversion of set true no negotiation auto is a conversion of set false cdp enable is a conversion of set true no cdp enable is a conversion of set false is parsed from example is Port-channel3 -> is 3 mode on is a conversion of set to frinx-openconfig-lacp:ON can be \"default\" or \"rj45\" or \"sfp\" can be \"broadcast\" or \"multicast\" or \"unicast\"","is conversion of"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xe-15-16-17","l":"Cisco IOS XE 15, 16, 17"},{"i":"cli-1","l":"CLI","p":["is a conversion of no shutdown is a conversion of set true shutdown is a conversion of set false normal is a conversion of \"\" set \"NORMAL\" fast is a conversion of \"\" set \"FAST\" lldp transmit is a conversion of set true no lldp transmit is a conversion of set false lldp receive is a conversion of 
set true no lldp receive is a conversion of set false negotiation auto is a conversion of set true no negotiation auto is a conversion of set false is parsed from example is Port-channel3 -> is 3 mode on is a conversion of set to frinx-openconfig-lacp:ON can be \"default\" or \"rj45\" or \"sfp\" can be \"broadcast\" or \"multicast\" or \"unicast\" service instance trunk ethernet is conversion of set true* service instance ethernet * is conversion of set false* encapsulation untagged , dot1q * is conversion of set true* encapsulation dot1q * is conversion of set false can be \"ingress\" or \"egress\" can be \"pop\" or \"push\" or \"translate\" can be \"tunnel\" or \"peer\" or \"forward\" can be \"cdp\" or \"vtp\" or \"lacp\" or \"lldp\" or \"mmrp\" or \"mvrp\" or \"stp\" or \"RB\" or \"RC\" or \"RD\" or \"RF\""]},{"i":"unit-1","l":"Unit","p":["Link to github : ios-xe-unit"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-2","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is a conversion of no dampening is a conversion of set false lacp period short is a conversion of set to frinx-openconfig-lacp:FAST no lacp period short is a conversion of set to frinx-openconfig-lacp:SLOW if is not specified then command bundle id mode on is used mode active is a conversion of set to frinx-openconfig-lacp:ACTIVE mode passive is a conversion of set to frinx-openconfig-lacp:PASSIVE ipv6 nd suppress-ra is a conversion of set true","is conversion of"]},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-3","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false no dampening is a conversion of set false lacp period short is a conversion of set to frinx-openconfig-lacp:FAST no lacp period short is a conversion of set to frinx-openconfig-lacp:SLOW if is not specified then command bundle id mode on is used mode active is a conversion of set to frinx-openconfig-lacp:ACTIVE mode passive is a conversion of set to frinx-openconfig-lacp:PASSIVE","is conversion of"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-4","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is a conversion of no dampening is a conversion of set false lacp period short is a conversion of set to frinx-openconfig-lacp:FAST no lacp period short is a conversion of set to frinx-openconfig-lacp:SLOW if is not specified then command bundle id mode on is used mode active is a conversion of set to frinx-openconfig-lacp:ACTIVE mode passive is a conversion of set to frinx-openconfig-lacp:PASSIVE","is conversion of"]},{"i":"unit-3","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-5","l":"CLI","p":["vlan-tagging is a conversion of set TPID_0X8100 delete interfaces disable is a conversion of set true set interfaces disable is conversion of set false","set interfaces unit disable is conversion of set false delete interfaces unit disable is a conversion of set true"]},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-6","l":"CLI","p":["is parsed from example is ae100 -> is 100","delete interfaces disable is a conversion of set true set interfaces disable is 
conversion of set false"]},{"i":"unit-5","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-7","l":"CLI","p":["delete interfaces disable is a conversion of set true set interfaces disable is conversion of set false In the case of set interfaces ms-x/x/x, set iana-if-type:other instead of iana-if-type:ethernetCsmacd","delete interfaces unit disable is a conversion of set true set interfaces unit disable is conversion of set false"]},{"i":"unit-6","l":"Unit","p":["Link to github : junos-unit"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"i":"cli-8","l":"CLI","p":["enable is a conversion of set true disable is a conversion of set false"]},{"i":"unit-7","l":"Unit","p":["Link to github : brocade-unit"]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-9","l":"CLI","p":["is conversion of \"\" inbound, outbound is a conversions of \"\" trust dscp is a conversion of \"\" set true"]},{"i":"unit-8","l":"Unit","p":["Link to github : huawei-unit"]},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"i":"cli-10","l":"CLI","p":["is parsed from example is Ethernet1/1 -> is 1/1","is parsed from example is Bundle-Ether100 -> is 100","* port enable * is a conversion of set true* port disable * is a conversion of set false lacp port timeout short is a conversion of set to frinx-openconfig-lacp:FAST no lacp port timeout short is a conversion of set to frinx-openconfig-lacp:SLOW"]},{"i":"unit-9","l":"Unit","p":["Link to github : dasan-unit"]},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"i":"cli-11","l":"CLI","p":["* port enable port * is a conversion of set true* port disable port * is a conversion of set false can be \"default\" or \"rj45\" or \"sfp\" vs-ingress-filter on is a conversion of set true vs-ingress-filter off is a conversion of set false can be \"all\", \"tagged-only\", \"untagged-only\" can be \"Default-RCOS\" or \"NNI-NNI\" forward-unlearned on is a conversion of set true forward-unlearned off is a conversion of set false resolved-cos-remark-l2 true is a conversion of set true resolved-cos-remark-l2 false is a conversion of set false","from usual range (max 4094)","can be \"all\" or \"vlan-tpid\"","l2-cft enable port is a conversion of set to true l2-cft disable port is a conversion of set to false","rstp enable port is a conversion of set to true rstp disable port is a conversion of set to false mstp enable port is a conversion of set to true mstp disable port is a conversion of set to false","port set port auto-neg on is a conversion of set to true port set port auto-neg off is a conversion of set to false can be auto, ten, hundred, gigabit, ten-gig"]},{"i":"unit-10","l":"Unit","p":["Link to github : saos-unit"]},{"l":"Ciena SAOS 8"},{"i":"cli-12","l":"CLI","p":["can be between 0 and 96","port set port auto-neg on is a conversion of set to true port set port auto-neg off is a conversion of set to false can be auto, ten, hundred, gigabit, ten-gig, forty-gig, hundred-gig"]},{"i":"unit-11","l":"Unit","p":["Link to github : saos-unit"]},{"i":"arris-cer-arris-e6000","l":"Arris CER (Arris E6000)"},{"i":"cli-13","l":"CLI","p":["no shutdown is a conversion of set true shutdown is a conversion of set false"]},{"i":"unit-12","l":"Unit","p":["Link to github : cer-unit"]}],[{"l":"L2VLAN interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Ciena SAOS8"},{"l":"CLI"},{"l":"Unit","p":["Link to github : saos-unit"]}],[{"l":"L3 VLAN 
interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"l":"CLI","p":["is parsed from example is Vlan10 -> is 10","no shutdown is a conversion of set true shutdown is a conversion of set false no ip redirects is a conversion of set false ip redirects is a conversion of set true"]},{"l":"Unit","p":["Link to github : dasan-unit"]}],[{"i":"link-aggregation-group-bundle-interface","l":"Link Aggregation Group (bundle) interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Cisco IOS XE","p":["track shutdown is a conversion of set true no track shutdown is a conversion of set false"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)","p":["is conversion of"]},{"i":"unit-1","l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is conversion of no dampening is a conversion of set false ipv6 nd suppress-ra is a conversion of set true"]},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-1","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is conversion of no dampening is a conversion of set false ipv6 enable is a conversion of set true no ipv6 enable is a conversion of set false","is conversion of"]},{"i":"unit-3","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-2","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100 is parsed from example is aa:bb:cc:dd:ee:ff -> aabb.ccdd.eeff is parsed from example is aa:bb:cc:dd:ee:ff -> aabb.ccdd.eeff","is conversion of no shutdown is a conversion of set true shutdown is a conversion of set false no dampening is a conversion of set false","is parsed from example is Bundle-Ether100 -> is 100","is conversion of"]},{"i":"unit-4","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-3","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is conversion of ethernet cfm is a conversion of set to true no ethernet cfm is a conversion of set to false"]},{"i":"unit-5","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-4","l":"CLI","p":["is parsed from example is ae100 -> is 100","delete interface ae disable is a conversion of set true set interface ae disable is conversion of set false","Device does not support damping on LAG interface."]},{"i":"unit-6","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-5","l":"CLI","p":["is parsed from example is ae100 -> is 100","delete interface ae disable is a conversion of set true set interface ae disable is conversion of set false","inner_vlan_tag, outer_vlan_tag is a conversion of set . 
delete interface ae unit disable is a conversion of set true set interface ae unit disable is conversion of set false rpm_ifc_index , rpm_subintf_index is a conversion of set ."]},{"i":"unit-7","l":"Unit","p":["Link to github : junos-unit"]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-6","l":"CLI","p":["is conversion of"]},{"i":"unit-8","l":"Unit","p":["Link to github : huawei-unit"]},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"i":"cli-7","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100 and prefix is Bundle-Ether Dasan supports two kinds of prefixes (Prefix is settled by lag type)","If the prefix of is 'Trunk', lag type is port trunking","If the prefix of is 'Bundle-Ether', lag type is lacp","vlan add br t/ tagged is only supported by port trunking vlan add br t/ untagged is only supported by port trunking"]},{"i":"unit-9","l":"Unit","p":["Link to github : dasan-unit"]},{"l":"Ciena SAOS8"},{"i":"cli-8","l":"CLI","p":["classifier-precedence is used as **. This field is mandatory in Ciena and unique withing parent-port. bin_count can be from \"0\" to \"96\". Default value is \"32\". when ** is set to true, then vlan-untagged-data is used in the the sub-port command. there is not possible to set vlan-untagged-data and vtag-stack both."]},{"i":"unit-10","l":"Unit","p":["Link to github : saos-unit"]},{"l":"Arris CER"},{"i":"cli-9","l":"CLI"},{"i":"unit-11","l":"Unit","p":["Link to github : cer-unit"]}],[{"l":"WIDEBAND interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"i":"internet-protocol-security-ipsec","l":"Internet Protocol Security (IPsec)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"nokia-sros-160","l":"NOKIA SROS 16.0"},{"l":"CLI","p":["no shutdown is a conversion of set to true shutdown is a conversion of set to false"]}],[{"l":"NetFlow"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["Assumption is that monitor map and sampler map configuration already exist on a device."]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"L2P2P configuration"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-vios-1562t","l":"Cisco IOS (VIOS 15.6(2)T)"},{"l":"CLI","p":["If connection points remote and local without subif","If connection points remote and local with subif","If both connection points are type local without subif","If both connection points are type local with subif"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"i":"cli-1","l":"CLI","p":["If connection point type remote","If connection point type local without subif","If connection point type local with subif (for XRv 5.1.3)","If connection point type local with subif (for XRv 6.1.2)","If both connection points are local we can use the same translation code as above .. 
the combined output will look like this example:"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"i":"cli-2","l":"CLI","p":["If connection point type remote","If connection point type local without subif","If both connection points are type local","With subif","If both connection points are type local without subif"]},{"i":"unit-2","l":"Unit","p":["Link to github : brocade-unit"]}],[{"i":"l2vpn-vpls-with-bgp-autodiscovery-configuration","l":"L2VPN (VPLS with BGP autodiscovery) configuration"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-not-fully-tested-yet--vios-does-not-support-vpls","l":"Cisco IOS (not fully tested yet ... vIOS does not support VPLS)"},{"l":"CLI","p":["If connection point type remote","If connection point type local without subif","If connection point type local with subif"]},{"l":"Unit","p":["NOT IMPLEMENTED"]},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"i":"cli-1","l":"CLI","p":["If connection point type remote","If connection point type local without subif","If connection point type local with subif (for XRv 5.1.3)","If connection point type local with subif (for XRv 6.1.2)"]},{"i":"unit-1","l":"Unit","p":["NOT IMPLEMENTED"]}],[{"i":"l2vsi-l2-virtual-switch-instance-virtual-circuit","l":"L2VSI (L2 virtual switch instance virtual circuit)","p":["Interconnects L2VSI with a vlan-based upstream path"]},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["statistics on is a conversion of set true statistics off is a conversion of set false"]},{"l":"Unit"}],[{"i":"l2vsi-l2-virtual-switch-instance","l":"L2VSI (L2 virtual switch instance)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["can have values *l2-cft tagged-pvst-l2pt enable vs * is a conversion of \"tagged-pvst-l2pt\" field set to true *l2-cft tagged-pvst-l2pt disable vs * is a conversion of \"tagged-pvst-l2pt\" field set to false"]},{"l":"Ciena SAOS 8"},{"i":"cli-1","l":"CLI","p":["cpu-subinterface command is sent, if the type of the interface added is iana-if-type:l2vlan","sub-port command is sent, if the type of the interface added is iana-if-type:ieee8023adLag","** in this case needs to have form . This can be derived from : https://github.com/FRINXio/translation-units-docs/blob/master/Configuration%20datasets/interfaces/lag_interface.md"]}],[{"i":"l3vpn-configuration-bgp-as-ce-pe-protocol","l":"L3VPN configuration (BGP as CE-PE protocol)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["CONSTRAINTS Network-instance with name must exist before defined-sets or both must be created in the same transaction. Delete must be executed in reverse order or in the same transaction. 
Policy -route-target-import and -route-target-export must exist on device before are used in network-instance."]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"l":"CLI"},{"i":"cisco-ios-xr-661","l":"CISCO IOS XR (6.6.1)"},{"i":"cli-1","l":"CLI","p":["summary-only is a conversion of \"frinx-bgp-extension:summary-only\" set true"]},{"i":"cisco-ios-vios-1562t","l":"Cisco IOS (VIOS 15.6(2)T)"},{"i":"cli-2","l":"CLI"},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-3","l":"CLI"},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-4","l":"CLI","p":["1 to 100000 is conversion of set \"frinx-huawei-network-instance-extension:prefix-limit-from\" 1 to 100 is conversion of set \"frinx-huawei-network-instance-extension:prefix-limit-to\""]},{"l":"Unit","p":["Link to github : huawei-unit"]}],[{"i":"l3vpn-configuration-ospf-as-ce-pe-protocol","l":"L3VPN configuration (OSPF as CE-PE protocol)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"l":"CLI"},{"i":"cisco-ios-xr-623","l":"CISCO IOS XR (6.2.3)"},{"i":"cli-1","l":"CLI"},{"i":"cisco-ios-xr-661","l":"CISCO IOS XR (6.6.1)"},{"i":"cli-2","l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-vios-1562t","l":"Cisco IOS (VIOS 15.6(2)T)"},{"i":"cli-3","l":"CLI"},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-4","l":"CLI","p":["virtual-router is a conversion of set L3VRF delete routing-instances protocols ospf area interface disable is a conversion of set true set routing-instances protocols ospf area interface disable is a conversion of set false set routing-instances protocols ospf area interface authentication is a conversion of set true delete routing-instances protocols ospf area interface authentication is a conversion of set false"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-5","l":"CLI"}],[{"i":"multiprotocol-label-switching---label-distribution-protocol-mpls-ldp","l":"Multiprotocol Label Switching - Label Distribution Protocol (MPLS LDP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["\"enabled\" MUST be set to true when any ldp-configuration is pushed","\"enabled\" set to false, will ignore any additional configuration in the PUT request and will result in 'no mpls ldp'"]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"multiprotocol-label-switching---resource-reservation-protocol-mpls-rsvp","l":"Multiprotocol Label Switching - Resource Reservation Protocol (MPLS RSVP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4","p":["setting to default results in 'bandwidth' meaning setting default device bandwidth","setting to numeric value results in 'bandwith < number>","transformation: input bandwith in bps, in XR router as Kbps"]},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10","p":["transformation: k,m,g from JUNOS router translates to thousand, million, billion"]},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"multiprotocol-label-switching---traffic-engineering-mpls-te","l":"Multiprotocol Label Switching - Traffic Engineering 
(MPLS-TE)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["\"enabled\" MUST be set to true when any te-configuration is pushed","\"enabled\" set to false, will ignore any additional configuration in the PUT request and will result in 'no mpls traffic-eng'"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"multiprotocol-label-switching---tunnel","l":"Multiprotocol Label Switching - Tunnel"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["autoroute announce is a conversion of set true no autoroute announce is a conversion of set false load-share is not supported on virtual platform CISCO IOS-XR mpls_tunnel_destination is optional parameter metric absolute command is only valid if autoroute announce is set"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-1","l":"CLI","p":["* set protocols mpls label-switched-path * is a conversion of set true mpls_tunnel_destination is mandatory parameter"]},{"i":"unit-1","l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"Interface policy configuration"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["policy-forwarding YANG model","extensions to policy-forwarding YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534-ios-xr-662","l":"Cisco IOS XR 5.3.4, IOS XR 6.6.2"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-3","l":"CLI","p":["pf_intf_index, pf_subintf_index is a conversion of set ."]},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-4","l":"CLI"},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"border-gateway-protocol-bgp","l":"Border Gateway Protocol (BGP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST remove-private-AS is a conversion of default-originte is a conversion of next-hop-self is a conversion of if value is \"nexthopself\" no shutdown is a conversion of set true shutdown is a conversion of set false ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST vpnv4 unicast is a conversion of set L3VPN_IPV4_UNICAST"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661-via-netconf","l":"Cisco IOS XR 6.6.1 (via NetConf)"},{"i":"cli-1","l":"CLI","p":["l2vpn evpn is a conversion of set L2VPN_EVPN remove-private-AS is a conversion of default-originte is a conversion of next-hop-self is a conversion of if value is \"nexthopself\" no shutdown is a conversion of set true shutdown is a conversion of set false l2vpn evpn is a conversion of set L2VPN_EVPN"]},{"i":"unit-1","l":"Unit","p":["Link to 
github : xr-unit"]},{"i":"cisco-ios-xr-661-no-netconf","l":"Cisco IOS XR 6.6.1 (no NetConf)"},{"i":"cli-2","l":"CLI","p":["vpnv4 unicast is a conversion of set L3VPN_IPV4_UNICAST"]},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-3","l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST remove-private-AS is a conversion of default-originte is a conversion of ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST"]},{"i":"unit-3","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xe-031301s","l":"Cisco IOS XE 03.13.01.S","p":["bgp log-neighbor-changes is a conversion of set true no bgp log-neighbor-changes is a conversion of set false default-information originate is a conversion of set true no default-information originate is a conversion of set false neighbor as-override is a conversion of set true neighbor fall-over bfd is a conversion of set true transport connection-mode passive is a conversion of set true route-reflector-client is a conversion of set true remove-private-as is a conversion of set \"frinx-openconfig-bgp-types:PRIVATE_AS_REMOVE_ALL\" no-prepend is a conversion of set true replace-as is a conversion of set true neighbor version 4 is a conversion of set \"frinx-bgp-extension:VERSION_4\" auto-summary is a conversion of set true no auto-summary is a conversion of set false* redistribute connected route-map * is a conversion of set true no redistribute connected is a conversion of set false* redistribute static route-map * is a conversion of set true no redistribute static is a conversion of set false synchronization is a conversion of set true no synchronization is a conversion of set false"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-4","l":"CLI","p":["activate is a conversion of set true deactivate is a conversion of set false"]},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-5","l":"CLI","p":["auto-discovery is conversion of set \"frinx-bgp-extension:transport\" keepalive is conversion of set \"timer_mode\" 0-21845 is conversion of set \"time_before\" 3-65535 is conversion of set \"timer_after\" direct, static is conversions of set \"import_route\""]},{"i":"unit-5","l":"Unit","p":["Link to github : huawei-unit"]}],[{"i":"intermediate-system-to-intermediate-system-is-is","l":"Intermediate System to Intermediate System (IS-IS)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"l":"CLI","p":["point-to-point is a conversion of set POINT_TO_POINT","value frinx-openconfig-isis-types:IPV6 is to be converted to ipv6 value frinx-openconfig-isis-types:UNICAST is to be converted to unicast value LEVEL_1 is to be converted to level-1 value LEVEL_2 is to be converted to level-2 value LEVEL_1_2 is to be converted to level-1-2"]},{"i":"cisco-ios-xr-661cli","l":"Cisco IOS XR 6.6.1(CLI)"},{"i":"cli-1","l":"CLI","p":["value frinx-isis-extension:NOT_SET is to be converted to max-link-metric value frinx-isis-extension:LEVEL_1 is to be converted to max-link-metric level 1 value frinx-isis-extension:LEVEL_2 is to be converted to max-link-metric level 2","value frinx-openconfig-isis-types:IPV6 is to be converted to ipv6 value frinx-openconfig-isis-types:UNICAST is to be converted to unicast","is converted 
from .","if is LEVEL_1, then is set as level-1","if is LEVEL_2, then is set as level-2","if is LEVEL_1_2, then is set as level-1-2"]}],[{"i":"open-shortest-path-first-ospf","l":"Open Shortest Path First (OSPF)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands","p":["include-stub is a conversion of MAX_METRIC_INCLUDE_STUB in the include list of the max-metric-timer external-lsa is a conversion of MAX_METRIC_INCLUDE_TYPE2_EXTERNAL in the include list of the max-metric-timer summary-lsa is a conversion of MAX_METRIC_SUMMARY_LSA in the include list of the max-metric-timer"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["bfd fast-detect is a conversion of set true bfd fast-detect disable is a conversion of set false mpls ldp sync is a conversion of set true mpls ldp sync disabled is a conversion of set false passive enable is a conversion of set true passive disabled is a conversion of set false","** value MAX_METRIC_ON_SYSTEM_BOOT is to be converted to on-startup** value MAX_METRIC_ON_SWITCHOVER is to be converted to on-switchover"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-2","l":"CLI","p":["delete protocols ospf area interface disable is a conversion of set true set protocols ospf area interface disable is a conversion of set false"]},{"i":"unit-2","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-3","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"open-shortest-path-first-v3-ospfv3","l":"Open Shortest Path First v3 (OSPFv3)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["value STUB_ROUTER_MAX_METRIC is to be converted to max-metric value STUB_ROUTER_R_BIT is to be converted to r-bit value STUB_ROUTER_V6_BIT is to be converted to v6-bit"]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"Static Route"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS 12"},{"l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST is parsed from is parsed from"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-1","l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST is parsed from is parsed from"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"VLAN"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["no shutdown is a conversion of set ACTIVE shutdown is a conversion of set SUSPENDED"]},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"i":"cli-1","l":"CLI","p":["if is true","if is false"]},{"l":"Ciena SAOS 614"},{"i":"cli-2","l":"CLI","p":["should be pure numeric, converted from oc-vlan-types:TPID_TYPES from openconfig enable is a conversion of to true disable is a conversion of to false is an enumeration trust-mode - options are client-trusted, server-trusted, dualrole-trusted and untrusted"]}],[{"i":"configure-network-instance-vrf","l":"Configure network 
instance (VRF)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-unit"]},{"l":"Configure default network instance"},{"i":"url-1","l":"URL"},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG","p":["YANG models","vlans definition - vlans policy-forwarding definition - policy-forwarding protocols definition - protocols interface-name is a conversion of each interface name for this network-instance cisco-ipv6-config is a conversion of global ipv6 configuration for device and consist of: unicast-routing and cef whose values can only be true or false"]},{"i":"os-commands-1","l":"OS Commands"},{"i":"cisco-ios-classic-1524s5","l":"Cisco IOS Classic (15.2(4)S5)"},{"i":"cli-1","l":"CLI"}],[{"l":"Routing Policy"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-12-ios-15-ios-xe-15-ios-xe-16-ios-xe-17","l":"Cisco IOS 12, IOS 15, IOS XE 15, IOS XE 16, IOS XE 17","p":["permit is a conversion of set to frinx-cisco-routing-policy-extension:PERMIT deny is a conversion of set to frinx-cisco-routing-policy-extension:DENY","permit is a conversion of set to community-member deny is a conversion of set to frinx-openconfig-bgp-policy-extension:community-member-deny","permit is a conversion of set to frinx-cisco-routing-policy-extension:PERMIT deny is a conversion of set to frinx-cisco-routing-policy-extension:DENY set community (no-export) is a conversion of set to frinx-openconfig-bgp-types:NO_EXPORT set community (no-advertise) is a conversion of set to frinx-openconfig-bgp-types:NO_ADVERTISE* match tag * is a tag element in match clause set to frinx-cisco-routing-policy-extension:tags"]},{"i":"cisco-ios-xr-534-ios-xr-662","l":"Cisco IOS XR 5.3.4, IOS XR 6.6.2"},{"l":"CLI","p":["is parsed from .","If is \"exact\", then is not set.","If matches to pattern of .., then is set as \"le ge \".","* destination in * is a conversion of set to ANY* not destination in * is a conversion of set to INVERT","as-path length le is a conversion of set to frinx-openconfig-policy-types:ATTRIBUTE_LE as-path length ge is a conversion of set to frinx-openconfig-policy-types:ATTRIBUTE_GE as-path length eq is a conversion of set to frinx-openconfig-policy-types:ATTRIBUTE_EQ","community match-any is a conversion of set to ANY community match-every is a conversion of set to ALL","drop is a conversion of set to REJECT_ROUTE done is a conversion of set to ACCEPT_ROUTE pass is a conversion of set to PASS_ROUTE","set community (no-export) is a conversion of set to frinx-openconfig-bgp-types:NO_EXPORT set community (no-advertise) is a conversion of set to frinx-openconfig-bgp-types:NO_ADVERTISE set community (local-as) is a conversion of set to frinx-openconfig-bgp-types:NO_EXPORT_SUBCONFED","* as-path in * is a conversion of set to ANY* not as-path in * is a conversion of set to INVERT"]},{"l":"Examples"},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-1","l":"CLI"},{"i":"examples-1","l":"Examples"}],[{"i":"aaa---authentication-authorization-accounting","l":"AAA - Authentication Authorization Accounting"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"l":"CLI","p":["local, radius is conversions of set to \"authentication-method\" local, radius is conversions of set to \"accounting-method\" 
start-fail is conversion of set to \"fail-policy\" online is conversion of set to \"fail-policy-mode\" telnet, terminal, ssh, ftp is conversions of set to \"frinx-huawei-aaa-extension:service-type\" 1-15 is conversion of set to \"frinx-huawei-aaa-extension:privilege-level\""]},{"l":"Unit","p":["Link to GitHub : huawei-unit"]},{"l":"SAOS 6"},{"i":"cli-1","l":"CLI","p":["limited, admin, super, diag is conversion of set to \"frinx-ciena-aaa-extension:access-level\""]},{"i":"unit-1","l":"Unit","p":["Link to GitHub : saos6-unit"]}],[{"i":"broadcast-containment-broadcast-containment-filters","l":"Broadcast-Containment (Broadcast-containment filters)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["enable is conversion of set true disable is conversion of set false"]},{"l":"Unit"}],[{"l":"Configure CDP interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["cdp enable is conversion of \"enabled\": true no cdp enable is conversion of \"enabled\": false"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-xrv-513-and-xrv-612-tested","l":"Cisco IOS XR (XRv 5.1.3 and XRv 6.1.2 tested)"},{"i":"cli-1","l":"CLI","p":["cdp is conversion of \"enabled\": true no cdp is conversion of \"enabled\": false"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"i":"cli-2","l":"CLI","p":["cdp enable is conversion of \"enabled\": true no cdp enable is conversion of \"enabled\": false"]},{"i":"unit-2","l":"Unit","p":["Link to github : brocade-unit"]}],[{"l":"Configure FDP interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"l":"CLI","p":["fdp enable is conversion of \"enabled\": true no fdp enable is conversion of \"enabled\": false"]},{"l":"Unit","p":["NOT IMPLEMENTED"]}],[{"l":"Configure STP interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"l":"CLI","p":["If /stp/interfaces/interface/ exists","If /interfaces/interface/ exists and /stp/interfaces/interface/ does not exist"]},{"l":"Unit","p":["NOT IMPLEMENTED"]}],[{"i":"ethernet-oam--ethernet-cfm","l":"Ethernet OAM / Ethernet CFM"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"l":"CLI","p":["ethernet cfm is a conversion of set to true no ethernet cfm is a conversion of set to false efd is a conversion of set to true no efd is a conversion of set to false"]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"ethernet-virtual-circuit-evc","l":"Ethernet Virtual Circuit (EVC)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xe-16","l":"Cisco IOS XE 16.*"},{"l":"CLI","p":["* ethernet evc * is creating evc configuration with name* no ethernet evc * is deleting evc configuration with name"]},{"l":"Unit","p":["Link to github : ios-xe-evc-unit"]}],[{"i":"ethernet-virtual-private-network-evpn","l":"Ethernet Virtual Private Network (EVPN)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-661-via-netconf","l":"Cisco IOS XR 6.6.1 (via NetConf)"},{"l":"CLI","p":["evpn is a conversion of set to 
true no evpn is a conversion of set to false is parsed from example is Bundle-Ether100 -> is 100 is parsed from Bundle-Ether is a conversion of set to \"iana-if-type:ieee8023adLag\" mode port-active is a conversion of set to \"frinx-es-lb-mode:PORT-ACTIVE\" mode single-active is a conversion of set to \"frinx-es-lb-mode:SINGLE-ACTIVE\""]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661-no-netconf","l":"Cisco IOS XR 6.6.1 (no NetConf)"},{"i":"cli-1","l":"CLI","p":["evpn is a conversion of set true no evpn is a conversion of set false cost-out is a conversion of set true no cost-out is a conversion of set false or null"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"hot-standby-router-protocol-hsrp","l":"Hot Standby Router Protocol (HSRP)"},{"l":"URL"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"l2-cft-layer-2-control-frame-forwarding","l":"L2-Cft (Layer 2 Control Frame Forwarding)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["can be can be <802.1x | all-bridges-block | cisco-cdp | cisco-dtp | cisco-pagp | cisco-pvst | cisco-stp-uplink-fast | cisco-udld | cisco-vtp | elmi | esmc | garp-block | gmrp | gvrp | lacp | lacp-marker | lldp | oam | ptp-peer-delay | vlan-bridge | xstp> if == mef-ce1 -> can be also bridge-block if == mef-ce2 -> can be also can be "]},{"l":"Unit"}],[{"i":"logging-syslog","l":"Logging (syslog)"},{"l":"URL"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"Privilege"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-12-15-16--ios-xe-15-16-17","l":"Cisco IOS 12, 15, 16 / IOS XE 15, 16, 17"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-privilege-unit"]}],[{"l":"Probes"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"l":"CLI","p":["set services rpm probe delegate-probes is a conversion of < delegate-probes> set true set services rpm probe test target address is a conversion of < target-type> set address"]},{"l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"Quality of Service"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"i":"url-1","l":"URL"},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG"},{"i":"url-2","l":"URL"},{"i":"openconfig-yang-2","l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-12-15-16--ios-xe-15-16-17","l":"Cisco IOS 12, 15, 16 / IOS XE 15, 16, 17"},{"l":"CLI"},{"l":"Usage","p":["A term marks one or more conditions depending on the class-map type.","When class-map type: match-all, there is just one term, that MUST be called 'all'.","When class-map type: match-any, the terms are numbered from 1 ... number_of_conditions. 
In this case, the {{term_id}} marks the line, where the conditions specified in conditions is written."]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-1","l":"CLI"},{"i":"usage-1","l":"Usage","p":["A term marks one or more conditions depending on the class-map type.","When class-map type: match-all, there is just one term, that MUST be called 'all'.","When class-map type: match-any, the terms are numbered from 1 ... number_of_conditions. In this case, the {{term_id}} marks the line, where the conditions specified in conditions is written.","Example:","will create 5 terms numbered from 1 to 5, where term 1 contains condition for qos-group, term 2 contains condition for mpls, etc.","Writing will occur in ascending order. Reading is the same, first condition is put into first term, etc."]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-2","l":"CLI"},{"l":"Unit","p":["Link to github : huawei-unit"]},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"i":"cli-3","l":"CLI","p":["traffic-profiling enable is a conversion of {{qos_enabled}} set to true traffic-profiling disable is a conversion of {{qos_enabled}} set to false","{{scheduler_type}} can be port_policy- this issues traffic-profiling commands. The {{scheduler_seq}} will be always 0, there can be just one scheduler of this type.{{scheduler_type}} can be queue_group_policy- this issues traffic-services command. The {{scheduler_seq}} is represented by queue number."]}],[{"l":"Relay Agent"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"SAOS 6"},{"l":"CLI","p":["true or false is conversion of set to \"enable\" rid-string, device-hostname, device-mac is conversion of set to \"remote_id_type\" true or false is conversion of set to \"replace-option82\""]},{"l":"Unit","p":["Link to GitHub : saos6-unit"]},{"l":"SAOS 8"},{"i":"cli-1","l":"CLI","p":["true or false is conversion of set to \"enable\" rid-string, device-hostname, device-mac is conversion of set to \"remote_id_type\" true or false is conversion of set to \"replace-option82\""]},{"i":"unit-1","l":"Unit","p":["Link to GitHub : saos8-unit"]}],[{"i":"simple-network-management-protocol-snmp","l":"Simple Network Management Protocol (SNMP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"i":"url-1","l":"URL"},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG"},{"i":"url-2","l":"URL"},{"i":"openconfig-yang-2","l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-1","l":"CLI","p":["By default enabled on all interfaces. 
To disable, use:","To enable disabled interfaces use:","enabled:true is a conversion of snmp set enabled","enabled:false is a conversion of snmp set disabled"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"System-wide services and functions"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE ASR920"},{"l":"CLI"},{"l":"Unit","p":["Link to github : [ios-xe-unit]"]}],[{"i":"#","p":["Network Instances","Protocols","BGP summary","BGP RIB","OSPF summary","Discovery protocols","CDP","LLDP"]},{"l":"Network Instances"},{"l":"Protocols"},{"l":"BGP summary"},{"l":"BGP RIB"},{"l":"OSPF summary"},{"l":"Discovery protocols"},{"l":"CDP"},{"l":"LLDP"}],[{"i":"bgp-global--neighbors","l":"BGP global + neighbors"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"BGP RIB"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["'*' (valid route) translates to \"valid-route\" : true'i' (internal) translates to \"origin\": \"i\""]},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]}],[{"i":"show-router-ospf-type-id-interfaces","l":"Show router ospf type, ID, interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["Supporting command to determine OPSF - VRF relationships:","Supporting command to show interfaces"]},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"Interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"CER Arris devices"},{"l":"CLI"},{"l":"Unit","p":["Link to GitHub : cer-unit"]}],[{"l":"Platform"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Cisco IOS Classic"},{"l":"CLI"},{"l":"Unit","p":["Link to GitHub : ios-unit"]},{"i":"cisco-ios-xe-15-16-17","l":"Cisco IOS XE 15, 16, 17"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to GitHub : ios-xe-unit"]},{"l":"Ciena SAOS 6"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to GitHub : saos6-unit"]},{"l":"Ciena SAOS 8"},{"i":"cli-3","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to GitHub : saos8-unit"]},{"l":"CER Arris devices"},{"i":"cli-4","l":"CLI"},{"i":"unit-4","l":"Unit","p":["Link to GitHub : cer-unit"]}],[{"l":"Show CDP interfaces and neighbors"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE 
(15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"Show LLDP interfaces and neighbors"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"System"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Ciena SAOS 6"},{"l":"CLI"},{"l":"Unit","p":["Link to GitHub : saos6-unit"]},{"l":"Ciena SAOS 8"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to GitHub : saos8-unit"]}],[{"l":"Table of Contents","p":["=================","Base Handlers","Base readers","Base writers","Best practices for handlers (readers/writers)","Chunk templates","CLI Init Translation Unit","CLI Translation Unit","Device registration","Documentation","Finding mapping between device and the model","Handlers","Mandatory interfaces to implement","Module structure","NETCONF Unified Translation Unit","OpenConfig to device config mapping","Plaintext parsing hints","Readers","TranslateUnit","Translation Framework","Translation units for different device versions","Translation Units in general","Util classes","Writers"]},{"l":"Translation Framework","p":["The translation framework allows translation units to:","Add YANG model into the system","Register Handlers for all or a subset of nodes defined in the YANG model","Register the entire unit into the system, which is then able to perform"]},{"l":"OpenConfig to device config mapping"},{"l":"Finding mapping between device and the model","p":["Preferred YANG models for device config and operational data are OpenConfig models.","These models usually represents configuration part in container config and operational part in container state. Operational data is config data + operational data.","This site http://ops.openconfig.net/branches/master/ may be used for better browsing in OpenConfig YANG models. Another option is to generate YANG tree representation by using generate_html.sh in https://github.com/FRINXio/openconfig.","YANG models used in UniConfig framework need to be located in https://github.com/FRINXio/openconfig. In case the desired functionality is not modeled yet, you can create new YANG with its own structure or it can augment existing OpenConfig models. Guideline, how to write OpenConfig models can be found at http://www.openconfig.net/docs/style-guide/."]},{"l":"Documentation","p":["There is translation-units-docs page as a single point of truth for mapping. Use __ notation for variables in the templates. This notation is postman compatible."]},{"l":"Translation Units in general"},{"l":"Module structure","p":["Translation unit is a self contained project which implements a mapping between OpenConfig based YANG models and device specific configuration. It is used by the FRINX ODL to perform translation between device specific configuration model and standard (OpenConfig) models. 
A unit usually consists of:","Handlers","Readers","Writers","TranslateUnit implementation","RPCs"]},{"l":"Handlers","p":["Each complex node in YANG (container, list, augment...) should have a dedicated handler (Reader, Writer)","This enables extensibility, readability and the framework can easily filter and process the data this way","Unless there is a need to also handle child nodes, in which case register the handler using subtreeAdd method from the registries","There are 2 types of handlers: Readers (Read operation) and Writers (Create, Update, Delete operation)","One can implement just the readers or both readers and writers for YANG models. Writers must have counterpart readers because of reconciliation.","Readers and Writers should use the InstanceIdentifier parameter they receive in readCurrentAttributes or writeCurrentAttributes methods to find information about keys for their parent nodes. E.g. Reader registered under ID: /interfaces/interface/config will always receive keyed version of that ID: /interface/interface[Loopback0]/config. So it can use method firstKeyOf on InstanceIdentifier to get the keys.","RWUtils class contains methods for InstanceIdentifier manipulation.","Readers and writers can be easily tested and it is necessary to provide unit tests for all of them. It's important to cover readCurrentAttributes and writeCurrentAttributes with all possible scenarios (all data there, no data there, partial data there...)","Writers may use Preconditions.checkArgument() before accessing the device. Fail of the precondition check does not invoke default rollback (opposite operation) on the writer where precondition is located."]},{"l":"Base Handlers","p":["When a handler for the same YANG node is implemented to conform various devices, it tends to lead to a lot of boilerplate and duplicate code. Therefore, we should implement a base handler for such handlers. How does it work:","create a base-project (if there isn't any) to group base handlers (eg. for an interface handler, choose interface-base project)","each base handler needs to be abstract and implement same interfaces as the original handler","extract common functionality in the base handler. Common functionality means that it will conform the majority of the original handlers. If a handler does not share the extracted functionality, it needs to override original interface methods, to hide the extracted functionality.","let original handlers extend base abstract handler"]},{"l":"CLI Translation Unit","p":["CLI Translation units are located in https://github.com/FRINXio/cli-units repository. JAVA is used in CLI translation units."]},{"l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or augmentation nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds","Readers should always use overloaded blockingRead method which takes in the ReadContext since that method performs caching internally","Use full version of commands e.g. 
show running-config interface instead of sh run int"]},{"l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG. These interfaces also contain util methods which may be used for better manipulation with data. For more information about methods please read javadocs.","CliConfigListReader- implement this interface if target composite node in YANG is list and represents config data.","CliConfigReader- implement this interface if target composite node in YANG is container or augmentation and represents config data.","CliOperListReader- implement this interface if target composite node in YANG is list and represents operational data.","CliOperReader- implement this interface if target composite node in YANG is container or augmentation and represents operational data.","In cases where you want to invoke multiple readers on reading one YANG node, extend following abstract classes:","CompositeListReader- extend this abstract class if multiple list readers need to be invoked when reading specific list in YANG.","CompositeReader- extend this abstract class if multiple readers need to be invoked when reading specific node in YANG.","A practical example of their usage is reading network instance based on it's type. All child readers need to implement a check when the particular reader should be invoked or the parent reader should move on to the next reader.","For example child reader for bgp (located under protocol) needs to check if identifier in protocol has value BGP. Otherwise reader for bgp will be invoked even if protocol identifier is OSPF."]},{"l":"Util classes","p":["ParsingUtils- use methods of this util class if you want to parse plaintext to java object builder"]},{"l":"Plaintext parsing hints","p":["Use as specific regular expressions when parsing CLI output as possible","For Cisco CLI devices avoid using section and other advanced formatting parameters. Only | include | exclude and | begin are allowed.","Use CONFIG data as the source of truth when parsing information from device. Except when parsing state containers (or containers explicitly marked as config false).","I.e. use sh run| include router ospf instead of sh ospf when retrieving ospf routers list.","In some cases, it is not possible to just use config data e.g. sh run interface does not show any data for interfaces that have no configuration. In this case it is necessary to use operational information from e.g. sh ip int brief","Use following pattern when parsing multiline output from CLI, where it is difficult to extract lines and their relationships","I.e. when parsing configured BGP neighbors per address family following command can be used: ** sh run | include router bgp| address-family|^ neighbor which results in:","This output can then be parsed by:","Remove newlines to get a single line of string","Replace \"router\" with \"\\nrouter\" to separate bgp routers per line","Find the line that matches required router bgp","Take that line and replace \"address-family\" with \"\\naddress-family\" to get address-family neighbors per line"]},{"l":"Base Readers","p":["Each base reader should contain abstract methods:","String getReadCommand()- each child reader should fill in the read command used to get information needed for this reader. Arguments may vary and they are used to be more specific in the read command (eg. 
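The multiline-parsing recipe listed above can be illustrated with a small, framework-free sketch. This is not FRINX code; the sample output, AS number and class/method names are invented for illustration, and only the four transformation steps come from the text.

```java
import java.util.ArrayList;
import java.util.List;

// Standalone sketch of the multiline-parsing steps listed above: flatten the CLI
// output, re-split it per "router" block, pick the matching "router bgp <as>"
// block, then re-split that block per "address-family" so that each line holds
// one address-family and its neighbors. Sample data is invented for illustration.
public class BgpNeighborParsingSketch {

    static List<String> addressFamilyLines(String cliOutput, String asNumber) {
        // 1. remove newlines to get a single line of text
        String flat = cliOutput.replace("\r", "").replace("\n", " ").replaceAll(" +", " ");
        // 2. replace "router" with "\nrouter" to separate bgp routers per line
        String perRouter = flat.replace("router", "\nrouter");
        List<String> result = new ArrayList<>();
        for (String routerBlock : perRouter.split("\n")) {
            // 3. find the line that matches the required "router bgp <as>"
            if (routerBlock.trim().startsWith("router bgp " + asNumber)) {
                // 4. replace "address-family" with "\naddress-family" to get
                //    address-family neighbors per line
                for (String afLine : routerBlock.replace("address-family", "\naddress-family").split("\n")) {
                    result.add(afLine.trim());
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        String output = "router bgp 65000\n address-family ipv4\n  neighbor 192.0.2.1 activate\n"
                + " address-family vpnv4\n  neighbor 192.0.2.2 activate\n";
        addressFamilyLines(output, "65000").forEach(System.out::println);
    }
}
```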
when creating a command to gather information about a specific interface, you may want to pass interface name as argument).","Pattern getLine(\\args>)- there may be more such methods and they are used to get the regular expression needed to parse output of the command (eg. in case of interface reader, you will create methods getDescriptionLine, getShutdownLine etc.)","Note: naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node","A writer should check whether the command it executed was handled by the device properly (by checking the output) and if not throw one of the Write/Update/Delete FailedException","Chunk templating framework is preferred to use in writers it gives us:","Null safety","if/loop etc. inside templates","Default values and many more","Use full version of commands e.g. configure terminal instead of conf t"]},{"i":"mandatory-interfaces-to-implement-1","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). These interfaces also contain util methods which may be used for better manipulation with data. For more information about methods please read javadocs.","All writers override updateCurrentAttributes method and avoid delete/write combination, unless specified in a comment.","CliListWriter- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","CliWriter- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter.","CompositeWriter- extend this abstract class when multiple writers need to be invoked on one YANG node. The writers need to implement a check whether or not should they be invoked."]},{"l":"Base Writers","p":["Each base writer should contain abstract methods:","String updateTemplate(Config before, Config after)- this method returns Chunk template used for writing and updating data on the device.","String deleteTemplate(Config data)- this method returns Chunk template used for deleting data from device.","Note: if updating data is done differently than writing new data, method String writeTemplate(Config data) might be used as well."]},{"l":"Chunk Templates","p":["Each original writer transformed to use a base writer should have all it's templates written in Chunk. We extended Chunk to achieve easier manipulation with data. There is now a new filter called update. 
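The base-reader pattern described above (an abstract getReadCommand() plus get*Line() regular expressions in the base class, with device-specific children supplying only the command) can be sketched without the framework as follows. Apart from those method-name conventions, all names here are illustrative placeholders, not FRINX classes.

```java
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Framework-free sketch of the base-reader pattern: the abstract base holds the
// parsing logic and the get*Line() regular expressions, each device-specific child
// only supplies getReadCommand().
abstract class AbstractInterfaceConfigReaderSketch {

    // child readers fill in the full (non-abbreviated) read command
    abstract String getReadCommand(String ifcName);

    // regular expression used to parse the command output
    Pattern getDescriptionLine() {
        return Pattern.compile("\\s*description (?<desc>.+)");
    }

    Optional<String> parseDescription(String output) {
        return output.lines()
                .map(line -> getDescriptionLine().matcher(line))
                .filter(Matcher::matches)
                .map(matcher -> matcher.group("desc").trim())
                .findFirst();
    }
}

// device-specific child reader: only the command differs
class IosInterfaceConfigReaderSketch extends AbstractInterfaceConfigReaderSketch {
    @Override
    String getReadCommand(String ifcName) {
        // full command form, as recommended for readers
        return "show running-config interface " + ifcName;
    }
}

public class BaseReaderSketchDemo {
    public static void main(String[] args) {
        AbstractInterfaceConfigReaderSketch reader = new IosInterfaceConfigReaderSketch();
        System.out.println(reader.getReadCommand("GigabitEthernet0/0"));
        System.out.println(reader.parseDescription(" description uplink to core\n shutdown\n").orElse("<none>"));
    }
}
```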
It's usage is following:","\"{$data|update(mtu,mtu `$data.mtu`\\n,no mtu\\n)}\"","$data represents the data structure on which we check if it was updated from the previous state.","mtu first argument represents the name of the field that should be checked within the $data","mtu `$data.mtu`\\n second argument represents the actual string that will be sent to the device if the value of the field named in first argument was changed or didn't exist before","no mtu\\n third argument represents the actual string that will be sent to the device if the value of the field named in first argument was deleted","optional true fourth argument, if present, lets the filter know it should send both outputs to the device, first the delete string (third argument) then the update string (second argument)","Update filter does not send any of the strings to the device, if the value did not change.","When using this filter in updateTemplate method, you must use fT() method (format template) with one pair of the arguments being \"before\", before to let the template know what data represents the previous state.","Note: unfortunately, Opendaylight generates boolean fields instead of Boolean and Chunk does not work with boolean fields in the same way as any other object fields. Therefore for boolean values (eg. shutdown), you cannot use update filter and checking for changes needs to be done in a traditional way."]},{"l":"TranslateUnit","p":["Blueprint example of injecting TranslationUnitCollector to IosXRInterfaceUnit:","Handlers(readers/writers) need to be registered in this method. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - should be passed to readers/writers.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must specify device type and device version during registration. Snippet below shows registration of IosXRInterfaceUnit for device type \"ios xr\" all versions starting with \"5\".","Implementation of TranslateUnit must implement these methods:","Instance-identifier in generic reader/writer must be without keys pointing to the target composite node used in implemented reader/writer.","Instance-identifiers for YANG container and list (not for augmentations and nodes behind augmentations) are automatically generated to IIDs class (used in examples bellow) during build of openconfig project.","Ordering of writers- writers are stored in a linear structure and are invoked in order of registration. When registering a writer a relationship with another writer or set of writers can be expressed using addBefore, addAfter, subtreeAddBefore, subtreeAddAfter methods. E.g. InterfaceWriter and VRFInterfaceWriter should have a relationship: InterfaceWriter -> VRFInterfaceWriter so that first an interface is created and only then assigned to VRF. Note: VRF writer should be between them. If the order is not expressed during registration, commands might be executed on device in an unpredictable/invalid order.","Return RPC services implemented in the translation unit. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - may need to be passed to RPC implementations.","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (Openconfig) translate unit\")","Return YANG models containing composite nodes handled by handlers(readers/writers). 
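A minimal sketch of the update filter inside a base writer's updateTemplate is shown below. The fT(...) "format template" helper is only represented as an abstract method here, and every name except the update filter syntax and the "before" parameter pair is an illustrative assumption rather than the exact FRINX API.

```java
// Sketch of an update-filter based Chunk template in a base writer, following the
// description above; names and the fT signature are illustrative assumptions.
abstract class InterfaceMtuWriterSketch {

    private static final String UPDATE_TEMPLATE = "configure terminal\n"
            + "interface {$data.name}\n"
            // emits "mtu <value>" when the field changed or appeared,
            // "no mtu" when it was deleted, nothing when it is unchanged
            + "{$data|update(mtu,mtu `$data.mtu`\n,no mtu\n)}"
            + "end\n";

    // placeholder for the template-formatting helper mentioned above
    abstract String fT(String template, Object... params);

    String updateTemplate(Object before, Object after) {
        // the "before"/before pair lets the update filter compare against the previous state
        return fT(UPDATE_TEMPLATE, "before", before, "data", after);
    }
}
```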
Default implementation returns empty Set if no handlers are implemented.","rRegistry.add","rRegistry.addNoop","rRegistry.subtreeAdd","Set getYangSchemas()","Set getRpcs(@Nonnull Context context)","String toString()","This method should also register for general Openconfig checks:","Translate unit class must implement interface TranslateUnit. Naming convention for translate unit class is device-type+openconfig-domain+Unit (e.g. IosXrInterfaceUnit). Translate unit class is usually instantiated, initialized and closed from Blueprint.","Use for writers handling data of whole composite node subtrees. This ensures that if only a child node is updated, the writer gets triggered. Method subtreeAdd requires a set of IIDs for all handled children, the IIDs must start from the reader itself, not from root.","Use to register noop writers","Use when a reader implementation also fills composite child nodes of target composite node. Method subtreeAdd requires a set of IIDs for all handled children, the IIDs must start from the reader itself, not from root.","Use when common GenericConfigListReader, GenericConfigReader, GenericOperListReader or GenericOperReader need to be registered.","Use when common GenericListWriter or GenericWriter are registered.","void provideHandlers(@Nonnull ModifiableReaderRegistryBuilder rRegistry, @Nonnull ModifiableWriterRegistryBuilder wRegistry, @Nonnull Context context)","wRegistry.add","wRegistry.subtreeAdd"]},{"l":"CLI Init Translation Unit","p":["Init translation unit does not contain readers and writers but it only contains implementation of TranslateUnit. There should be only one init translation unit per device type. Purpose of the init TU is to setup CLI prompt and define rollback strategy.","The implementation of TranslateUnit needs to override methods:","SessionInitializationStrategy getInitializer(@Nonnull final RemoteDeviceId id, @Nonnull final CliNode cliNodeConfiguration)","Implement and return device specific SessionInitializationStrategy where:","Setup device CLI terminal with attributes like width and length allowing to display infinite output.","Enter desired CLI mode which will be used as default - every reader and writer gets CLI prompt in this state (e.g. EXEC mode for IOS, config mode for IOS-XR, cli mode for Junos)","String toString()","Return unique string among all translation units which will be used as ID for the registration of the translation unit (e.g. \"Junos cli init (FRINX) translate unit\").","These methods may be overridden if necessary:","getPreCommitHook()- method that is invoked before actual commit is written into device. For example this method can enter configuration mode.","getCommitHook()- method that invokes actual commit and should catch any error on commit. Also it should handle any post-commit actions when the commit was successful.","getPostFailedHook()- method that is invoked when commit fails. 
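As a rough, non-authoritative illustration of how the registration methods listed in the TranslateUnit section above (rRegistry.add, wRegistry.addNoop, addAfter, ...) fit together, the following Java fragment sketches a provideHandlers implementation. The handler classes and the IIDs constants are placeholders; only the registry calls and the context.getTransport() wiring follow the description in this section, so treat it as a sketch rather than the exact cli-units API.

```java
// Illustrative fragment of a TranslateUnit implementation. IIDs.* constants and the
// handler classes (InterfaceReader, InterfaceConfigWriter, ...) are placeholders.
@Override
public void provideHandlers(@Nonnull ModifiableReaderRegistryBuilder rRegistry,
                            @Nonnull ModifiableWriterRegistryBuilder wRegistry,
                            @Nonnull Context context) {
    // The CLI transport comes from the context and is passed to every handler.
    Cli cli = context.getTransport();

    // Readers: a list reader for interfaces and a config reader for each interface.
    rRegistry.add(new GenericConfigListReader<>(IIDs.IN_INTERFACE, new InterfaceReader(cli)));
    rRegistry.add(new GenericConfigReader<>(IIDs.IN_IN_CONFIG, new InterfaceConfigReader(cli)));

    // Writers: a noop writer for the structural list entry, a real writer for its config.
    wRegistry.addNoop(IIDs.IN_INTERFACE);
    wRegistry.add(new GenericWriter<>(IIDs.IN_IN_CONFIG, new InterfaceConfigWriter(cli)));

    // Ordering: the VRF-interface writer must run only after the interface config
    // writer, otherwise commands could reach the device in an invalid order.
    wRegistry.addAfter(new GenericWriter<>(IIDs.NE_NE_VR_VR_IN_INTERFACE_CONFIG,
            new VrfInterfaceWriter(cli)), IIDs.IN_IN_CONFIG);
}
```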
Should implement aborts or revert strategies.","Methods like getYangSchemas, getRpcs should return empty sets and method provideHandlers should return nothing, just use the read registry and write registry to register handlers.."]},{"l":"NETCONF Unified Translation Unit","p":["Unified translation units are located in https://github.com/FRINXio/unitopo-units repository.","Kotlin is used as prefered programming language in NETCONF translation units because it provides type aliases and better null-safety."]},{"i":"readers-1","l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or argument nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds"]},{"i":"mandatory-interfaces-to-implement-2","l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG.For more information about methods please read javadocs.","ConfigListReaderCustomizer- implement this interface if target composite node in YANG is list and represents config data.","ConfigReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents config data.","OperListReaderCustomizer- implement this interface if target composite node in YANG is list and represents operational data.","OperReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents operational data."]},{"i":"base-readers-1","l":"Base Readers","p":["Each base reader for netconf readers should be generic. The generic marks the data element within device YANG that is being parsed into. The base reader should contain abstract methods:","fun readIid(): InstanceIdentifier- each child reader should fill in the device specific InstanceIdentifier that points to the information needed for this reader. Arguments may vary and they are used to be more specific IID (eg. when creating an IID to gather information about a specific interface, you may want to pass interface name as argument).","fun readData(data: T?, configBuilder: ConfigBuilder, )- this method is used to transform Openconfig data (contained in ConfigBuilder) into device data (T) using .","Note: naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"i":"writers-1","l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node","The framework provides safe methods to use when handling data on device:","safePut deletes or adds managed data. 
Does not touch data that was previously on the device and is not handled by the writer.","safeMerge stores just the changed data into device. Does not touch data that was previously on the device and is not handled by the writer.","safeDelete removes data from the device only if the managed node does not contain any other information (even one not handled by the writer).","This test demonstrates the usage of safe methods."]},{"i":"mandatory-interfaces-to-implement-3","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). For more information about methods please read javadocs.","ListWriterCustomizer- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","WriterCustomizer- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter."]},{"i":"base-writers-1","l":"Base Writers","p":["Each base writer should be generic and contain abstract methods:","fun getIid(id: InstanceIdentifier): InstanceIdentifier- this method returns InstanceIdentifier that points to a node where data should be written","fun getData(data: Config): T- this method transforms Openconfig data into device specific data (T)"]},{"i":"translateunit-1","l":"TranslateUnit","p":["Translate unit class must implement interface TranslateUnit. Naming convention for translate unit class is just name Unit. Translate unit class is usually instantiated, initialized and closed from Blueprint.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must provide set of supported underlay YANG models. Snippet below shows registration of Unit for junos device version 17.3.","Blueprint example of injecting TranslationUnitCollector to Juniper173InterfaceUnit:","Implementation of TranslateUnit must implement these methods:","toString(): String","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (Openconfig) translate unit\")","getYangSchemas(): Set","Return YANG models containing composite nodes handled by handlers(readers/writers). It must return empty Set if no handlers are implemented.","getUnderlayYangSchemas(): Set","Return YANG module informations about underlay models used in the translation unit. These YANG modules describes configuration of NETCONF capable device.","getRpcs(underlayAccess: UnderlayAccess): Set>","Return RPC services implemented in the translation unit. Default implementation returns an emptySet. Parameter underlayAccess represents object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","provideHandlers(rRegistry: ModifiableReaderRegistryBuilder, wRegistry: ModifiableWriterRegistryBuilder, underlayAccess: UnderlayAccess): Unit","Handlers(readers/writers) need to be registered in this method. 
underlayAccess represents an object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","How to register readers/writers is described in CLI TranslateUnit"]},{"l":"Translation units for different device versions","p":["If we need to implement a new CLI translation unit for a specific version of a device, we create a new TranslateUnit (e.g. located in iosxr/mpls).","In this case we use the IOSXR4.* implementation as an example."]},{"l":"Device registration","p":["In the TranslateUnit we have just created, e.g. MplsUnitXR4.java, we have to register the device as a constant located in ../iosxr/utils/IosXrDevices.java, containing the device type and version as described in the TranslateUnit documentation.","This unit can reuse all writers/readers from existing units, except for the writer (or other handler) we want to alter or create (in our example, the writer for tunnel configuration). We have to create a new writer with the desired behaviour and add it into the provideWriters method."]},{"i":"handlers-1","l":"Handlers","p":["In our example, the newly created writer has to implement the CliWriter interface as well as all the methods mentioned in Writers. With other handlers we proceed with the same logic.","A similar process applies to every new implementation for a different device version."]},{"l":"How to write extensions for OpenConfig"},{"i":"best-practices-for-handlers-readerswriters","l":"Best practices for handlers (readers/writers)","p":["All comments are in English","All defined exceptions can be thrown from the code","All new dependencies and imports are actually used","All variables/methods are actually used","Before pushing the code make sure:","Chunk","Code has correct spacing","Commented out code","Comments are appropriate to the code behavior","Constants","Do not push code that contains the following:","Double blank lines","java regexes","New classes/interfaces have the correct license header","New classes/interfaces/yang model have the correct date","Reflection","Show commands","Static imports","Trailing whitespaces or tabs"]}],[{"l":"FAQ"},{"i":"what-is-the-datastore-used-in-frinx-uniconfig-","l":"What is the datastore used in FRINX UniConfig ?","p":["UniConfig uses a custom in-memory database which is part of MD-SAL and is a very fast storage for YANG-modeled data. UniConfig uses the datastore only for caching data in the scope of a single transaction. For persistence purposes, UniConfig uses a PostgreSQL database."]},{"i":"are-service-instances-stored-in-the-uniconfig-layer-of-frinx-","l":"Are service instances stored in the UniConfig layer of FRINX ?","p":["Only the „outputs“ of a service are stored and managed by UniConfig (e.g. a service generates BGP config for 10 devices, which is pushed into UniConfig). The services themselves are responsible for managing their configuration/operational state and rely on the same database to store configuration or operational data."]},{"i":"how-does-frinx-deal-with-model-changes-","l":"How does FRINX deal with model changes ?","p":["OpenConfig models are compiled as part of UniConfig and for this reason it is possible to change these models only before compilation. 
On the other hand, NETCONF models can be dynamically loaded from the device and also manually updated using a dedicated RPC:","https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/network-management-protocols/uniconfig_netconf/netconf-intro.html#registration-or-refreshing-of-netconf-cache-repository-using-rpc"]},{"i":"does-frinx-provide-auto-rollback-on-all-affected-devices-when-a-transaction-fails-on-one-or-more-devices-","l":"Does FRINX provide auto rollback on all affected devices, when a transaction fails on one or more devices ?","p":["Yes, all onboarded devices have full rollback implemented. It is also possible to disable auto-rollback in UniConfig, so that successfully configured devices will keep their configuration. This can be done by setting the 'do-rollback' flag to False in the input of the Commit RPC."]},{"i":"is-it-possible-to-show-the-differences-between-the-actual-device-configuration-and-the-operational-datastore-while-synchronizing-configuration-into-frinx-","l":"Is it possible to show the differences between the actual device configuration and the operational datastore while synchronizing configuration into FRINX ?","p":["sync (update operational)","show diff","drop the changes from the device by replacing operational with config"]},{"i":"is-any-netconf-device-fully-supported-or-must-openconfig-be-mapped-to-netconf-as-well-","l":"Is any NETCONF device fully supported, or must OpenConfig be mapped to netconf as well ?","p":["You can either use the native device models (via UniConfig native) or use the existing translation units between OpenConfig and vendor models."]},{"i":"are-the-libraries-that-are-used-to-access-the-config-data-store-model-driven-","l":"Are the libraries that are used to access the Config Data Store model driven ?","p":["UniConfig has a DataBroker interface and a concept of InstanceIdentifier. Those are the model-driven APIs for data access. More info:","https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts"]},{"i":"what-would-an-access-to-the-configuration-data-store-look-like-in-code-","l":"What would an access to the configuration data store look like in code ?","p":["A: Just to demonstrate the API, in this example InterfaceConfigurations is read from the CONF DS and put back into the CONF DS.","B: In this example InterfaceConfigurations is read from the OPER DS."]},{"i":"is-it-possible-in-frinx-to-run-transaction-on-two-disjunct-sets-of-devices-simultaneously-","l":"Is it possible in FRINX to run transaction on two disjunct sets of devices simultaneously ?","p":["UniConfig supports a build-and-commit model with which it is possible to configure devices in isolated transactions and commit them in parallel. If there are conflicts between the configured sets of devices, then the second transaction that is committed will fail (however, this cannot happen on disjunct sets of devices)."]},{"i":"what-access-control-measures-does-frinx-offer-","l":"What access control measures does FRINX offer ?","p":["FRINX UniConfig supports local authentication, password authentication, public key authentication, token authentication, RADIUS-based authentication and subtree-based authentication via the AAA Shiro project."]},{"i":"how-does-frinx-report-problems-with-device-interaction-","l":"How does FRINX report problems with device interaction ?","p":["If a device cannot be reached during a UniConfig transaction (after trying to re-establish the connection), a timeout will occur and the cause of the transaction failure will be reported. 
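The code samples referenced in the answer to "What would an access to the configuration data store look like in code ?" are not reproduced in this index. Purely as a schematic illustration of MD-SAL access, the Java fragment below reads InterfaceConfigurations from the CONF datastore, writes it back, and reads the same subtree from the OPER datastore; exact transaction types and method names vary between MD-SAL releases, so treat the calls as indicative only.

```java
// Schematic example only: exact MD-SAL types and signatures differ between releases
// (e.g. CheckedFuture vs FluentFuture, submit() vs commit()), so adapt as needed.
void copyInterfaceConfigurations(DataBroker dataBroker) throws Exception {
    InstanceIdentifier<InterfaceConfigurations> iid =
            InstanceIdentifier.create(InterfaceConfigurations.class);

    // A: read InterfaceConfigurations from the CONF datastore and put it back.
    ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();
    Optional<InterfaceConfigurations> cfg =
            tx.read(LogicalDatastoreType.CONFIGURATION, iid).get();
    if (cfg.isPresent()) {
        tx.put(LogicalDatastoreType.CONFIGURATION, iid, cfg.get());
    }
    tx.submit().get();

    // B: read the same subtree from the OPER datastore.
    ReadOnlyTransaction readTx = dataBroker.newReadOnlyTransaction();
    Optional<InterfaceConfigurations> oper =
            readTx.read(LogicalDatastoreType.OPERATIONAL, iid).get();
}
```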
UniConfig also uses keepalive messages for continuous verification of connection to devices(both using NETCONF and CLI management protocols)."]},{"i":"is-it-possible-to-backup-configuration-","l":"Is it possible to backup configuration ?","p":["UniConfig stores all committed configuration of devices, templates, and snapshots in the PostgreSQL database. We suggest to use existing techniques for backup that are also provided by PostgreSQL."]},{"i":"is-it-possible-to-enforce-policies-over-configuration-changes-","l":"Is it possible to enforce policies over configuration changes ?","p":["All customer specific validations and policy enforcements can be implemented in layers above UniConfig"]},{"i":"in-which-languages-are-the-libraries-to-access-frinx-written-","l":"In which languages are the libraries to access FRINX written ?","p":["UniConfig is written in JAVA and Kotlin which can use data objects generated from YANG. RESTful API (RESTCONF) can be used with language that implements REST client (for example, Python)."]},{"i":"does-frinx-detect-if-a-cluster-node-is-down-on-its-own-or-does-it-rely-on-a-high-availability-framework-","l":"Does FRINX detect if a cluster node is down on its own or does it rely on a high availability framework ?","p":["UniConfig instance is stateless - it doesn’t persist any configuration in its datastore (PostgreSQL is used for persistence) and it doesn’t keep permanent connections (connections to devices are created on-demand in the transaction). Because of the stateless architecture, UniConfig instances in the ‘cluster’ don’t have to communicate with each other and they don’t require any coordination. You must only keep in mind that requests that belong to the same transaction must be forwarded to the same UniConfig backend - for this purpose you can use any HA component that supports sticky sessions based on cookies (such as HA-proxy or Traefik)."]},{"i":"is-it-possible-for-frinx-to-report-problems-to-a-network-monitoring-system-","l":"Is it possible for FRINX to report problems to a network monitoring system ?","p":["FRINX UniConfig can propagate NETCONF notifications and internal UniConfig notifications or data-change-events from web sockets on Northbound API."]},{"i":"is-it-possible-to-do-additional-logging-on-the-logging-provided-by-uniconfig-","l":"Is it possible to do additional logging on the logging provided by UniConfig ?","p":["Yes it is. Each component writes logs at different verbosity levels of logging (ERROR, WARN, INFO, DEBUG, TRACE). We are using the logback framework for logging of messages - logging can be adjusted by modification of config/logback.xml file in the standard way. This file can be updated also on runtime. The second approach for adjusting of logging of some specific components is using logging controller: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/operational-procedures/logging/logging.html"]},{"i":"where-do-i-find-the-status-of-the-device-and-where-do-i-find-error-messages-when-installing-does-not-work-","l":"Where do I find the status of the device and where do I find error messages, when installing does not work ?","p":["installing/uninstalling process is done automatically - device is installed when UniConfig must read/write some data from/to device and device is automatically uninstalled at the end of the transaction if no other transaction is using the same installpoint. Users should not care about the installing process since it is transparent - it is useful only for debugging purposes. 
To get status of the installing process for all devices in the system, issue following request (it will show status as well as last connect attempt cause):","CLI devices:","NETCONF devices:"]},{"i":"what-does-installation-and-installing-exactly-do-","l":"What does installation and installing exactly do ?","p":["Opening IO session to device (TCP session with SSH and/or NETCONF on top of SSH session).","Exposing installpoint that can be used from internal API and RESTCONF API for interaction with device.","Opening internal transaction","installing of device with input parameters (CLI / NETCONF)","Syncing configuration from device","Writing configuration and install information into database","Uninstalling device","Committing transaction"]},{"i":"why-i-can-not-install-junos-device-on-uniconfig-","l":"Why I can not install Junos device on UniConfig ?","p":["If installing Junos devices is not possible and UniConfig gives response :","It is necessary to set up on Junos device netconf session compliant to RFC and Yang schemas (rfc-compliant, yang-compliant)"]}],[{"l":"Glossary of Terms","p":["MD-SAL https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:FAQ- Model driven service application layer","OPENFLOW https://en.wikipedia.org/wiki/OpenFlow- OpenFlow communications protocol that exposes the forwarding plane of a network switch or router over the network OPENDAYLIGHT https://www.opendaylight.org/","RESTCONF https://tools.ietf.org/html/draft-ietf-netconf-restconf-1- draft-ietf-netconf-restconf-12 SDN Software defined networking– management of network services through abstraction of higher-level functionality.","NETCONF https://tools.ietf.org/html/rfc6242","Using the NETCONF Protocol over Secure Shell (SSH) https://tools.ietf.org/html/rfc6241","Network Configuration Protocol (NETCONF) https://tools.ietf.org/html/rfc5277","NETCONF Event Notifications https://tools.ietf.org/html/rfc6243","With-defaults Capability for NETCONF YANG https://tools.ietf.org/html/rfc6020 a modelling language for NETCONF"]}],[{"l":"List of Supported Devices","p":[".*","(mounted as .*)","(mounted as ios xr .*)","(mounted as Junos 14.*)","(mounted as sros .*)","1.*","12.*","13*/14*","14.*","15.*","16.*","16.*(and later)","17.*","18.*","2.*","3.*","4.*","5.*","6.*","6.6.1 (and later)","8.*","Arista","Brocade","Calix","Casa","Ciena","Cisco","CLI access via REST","CLI to OC translation","Cumulus","Cumulus Linux","Dasan","Device OS Type","Device Version","eos","For details of translation units see our Github: cli_units and unitopo_units.","Here you can find list of all the devices and features supported by Frinx UniConfig:","Huawei","ios classic","ios xe","ios xr","IP Infusion","ironware","Juniper","junos","Microsoft","Mikrotik","NETCONF access via REST","NETCONF to OC translation","nexus","Nokia","nos","OC = OpenConfig","OcNOS","SAOS","SonicOS","sros","Ubiquity","ubnt es","Vendor","vrp"]}],[{"l":"FRINX Workflow Manager introduction","p":["FRINX Workflow Manager allows customers to create automated, repeatable, digital processes to build, grow and operate their digital communication infrastructure. FRINX Workflow Manager is based on open-source components and enables infrastructure and network engineers to create and operate workflows to implement configuration changes and obtain operational data from their heterogeneous networks and clouds. 
Typical examples are the automation of services that span resources in the cloud and physical assets, the automation of slices and capacity increases in mobile networks, the interaction with CRM and inventory systems, the management of Internet and infrastructure services and the automation of core network functions. Workflow Manager can be deployed standalone or as part of FRINX Machine.","FRINX Workflow Manager uses Netflix's Conductor for task/workflow orchestration. We recommend taking a look at their documentation as an introduction to Tasks, Workflows and Definitions and as an overall prerequisite to working with FRINX Workflow Manager."]}],[{"l":"Create and Modify Workflows and Workers"},{"l":"Prepare Your Work Environment","p":["After you have installed and started the FRINX Machine (see https://github.com/FRINXio/FRINX-machine) you will want to modify existing workflows or add new workflows and workers to meet your needs. We will be referring to the machine that is running the FRINX Machine containers as the host. Typically that host is a VM running on your laptop, in your private cloud or in a public/virtual private cloud. Here is how to get started."]},{"l":"Creating a worker","p":["Now that we have our environment prepared, we can move on to the first step of creating a workflow. First we will create a worker that defines the tasks utilized in our workflow. The goal is to have the task in our workflow receive two input parameters (id_1 and id_2). The purpose of our task is to add the two input variables and return the result. The execution logic of our task will be implemented in a small Python function called worker.","For full documentation of tasks, workflows and the capabilities of Netflix Conductor, please go to https://netflix.github.io/conductor/","Create a worker in the correct repository (the name of the worker is up to you):","This is what we put in the file in our case:","The core of the worker is a task that contains a simple method which adds the two inputs that the user provides in the GUI, as you will see later. Workers can contain multiple tasks; in our case one is enough as an example.","After this, you must register your worker in the main Python file \"main.py\" in the same directory where you just created your worker. All workers you want to use in FRINX Machine must be included in this file. The file might look similar to this:","Notice lines 22 and 53: you must both import the worker file and include it in the \"register_workers(cc)\" method.","That is all in terms of worker creation. There are, however, a few more things to do in your environment. After doing all the above, we will want to build our FRINX Machine based on our local changes. For that we must edit the file \"swarm-fm-workflow.yml\".","Find the block \"demo-workflows\" in this file. Change the image to use an image called \"local\" (2):","Now we can build our fm-workflows image with the added task. Use:","While it is not necessary to use the \"--no-cache\" flag, we recommend it to make sure you rebuild the image with the newly edited code and not the one stored in the cache.","Now just start fm-workflows and you're good to go:","If you did everything correctly, you will now see your new task in FRINX Machine. Go to Workflow Manager -> Tasks -> Search:","Search integers","Now you can create a workflow that uses this task. 
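The Python worker file used in the walkthrough above is not reproduced in this index, so it is intentionally not recreated here. Purely to illustrate the same add-two-integers task logic in a self-contained form, the sketch below uses Netflix Conductor's Java worker client instead of the Python workers that fm-workflows actually ships; the task name and class are made up and this is not the file referenced above.

```java
import com.netflix.conductor.client.worker.Worker;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskResult;

// Illustrative Java equivalent of the "add two integers" task described above.
// The FRINX demo workers are written in Python; this is only a sketch of the same logic.
public class AddIntegersWorker implements Worker {

    @Override
    public String getTaskDefName() {
        // Hypothetical task name; it must match the registered task definition.
        return "add_integers";
    }

    @Override
    public TaskResult execute(Task task) {
        // Read the two user-provided inputs (id_1 and id_2) from the task input map.
        int id1 = Integer.parseInt(String.valueOf(task.getInputData().get("id_1")));
        int id2 = Integer.parseInt(String.valueOf(task.getInputData().get("id_2")));

        // Return the sum as the task output and mark the task as completed.
        TaskResult result = new TaskResult(task);
        result.getOutputData().put("result", id1 + id2);
        result.setStatus(TaskResult.Status.COMPLETED);
        return result;
    }
}
```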
Workflow Manager-> \"+ New\":"]},{"i":"after-being-prompted-for-inputs-you-should-see-that-addition-ran-successfully","l":"After being prompted for inputs, you should see that addition ran successfully:","p":["Search integers"]}],[{"l":"Device Blueprints","p":["Blueprints allow you to create a template that can be used for quick adding of devices. They are created with JSON snippets."]},{"l":"Creating new blueprint","p":["To create a new blueprint click on the Explore button in the Explore and configure device tab and then click the Blueprints tab in the top bar. Here you can Add blueprint.","Create blueprint"]},{"l":"Using a blueprint","p":["To use blueprint when adding a new device toggle the \"Blueprints\" switch in the form and choose the blueprint that you want to use.","Use Blueprint"]},{"l":"Blueprint examples"},{"i":"cisco-classic-ios-cli","l":"Cisco classic IOS (cli)"},{"i":"cisco-ios-xr-netconf","l":"Cisco IOS XR (netconf)"},{"i":"junos-cli","l":"JUNOS (cli)"},{"i":"calix-netconf","l":"CALIX (netconf)"},{"i":"nokia-netconf","l":"Nokia (netconf)"},{"i":"ciena-cli","l":"Ciena (cli)"}],[{"l":"Device Inventory","p":["Devices are stored in a Device Inventory. From here they can be dynamically installed and uninstalled."]},{"l":"Adding device to inventory","p":["To add new device to invetory, click on the Add device button in the Device inventory tab.","FM Install"]},{"l":"JSON examples","p":["To adding a new device toggle the \"Blueprints\" switch in the form and choose the blueprint that you want to use.","New devices are added by JSON code snippets. They are similar to Blueprints with one addition: device_id must be specified in the snippet."]},{"i":"cisco-classic-ios-cli","l":"Cisco classic IOS (cli)"},{"i":"cisco-ios-xr-netconf","l":"Cisco IOS XR (netconf)"},{"i":"junos-cli","l":"JUNOS (cli)"},{"i":"calix-netconf","l":"CALIX (netconf)"},{"i":"nokia-netconf","l":"Nokia (netconf)"},{"i":"ciena-cli","l":"Ciena (cli)"}],[{"l":"Workflow Builder","p":["Workflow Builder is the graphical interface for Workflow Manager and is used to create, modify and manage workflows."]},{"l":"Creating new workflow","p":["To create a new workflow click on the Create button in the Create workflow tab and fill in workflow general parameters. Then you can proceed with adding tasks .","Parameter Name is required and must be unique. Keep in mind that the name cannot be changed later. Other parameters are optional and can be changed anytime.","Create new workflow"]},{"l":"Editing existing workflow","p":["To edit an already existing workflow, find the workflow in the Definitions tab, click on it and then click on the Edit button. A diagram of the workflow will be rendered on the canvas. Now you can restructure the workflow, add new tasks, remove tasks or edit the workflow information and parameters.","Workflow edit"]},{"l":"Adding tasks","p":["To add new task on canvas, find the task in the left menu and click the + icon.","Add task"]},{"l":"Removing tasks","p":["To remove a task, click on the three dots next to a task and press the Remove task button.","Delete task"]},{"l":"Task parameters","p":["To edit or add task parameters, double-click on the task that is placed on the canvas. 
Input parameters can be declared as:","Input provided by user, e.g.:","Variable provided by other task, e.g.:","Statically defined, e.g.:","For full documentation of tasks see: https://netflix.github.io/conductor/configuration/taskdef/."]},{"l":"System tasks"},{"i":"fork--join","l":"Fork & Join","p":["The 'Fork' function is used to schedule a parallel set of tasks.","A Join task MUST follow Fork task.","Fork and Join"]},{"l":"Decision","p":["A decision task is similar to an if...else statement in a programming language. The task takes 2 parameters:","name of the parameter in the task input whose value will be evaluated (default is param)","value that will be compared with param(or other specified input variable)","If param and is equal to are evaluated as equal, the workflow will continue to If branch, otherwise the workflow will continue in else branch.","Else branch is optional and can be empty."]},{"l":"Lambda","p":["Lambda Task helps execute ad hoc logic at Workflow run-time, using javax & Nashorn Javascript evaluator engine. This is particularly helpful in running simple evaluations in the Conductor server, instead of creating Workers.","The task output can then be referenced in downstream tasks like:"]},{"l":"HTTP","p":["An HTTP system task is used to make calls to another microservice over HTTP. You can use GET, PUT, POST, DELETE Methods and also you can set your custom header."]},{"l":"TERMINATE","p":["Task that can terminate a workflow with a given status and modify the workflow's output with a given parameter. It can act as a \"return\" statement for conditions where you simply want to terminate your workflow. For example, if you have a decision where the first condition is met, you want to execute some tasks, otherwise you want to finish your workflow.","name","description","notes","terminationStatus","can only accept “COMPLETED” or “FAILED”","task cannot be optional","workflowOutput","Expected workflow output"]},{"l":"EVENT","p":["Event task provides ability to publish an event (message) to either Conductor or an external eventing system like SQS. Event tasks are useful for creating event based dependencies for workflows and tasks.","When producing an event with Conductor as sink, the event name follows the structure:"]},{"l":"WAIT","p":["A wait task is implemented as a gate that remains in IN_PROGRESS state unless marked as COMPLETED or FAILED by an external trigger. To use a wait task, set the task type as WAIT"]},{"l":"jsonJQ","p":["jsonJQ is like sed for JSON data - it is especially useful for filtering JSON data.","Example of jsonJQ query expression could be:","It searches through the whole config and under the\"Cisco-IOS-XR-ifmgr-cfg:interface-configurations\" model we find the interface with a description that the user inputs$. The task would return the name interface with fitting description."]},{"l":"Kafka publish","p":["Kafka is a distributed publish-subscribe messaging system and a robust queue that can handle a high volume of data and enables you to pass messages from one end-point to another.","Kafka"]},{"l":"Subworkflows","p":["Subworkflows act as a regular tasks inside a parent workflow. Subworkflows can be expanded to view the tasks they contain (or other nested subworkflows) by clicking the three dots next to the subworkflow and then clicking the Expand button. 
Expanded subworkflows can be then edited the same way as parent workflow.","Simple tasks differs in color shade from Subworkflow tasks and cannot be expanded.","Expand"]},{"l":"Linking tasks","p":["To connect tasks or subworkflows into execution flow, drag and drop respective Out and In endpoints on nodes, like this: Out-> In"]},{"l":"Unlinking tasks","p":["To remove the link, double-click on the link."]},{"l":"Adding workflow information","p":["To provide additional workflow information, click on Actions in the upper right-hand corner and then click Edit workflow."]},{"l":"Output parameters","p":["We can specify custom output parameters of a workflow, by using JSON templates to generate the output of the workflow. If not specified, the output is defined as the output of the last executed task.","Let's say we have a task with taskReferenceName: task1 which returns summary and we want output of the worklow to be output of this specific task only. The outputParameter value named e.g. finalResult will be:","For full documentation of workflow parameters and definition read https://netflix.github.io/conductor/configuration/workflowdef/."]},{"i":"defaults--description","l":"Defaults & Description","p":["Here, we can define default values and descriptions for workflow inputs. Each input value declared as ${workflow.input...} will appear in a dropdown list of available input parameters."]},{"l":"Save and execute workflow","p":["To Save workflow, click on the Actions button in the upper right corner and select Save workflow. Then you can find the workflow in the Explore workflows section under Definitions tab.","To Execute workflow directly from the builder, click on the Save and execute button in the upper right corner. You will be prompted to provide input parameters.","Executing workflow will also save the workflow."]},{"l":"Import and export of workflows","p":["To import workflow, click the setting icon and then select the Import button. Only valid JSON definition of the workflow will be imported.","Imported workflow will not be saved until you Save or Execute it.","Import/Export workflow","To export and save the workflow in JSON format into your filesystem, click on Export button.","In order to choose a location to which you want to export the workflow, you have to have it enabled in your browser settings. Default location is Downloads folder."]}],[{"l":"FRINX Resource Manager introduction","p":["FRINX Resource Manager was developed for network operators and infrastructure engineers to manage their physical and logical assets and resources. Examples for assets are locations, equipment, ports and services. Examples for resources are IP addresses, VLAN IDs and other consumables required for operating data services. Resource Manager was developed specifically to address the needs of network and infrastructure engineers working with communication networks. FRINX Resource Manager provides GUI and a GraphQL based API to create, read, update and delete assets. Resource Manager can be deployed standalone or as part of FRINX Machine."]},{"l":"Features","p":["Following list contains features inherent to Resource Manager."]},{"l":"Resource type management","p":["Example resource types:","Location","Name: Latitude","Name: Longitude","Name: name of the property","Name: RD","Name: vlan","Property type","Resource Manager is flexible enough to enable user defined resource types without requiring code compilation or any other non-runtime task. 
With regard to resource types, this requires keeping the schema flexible enough so that users can define their own types and properties and thus create their own model.","Resource type is a blueprint for how to represent a resource instance. A resource type is essentially a set of property types, where each property type defines:","Route distinguisher","Type: float","Type: int","Type: int, string, float etc.","Type: String","VLAN"]},{"l":"Resource management","p":["A resource is an instance of a resource type consisting of a number of properties.","Example resources based on resource types from previous section:","VLAN_1","Property","Name: vlan","Value: 44","Route distinguisher_1","Name: RD","Value: 0:64222\uD83D\uDCAF172.16.1.0","Location_1","Name: Latitude","Value: 0.0","Name: Longitude","Resource types"]},{"l":"Flexible design","p":["One of the main non-functional goals of the Resource Manager is flexibility. We are designing Resource Manager to support an array of use cases without the need for modifications. To achieve flexibility we are allowing:","Custom resource type definition without changes in the DB schema","Custom allocation logic without the need to modify the backend code","Custom pool grouping to represent logical network parts (subnet, region, datacenter etc.)"]},{"l":"Multitenancy and RBAC","p":["Multitenancy and Role Based Access Control is supported by Resource Manager.","A simple RBAC model is implemented where only super-users (based on their role and user groups) can manipulate resource types, resource pools and labels. Regular users will only be able to read the above entities, allocate and free resources.","Resource Manager does not manage list tenants/users/roles/groups and relies on external ID provider. Following headers are expected by Resource Manager graphQL server:","Resource Manager does not store any information about users or tenants in the database, except the name or ID of a tenant provided in x-tenant-id header."]}],[{"l":"User Guide"},{"l":"API","p":["See examples in api_tests or a VRF IP management sample use case in postman collection."]},{"l":"UI","p":["See the Resource Manager frontend project on GitHub"]}],[{"l":"Pools","p":["A resource pool is an entity that allocates and deallocates resources for a single specific resource type. Resource pools are completely isolated from each other and there can be multiple resource pools for the same resource type even providing the same resource instances. Resource pools encapsulate the allocation logic and keep track of allocated resources. A pool instance should manage resources within the same network or logical network part (e.g. subnet, datacenter, region or the entire, global network).","Example pools:","IPv4 address pool allocating IP addresses from a range / subnet","VLAN pool allocating all available VLAN numbers 0 - 4096","Route distinguisher pool allocating route distinguishers from a specific, per customer, input","Depending on resource type and user’s requirements, pools need to be capable of allocating resources based on various criteria / algorithms. Currently, following pool types are supported by Resource Manager:"]},{"l":"SetPool","p":["Pool with statically allocated resources. Users have to define all the resources to be served in advance. 
The pool just provides one after another until each resource is in use.","This type of pool is suitable for cases where a set of resources to be served already exists.","Properties of SetPool","Config","Set of unique resources to provide","Name of the pool","Resource recycling - whether deallocated resources should be used again","Operational","Utilisation - % of pool capacity used"]},{"l":"SingletonPool","p":["SingletonPool serves just a single resource for every request.","This type of pool can be utilized in special uses cases such as serving a globally unique single AS number of an ISP. Instead of hardcoding the AS number as a constant in e.g. workflows, it can be “managed” and stored in the Resource Manager.","Properties of SingletonPool","Config","A single unique resources to provide","Name of the pool"]},{"l":"AllocatingPool","p":["a predefined set of resources cannot be used","AllocatingPool is a type of pool that enables algorithmical resource allocation. Instead of using a pre-allocated set of resources to simply distribute, it can create resources whenever asked for a new resource. This type of pool allows users to define a custom allocation logic, attach it to the pool and have use-case specific resource allocations available. Important feature of this pool type is the ability to accept new allocation logic from users in the form of a script without having to rebuild the Resource Manager in any way.","Allocation strategy - a script defining the allocation logic","Config","Example AllocationPools:","In general, anything that a user might need","Limit - hard limit on total number of resource that can be produced","Name of the pool","Operational","or in general whenever using an allocation script makes more sense then using a predefined set of resources","Pool providing all available VLAN numbers","Pool providing IPv4-mapped IPv6 addresses from a specific range / subnet","Pool providing just odd VLAN numbers","Pool providing random VLAN numbers","Pool providing Route Distinguishers that include customer specific information (which is passed as “additional input” as part of resource claim request)","Properties of AllocatingPool","resource creation requires additional inputs","Resource recycling - whether deallocated resources should be used again","This type of pool can be used when","Utilisation - % of pool limit used"]},{"l":"Nested pool","p":["Resource Manager allows to create nested pools. Nested pools provide possibility to create subgroups from already existing pools. With these subgroups it is easier to reason about topology."]},{"l":"How to create nested pool","p":["Process (in UI):","Create pool or open existing one","Allocate resource in newly created or existing pool","Open create pool page","Select parent from which nested pool should be created","Select allocated resource of parent from which nested pool will be taking resources","Fill other mandatory inputs","Push button to create nested pool","After successful submit newly created nested pool should be visible in pools list or in nested pools list in its parent detail page. Also it is possible to create nested pool from detail page of pool."]},{"l":"Allocation strategy overview","p":["Allocation strategy encapsulates the allocation logic and is always tied to (an) instance(s) of AllocatingPool. 
The strategy is defined in form of a script using Javascript (or similar) language and its responsibility is:","To produce a new (unique) resource instance based on a set of previously allocated resources and any additional, user submitted input.","Apart from a resource being unique, there are no other requirements on what the strategy needs to do. It gives users the freedom to implement any logic.","Allocation strategy can take any input provided in a structure named userInput. This input is provided by the user every time they claim a new resource.","Allocation strategy also gets access to a list of already allocated resources and any properties associated with the pool being utilized."]},{"l":"Pool hierarchies","p":["Resource Manager allows pools to be organized into hierarchies e.g."]},{"l":"Labels","p":["Labels enhance resource management by allowing a pool to be marked with a custom string. Multiple pools can have the same label forming a logical group of pools.","A group of pools under the same label can be dedicated to some logical part of a network (e.g. datacenter, subnet, region etc.).","A single pool should typically have only one label i.e. it should not be re-used across unrelated networks.","The following diagrams represent some of the configurations that can be achieved using Labels:"]},{"i":"configuration-pool-instance-per-label","l":"Configuration: Pool instance per Label","p":["Enables: Resource reuse in multiple networks","Instance per label"]},{"i":"configuration-pool-instance-under-multiple-labels","l":"Configuration: Pool instance under multiple labels","p":["Enables: Unique resources across different networks","Instance multiple labels"]},{"i":"configuration-pool-grouping","l":"Configuration: Pool grouping","p":["Enables: Dividing resource pools into groups based on network regions. Enables users to simply ask for a resource based on label name + resource type (removing the need to know specific pools)","Pool grouping"]},{"i":"configuration-multiple-pool-instances-under-the-same-label","l":"Configuration: Multiple pool instances under the same Label","p":["Enables: Resource pool expansion in case an existing pool runs out of resources. Serves as an alternative to existing pool reconfiguration. If multiple pools of the same type are grouped under the same label, the pools are drained of resources in the order they have been added to this group/label.","Multiple pool instances"]}],[{"l":"Resource Manager architecture","p":["Following diagram outlines the high level architecture of Resource Manager.","Architecture","User authentication and authorization as well as user and tenant management is outside of Resource Manager. Resource Manager is typically deployed behind an api-gateway that handles authentication and authorization relying on an external Identity Managmenet system.","The only aspect of tenancy management that needs to be handled by Resource Manager is: per tenant database creation and removal. 
Each tenant has its own database in database server."]},{"l":"Technology stack","p":["AAA","Also handles schema migration: creates or updates tables in DB according to ent schema","Backend server","Database","Ent is an ORM framework for go","Entgo.io","Gqlgen","Gqlgen is a graphql framework for go","GraphQL","Isolated and limited for safety and performance","Postgres","Primary API of Resource Manager will be exposed over GraphQL(over HTTP)","PSQL is the DB of choice, but thanks to ent framework hiding the interactions with the database, other SQL DB could be used in the future","RBAC rules can be defined as part of the schema","Resource Manager will rely on technologies used by the Inventory project currently residing at: https://github.com/facebookincubator/magma since both projects are similar and have similar requirements.","Separate process","Tenant and user management is out of scope of Resource Manager and will be handled by an external identity management system.","This section provides details on intended technologies to develop Resource Manager with.","WASM","Web assembly runs any user defined code executing allocation logic for user defined resource pools","Works well on top of entgo.io ORM"]},{"l":"Entity model","p":["Following diagram outlines the core entity model for Resource Manager:","Entities"]}],[{"l":"Developer Guide"},{"l":"Dependency on symphony","p":["Resource Manager currently depends on a project called symphony.","This project is not publicly accessible and without access to it, Resource Manager cannot be built. In that case, use pre built docker images from dockerhub."]},{"l":"Folder structure","p":["api-tests","core codebase for pools and resoruce allocation","ent- ORM schema and generated code for the DB","ent/schema","graph/graphhttp","graph/graphql","graphQL schema and generated code for graphQL server","graphQL server","integration tests","logging","logging framework","multitenancy, RBAC and DB connection management","ORM schema","pkg- helm chart for Resource Manager","pools","psql","psql DB connection provider","viewer"]},{"l":"Build","p":["It is advised to build Resource Manager as a docker image using Dockerfile and run it as a docker container.","The reason is that Resource Manager uses wasmer and pre built js and python engines for wasm. These are not part of the codebase and thus simply running Resource Manager would fail, unless you provide these resources e.g. by copying them out of Resource Manager built docker image.","Resource Manager utilizes wire to generate wiring code between major","components. Regenerating wiring is not part of standard build process ! After modifying any of the wire.go files perform:"]},{"l":"GraphQL schema","p":["Resource Manager exposes graphQL API and this is the schema."]},{"l":"Built in strategies","p":["Resource Manager provides a number of built in strategies for built in resource types and are loaded into Resource Manager at startup.","Built in strategies code base","Built in strategies unit tests","These strategies need to be tested/built and packaged for Resource Manager. This test/build process in scrips section of package.json while the packaging part can be found in generate.go.","Resource types associated with these strategies can be found in load_builtin_resources.go."]},{"l":"Unit tests"},{"l":"Integration tests"},{"l":"API tests","p":["There's a number of api tests available and can be executed using integration-test.sh. 
These tests need to be executed against Resource Manager running as a black box (ideally as a container)."]},{"l":"Wasmer","p":["There's a number of tests testing core components that require wasmer, quickjs and python packages to be available. It is recommended to run these tests in a docker container.","Example execution:"]},{"l":"Additional info"},{"l":"Telementry","p":["Support for tracing (distributed tracing). Streams data into a collector such as Jaeger. Default is Nop. See main parameters or telementry/config.go for further details to enable jaeger tracing"]},{"l":"Health","p":["Basic health info of the app (also checks if mysql connection is healthy)"]},{"l":"Metrics","p":["Prometheus style metrics are exposed at:"]}]] \ No newline at end of file +[[{"i":"welcome-to-frinx-documentation","l":"Welcome to FRINX Documentation!","p":["The FRINX documentation site contains all FRINX projects, releases and documentation. Please, use search bar in the upper left corner to find specific issues and information that you demand."]},{"l":"FRINX Machine","p":["FRINX Machine provides a platform allowing easy definition, execution and monitoring of complex workflows using FRINX UniConfig."]},{"l":"FRINX UniConfig","p":["FRINX UniConfig is a suite of applications aimed at network configuration management."]},{"l":"FRINX Workflow Manager","p":["FRINX Workflow Manager allows customers to create automated and repeatable digital processes to build, grow and operate their digital communication infrastructure."]},{"l":"FRINX Resource Manager","p":["FRINX Resource Manager helps network operators and infrastructure engineers manage their physical and logical assets and resources."]}],[{"l":"FRINX Machine introduction","p":["FRINX Machine is a dockerized deployment of multiple elements. The FRINX Machine enables large scale automation of network devices, services and retrieval of operational state data from a network. User specific workflows are designed through the use of OpenConfig NETCONF & YANG models, vendor native models, and the CLI. The FRINX Machine uses dockerized containers that are designed and tested to work together to create a user specific solution.","For installation, please refer to: FRINX Machine repository","FRINX-machine can be installed in Kubernetes using the Helm chart"]},{"l":"FRINX Machine components"},{"l":"FRINX UniConfig","p":["Connects to the devices in network","Retrieves and stores configuration from devices","Pushes configuration data to devices","Builds diffs between actual and intended config to execute atomic configuration changes","Retrieves operational data from devices","Manages transactions across one or multiple devices","Translates between CLI, vendor native, and industry standard data models (i.e. OpenConfig)","Reads and stores vendor native data models from mounted network devices (i.e YANG models)","Ensures high availability, reducing network outages and down time","Executes commands on multiple devices simultaneously"]},{"i":"netflix-conductor-workflow-engine","l":"Netflix Conductor (workflow engine)","p":["Atomic tasks are chained together into more complex workflows","Defines, executes and monitors workflows (via REST or UI)","We chose Netflix’s conductor workflow engine since it has been proven to be highly scalable open-source technology that integrates very well with FRINX UniConfig. 
Further information about conductor can be found at:","Sources: https://github.com/Netflix/conductor","Docs: https://netflix.github.io/conductor/"]},{"i":"elasticsearch-inventory-and-logs","l":"Elasticsearch (inventory and logs)","p":["Stores inventory data in near real-time","Stores workflow execution and meta data","Stores UniConfig logs"]},{"i":"uniconfig-ui-user-interface","l":"UniConfig UI (user interface)","p":["This is the primary user interface for the FRINX Machine","Allows users to create, edit or run workflows and monitor any open tasks","Allows users to mount devices and view their status. The UI allows users to execute UniConfig operations such as read, edit, and commit. Configurations can be pushed to or synced from the network","Inventory, workflow execution, metadata and UniConfig log files are all accessible through the UI","View inventory, workflow execution, metadata and UniConfig log files"]},{"l":"High Level Architecture","p":["Following diagram outlines main functional components in the FRINX Machine solution:","FM Architecture","FRINX Machine repository is available at https://github.com/FRINXio/FRINX-machine","Frinx-conductor repository is available at https://github.com/FRINXio/conductor"]},{"l":"Defining a workflow","p":["The workflows are defined using a JSON based domain specific language(DSL) by wiring a set of tasks together. The tasks are either control tasks (fork, conditional, etc.) or application tasks (i.e. encoding a file) that are executed on a remote device.","The FRINX Machine distribution comes pre-loaded with a number of standardized workflows","A detailed description of how to run workflows and tasks, along with examples, can be found in the official Netflix Conductor documentation"]},{"l":"Operating FRINX Machine","p":["To find out more about how to run the pre-packaged workflows, continue to Use cases"]}],[{"l":"Frinx Machine with Azure AD","p":["Frinx Machine supports authentification and authorization via Azure AD. The following sections describe how to set up Azure AD for Frinx Machine."]},{"l":"Client configuration","p":["Register the application in your Azure AD and configure the following settings."]},{"l":"Redirect URIs","p":["Cloud Postman","Cloud swagger","Frontend Login","Frontent login URI is passed to the installation script azure_ad.sh via --redirect_url flag.","https://< IP/DNS>/ ,e.g. https://localhost/","https://< IP/DNS>/oauth2-redirect.html","https://editor.swagger.io/oauth2-redirect.html","https://getpostman.com/oauth2/callback","https://oauth.pstmn.io/v1/callback","Local Postman","Platform configuration","Redirect URI","Set platform redirect URIs on the Authentication page. The table below shows examples of configuration settings.","Single-page application","Syntax","Web","Workflow Manager docs (swager)"]},{"i":"implicit-flow-and-singlemulti-tenancy-settings","l":"Implicit flow and single/multi-tenancy settings","p":["On the same page choose single/multi-tenancy. Based on this setting the parameter --tenant_name is defined in the installation script azure_ad.sh.","For a single-tenant, use Azure AD domain name from AD overview. For multi-tenant use value common. Enabled implicit flow is optional based on specific requirements.","Token config"]},{"l":"API permissions","p":["Client API permissions"]},{"l":"Client secrets","p":["Generate secret and use it as an input parameter for --client_secret flag in the installation script azure_ad.sh. 
This secret is used in KrakenD azure plugin for translating group id to the group name (human-readable format).","Azure client secrets"]},{"l":"Token claims configuration","p":["Example of encoded JWT token with claims. These claims are transferred to the request header (see KrakenD Azure Plugin docs for more info)."]},{"l":"RBAC configuration","p":["Super user is defined in .env file via ADMIN_GROUP variable."]},{"l":"Workflow Manager","p":["RBAC proxy adds 2 features on top of tenant proxy:","Ensures user authorization to access certain endpoints","Filters workflow definitions and workflow executions based on user's roles, groups and userID","RBAC support simply distinguishes 2 user types: an admin and everyone else. An admin has full access to workflow API while the ordinary user can only:","Read workflow definitions","Ordinary users can only view workflow definitions belonging to the same groups","A workflow definition (created by an admin) can have multiple labels assigned","A user can belong into multiple groups","User groups are identified in HTTP request's header field x-auth-user-roles","If an ordinary user's group matches one of the workflow labels, the workflow becomes visible to the user","Execute visible workflow definitions","Monitor running executions","Only those executed by the user currently logged in","Define user roles in workflow by adding role or group name to description label.","Example: added User.ReadWrite, Role.ReadWrite, Group.ReadWrite labels to workflow description."]},{"l":"Uniconfig","p":["Super-users (based on their role and user groups) can use all REST APIs. Regular users will only be able to use GET REST API requests.","Role","READ (GET REQUEST)","WRITE (ALL REQUEST)","Admin (Superuser)","true","Regular user","false"]},{"l":"Resource Manager","p":["A simple RBAC model is implemented where only super-users (based on their role and user groups) can manipulate resource types, resource pools and labels. Regular users will only be able to read the above entities, allocate and free resources.","Role","READ","WRITE","Admin (Superuser)","true","Regular user","false"]}],[{"l":"Grafana","p":["Grafana is an open source visualization and analytics software. It allows to query, visualize, alert on, and explore metrics, logs, and traces no matter where they are stored.","Grafana Login page","By default, Grafana can be accessed at localhost:3000 or 127.0.0.1:3000","Default credentials are:","Username: frinx Password: frinx123!"]},{"l":"Monitoring","p":["Grafana in FRINX Machine monitors multitude of metrics. At this time, these are:","Device monitoring","FRINX Machine logs","Node monitoring","Swarm monitoring","SSL monitoring","UniConfig-controller monitoring","Workflows monitoring"]},{"l":"Device Monitoring","p":["This dashboard displays data on a specific installed device/node."]},{"l":"FRINX Machine Logs","p":["This dashboard monitors all services running in FRINX Machine. You can filter by individual services, and also look for a specific value.","Logs Monitoring"]},{"l":"FRINX Machine Node Monitoring","p":["This dashboard monitors the state of VM/System where FRINX Machine is running. It reports info like CPU utilisation, Memory utilisation, Disk usage, Up-time etc.","Node Monitoring"]},{"l":"FRINX Machine Swarm Monitoring","p":["This dashboard monitors metrics specifically tied to FM within the VM/System. 
Metrics like Up-time, Available/Utilised memory, Number of running/stopped containers, CPU usage per container, Memory usage per container, Incoming/Outgoing network traffic, etc.","Swarm Monitoring"]},{"l":"SSL Monitoring","p":["This dashboard displays data about your SSL certificates. It displays the dates until which your certificates are valid."]},{"l":"UniConfig Controller Monitoring","p":["This dashboard keeps track of various UniConfig transactions. It displays the number of transactions at a given time."]},{"l":"Workflows Monitoring","p":["Collecting data on workflows is being worked on."]}],[{"l":"Demo Use Cases","p":["There are several ways of installing devices in FRINX Machine. You can either run a workflow to install a network device directly, or you can add devices to your Kibana inventory and install devices from there. From your Kibana inventory, you can install a single device, but you can also install every device in the inventory simultaneously.","To start installing devices, open up the FRINX UniConfig UI."]},{"l":"Open FRINX UniConfig UI","p":["Open your browser and go to [host_ip]; if installed locally, go to https://localhost. This is the GUI (UniConfig UI) for managing all of your devices. You should see a screen like this:","FM 2.0 Dashboard","For Demo Use Cases, please download the repository fm-workflows","Make sure FRINX-machine is running, navigate to","and execute","Imported workflows and tasks will appear in the FRINX-Machine UI immediately after the import finishes.","In the following articles, you'll learn how to install a device from UniConfig and how to install all devices from the inventory. This inventory is automatically set up for you when you start FRINX Machine. After that, we'll learn how to create a loopback address on the devices that we previously stored in the inventory and how to read the journals of these devices.","Then we'll take a look at how to obtain platform inventory data from the devices that you have in the network and how to store them in inventory. Next, you'll learn how to save commands to your inventory and execute them on the devices that are in your network.","Lastly, we'll take a look at how you can add devices to your inventory manually. This might be useful if you want to play around with the FRINX Machine a bit and try installing your own networking devices."]}],[{"l":"Add a device to inventory and install it"},{"l":"Adding device to inventory","p":["To add a new device to inventory, click on the Add device button in the Device inventory tab.","Add device to inventory"]},{"l":"JSON examples","p":["New devices are added to inventory using JSON code snippets. They are similar to Blueprints with one addition: device_id must be specified in the snippet.","To add a new device from a Blueprint, toggle the \"Blueprints\" switch in the form and choose the blueprint that you want to use."]},{"i":"cisco-classic-ios-cli","l":"Cisco classic IOS (cli)"},{"i":"cisco-ios-xr-netconf","l":"Cisco IOS XR (netconf)"},{"i":"huawei-cli","l":"Huawei (cli)"},{"i":"calix-netconf","l":"CALIX (netconf)"},{"i":"nokia-netconf","l":"Nokia (netconf)"},{"l":"Install the new device from Inventory","p":["Now that the device is added, we can install it. We used to need a dedicated workflow to install a device from inventory, but now it can be done purely via the UI. 
Click on Explore in the Explore & configure devices tab, under the Device Inventory section.","Install device from inventory","If you did everything correctly, your device is now in inventory and installed, ready to be operated through Frinx Machine."]}],[{"l":"Creating a Layer 2 VPN Point-to-Point Connection","p":["This section details how to find and execute a prebuilt workflow that creates a Layer 2 VPN Point-to-Point connection within Workflow Manager."]},{"l":"Navigating through Workflow Manager","p":["From the FRINX Machine dashboard you can either select Workflow Manager--> Explore Workflows--> Explore, or select the menu tab in the upper left-hand corner and select Workflow Manager.","You can then search for Create_L2VPN_P2P_OC_uniconfig or scroll down to find it within the inventory of prebuilt workflows.","Frinx Machine Dashboard","Workflows Dashboard","Once you have located the workflow, press the Play button to the right of it; this will navigate you to the workflow configuration window."]},{"l":"Configuring the Workflow","p":["Input is pre-filled with the following data:","L2 VPN Configuration","Once you have finished, press the Execute button. A numeric link will appear to the left of the Execute button. Click on this numeric link to see the output of the executed workflow.","Numeric Link"]},{"l":"Output of the Executed Workflow","p":["On the Workflows page you will see your executed workflows.","Select the workflow Create_L2VPN_P2P_OC_uniconfig to see the output from all of the tasks completed within this workflow.","Executed Workflow Details","The following sections are available within the output window:","Task Details: This tab gives a detailed list of the individual tasks executed within the conductor, a log of each task's start and end time, and a status of 'Completed' or 'Failed'.","Input/Output: This is the input of the API call and the results from the API call.","JSON: This tab gives a detailed output in JSON format of all executed tasks from within the workflow. Select the Unescape button to make the output more user-friendly to read.","Edit Rerun: Allows you to make changes to your initial workflow, creating a new workflow without affecting the original.","Execution Flow: A structured map from the conductor lays out the path of tasks executed from start to finish; any forks in the path are also shown here.","If you click on any of the tasks you will receive a pop-up window that gives:","The option to review a summary of input and output of the API call.","JSON output of the completed task that goes into greater detail about the task execution.","Log status."]},{"l":"Sub-Workflows","p":["Within the original Details of Create_L2VPN_P2P_OC_uniconfig window you will see a sub-workflow.","Sub-Workflow","This sub-workflow is an embedded task that makes a separate API call to Slack to notify a pre-defined user group that the workflow has been executed and whether it has succeeded or failed."]}],[{"l":"FRINX Machine Demo Manual","p":["Open the Frinx Demo at https://services.frinx.io/frinxui/. (Note that Mozilla Firefox is not supported.)","Select Login in the upper-right corner to log into the service. Please contact info@frinx.io for login credentials.","After logging in, you can see the FRINX Machine dashboard:","FRINX Machine dashboard"]},{"l":"Demo Config Manager UI","p":["Using the Demo Config Manager:","On the FRINX Machine main page, select Explore & configure devices.","Make sure that the device you want to configure is installed. 
If not, select Install first.","For this demo, we use the IOS01 device. Locate the device in the list and select the corresponding gear icon on the right. (If you see a message saying Transaction expired, select Refresh).","FRINX Machine dashboard","For the Loopback0 interface, change the enabled status to false.","Select Save to save your changes.","To review your changes, select Calculate diff.","To view the set of commands used for the change, select Dry run.","To apply changes to the device, select Commit to network. You can also see the changes in the Operational data store.","To revert changes made to the device configuration:","Select Transactions.","Select the Revert icon for your transaction.","Select Revert changes."]},{"l":"Demo workflow UI basics","p":["Workflow Builder is a graphical interface for Workflow Manager and is used to create, modify and manage workflows.","Workflows are groups of tasks and/or sub-workflows that can be used, for example, to install or delete devices, create loopback interfaces on devices, send messages and much more. You can create your own workflows or edit existing ones by adding or removing tasks or sub-workflows.","Every task and sub-workflow placed in a workflow has a unique reference alias, and no two workflows can share a name and version."]},{"l":"How to create a new custom workflow","p":["A translation of what is happening here: \"If the identified device is of the type saos, then extract the name from the output message of the previous task, change the letters to uppercase, extract the version from the output message of the previous task, glue them together and add _1(because that is how devices are named in this demo topology\".","Above every task or workflow there are two icons:","As above, if we enter the username and password directly, the workflow will not ask for credentials at startup.","decision task: Makes a different kind of decision from the lambda task discussed above. This task works like a switch on a track, sending the train one way or another. The data needed to make a decision is supplied by the lambda task.","Device_identification task:","Enter details for the new workflow. Under Name, enter a name for your workflow (note that this name cannot be changed later). The Description is for additional information about the workflow and can be left empty. Label can help you to find your workflow later under Explore workflows, but can also be left empty. Select Save changes when ready.","Enter the following into the body:","Finding your new workflow and running it with multiple different inputs such as 10 000, 10 002, 10 012, etc.","For different ports, you can see different devices with other run commands in memory.","FRINX Machine dashboard","FRINX Machine dashboard FRINX Machine dashboard","If the input value for decision is other, it directs the flow towards device_identification. If the input value is false, it directs the flow towards terminate. This corresponds to the way we connected the cells in the workflow builder.","In the Input parameters tab and the Lambda value field, enter: ${workflow.input.port}. This indicates that the task should work with what was entered in the port field in the input of this workflow. (We will cover this later, in section 7.)","In the Input parameters tab under management_ip, enter sample-topology. This is the name of the topology in this installation, whereas in production you would use a real name. For port, enter ${workflow.input.port}. 
If you enter a port number manually, the workflow will not ask for one when started (the same goes for management_ip and other fields). However, we want the user to be able to select a port they are interested in, as we did with the lambda task in section 4.","In the Input parameters tab, delete the default parameter foo. For the param parameter, enter ${lambda_IkSu.output.result.value}. (Note that IkSu is an automatically generated reference alias that you must edit to match the one generated for you.) What ${lambda_IkSu.output.result.value} means is to take the value from lambda_xyzq which is in the output, find the result in the output and the value in it.","In the Input parameters tab, enter COMPLETED(or FAILED, at your discretion) in the Termination status field. You can enter whatever message you want in the Expected workflow output field (for example, Device not supported.)","In the Script expression field, enter a small function which we described above.","In this case, if the specified port is both greater than or equal to 10000 and less than 10005, the status chosen is keep working. Otherwise, the status is end. This status is the output of the lambda and the input for the next task or sub-workflow.","lambda task: Makes a decision on which status to choose based on the embedded port. In this example we will only consider ports 10000–10004, and others are ignored. The lambda task lets you enter a small code (lambda - function without name) into the workflow builder.","Like we mentioned above, in this demo workflow we will assume that login credentials are the same everywhere.","Next steps:","Now we can add more tasks. In the left column under System tasks, we can add another lambda. In the Workflows section, you can find Read_journal_cli_device. Let us place them next to each other after Device_identification and concatenate them:","Now we can create a new workflow from scratch:","password: ${workflow.input.password}","Read_journal_cli_device: In the Input parameters tab under device_id, enter ${lambda_ZW66.output.result}.","Remove/Expand:","Save and run your workflow.","Second lambda: Enter ${Device_identificationRef_f7I6.output} as the lambda value, meaning \"take the output from the previous Device_identification task and use that\".","Select Create on the main page of FRINX Machine.","Sub-workflows are similar to classic workflows, but inside of another workflow. The workflow that we are creating can also be used as a building block for another workflow, becoming a sub-workflow itself. In this manner, we can layer and reuse previously created workflows.","terminated task:","The output from Read_journal_cli_device is concatenated with END, as is the output from terminated. Thus we have closed our custom workflow.","Under System tasks, click the + sign for the lambda, decision and terminate tasks. Under Workflows, click the + sign for Device_identification. Tasks and sub-workflows are added on top of each other on the canvas and can be dragged around. To connect all parts of the workflow, hover over IN and OUT where the + sign appears. Connect the parts as follows: START- lambda- decision- (other) to Device_identification and default to terminate. 
Each task and workflow has a reference alias after its name, which works as a unique identifier.","Update:","username and password: For this demo, we assume that the following login credentials are used on all devices: username: frinx and password: frinx","username: ${workflow.input.username}","When working with devices using different login credentials, you need to be able to change or enter them at startup. This can be achieved in the same way as with the port parameter:"]},{"i":"demo-creating-a-loopback-address-on-devices-stored-in-the-inventory","l":"Demo: Creating a loopback address on devices stored in the inventory","p":["This workflow creates a loopback interface on all devices installed in the inventory or on all devices filtered by labels. Labels are markers that serve as a differentiator.","Check if all devices are installed. You can install them manually or by executing the Install_all_from_inventory / 1 workflow.","FRINX Machine dashboard","On the main page, select Explore workflows. In the Search by keyword column, enter loopback. The Create_loopback_all_in_uniconfig / 1 workflow will appear in the list. Under Actions, select the corresponding Run button for the workflow.","Under loopback_id, insert 77 and select Execute. Click on the link that appears.","All tasks were executed correctly and are completed.","On the results page, you will see five individual tasks:"]},{"l":"INVENTORY_get_all_devices_as_dynamic_fork_tasks","p":["This workflow displays a list of all devices in the inventory or devices filtered by label. It parses the output in the correct format for the dynamic fork, which creates a number of tasks depending on the number of devices in the inventory."]},{"l":"SUB_WORKFLOW","p":["This is the dynamic fork sub-workflow. In this case, it creates UNICONFIG_write_structured_device_data for every individual device in the inventory. You can then get detailed information on the progress and success of every device."]},{"l":"UNICONFIG_calculate_diff","p":["This remote procedure call calculates the difference between the actual UniConfig topology devices and the intended UniConfig topology nodes."]},{"l":"UNICONFIG_dryrun_commit","p":["This remote procedure call resolves the difference between actual and intended device configurations. After all changes are applied, the cli-dryrun journal is read and a remote procedure call output is created and returned."]},{"l":"UNICONFIG_commit","p":["This is the final task that actually commits the intended configuration to the devices."]},{"i":"demo-l3vpn","l":"Demo “L3VPN”","p":["On the FRINX Dashboard, open the menu in the top-left corner and select L3VPN Automation.","Select Services.","Select + Add service.","Fill in the information as shown below. Select the chain icon to automatically generate the VPN ID.","FRINX Machine dashboard","Select Save changes.","You are redirected to the previous page.","Select Commit changes.","Select Commit changes again.","After committing, you can see all executed tasks and sub-workflows. 
Select Go to detail to review individual processes."]},{"i":"step-1","l":"Step 1.","p":["Navigate back to the L3VPN Automation page.","Select Sites.","Locate the test_site_3b9UQL4i entry.","FRINX Machine dashboard","For test_site_3b9UQL4i, select Manage and Site network access.","Select Add network access."]},{"i":"step-2","l":"Step 2.","p":["BFD Profile: 500ms","Bgp Profiles: 300ms","BTM Circuit Reference: CES00000000-05","Devices: Select one of the CPE devices.","Enter the following settings:","FRINX Machine dashboard","General and Service","IP Connection","Maximum Routes: 2000","Routing Protocol:","Select + Create Static Protocol.","Select Save Changes.","Static Routing Lan Tag: 999","Static Routing LAN: 10.0.0.0/8","Static Routing Next Hop: 10.0.0.1","SVC Input Bandwidth (Mbps): 1000","SVC Output Bandwidth (Mbps): 1000","To automatically generate a provider and customer address, select the chain icon:","VPN Attachment: GNS00001002"]},{"i":"step-3","l":"Step 3.","p":["Select Commit Changes.","FRINX Machine dashboard","Wait until all tasks are completed."]}],[{"l":"Install all devices from inventory","p":["When adding multiple devices to your inventory, it can be tedious to install them individually. To make things easier, we have built a workflow to install all devices present in the inventory.","Follow these instructions to use the workflow:","On the landing page, select Workflow Manager. Then select Explore and search for the workflow called Install_all_from_inventory.","Search for install_all_from_inventory","After searching, select the Execute button (blue play icon). A window appears where you can enter the input parameter. This workflow does not require any input if you want to install all uninstalled devices. If you specified a device label when adding devices, you can use this label to determine which devices should be bulk installed. Select \"Execute\" again.","Execute install_all_from_inventory","After you execute, a numeric link appears to the left of the Execute button. The link takes you to a page that shows individual tasks for this workflow, its inputs and outputs, and whether it was successful or unsuccessful. In the \"Input/Output\" tab, you can see both devices that were installed as a result of this workflow and those that were already installed.","Results of the workflow"]}],[{"l":"Policy filter XR","p":["This workflow uses UniConfig to showcase the filtering capabilities of some of our system tasks. It filters through the interfaces of the device, returns the name of the interface based on its description provided by the user and applies the chosen policy to that interface.","Supported device: ios-xr -> IOSXR653_1, IOSXR653_2 & IOSXR663_1, not IOS01 & IOS02","Policy creation is not part of this workflow. The chosen policy must exist on the device before running this workflow."]},{"l":"Searching the workflow","p":["Search"]},{"i":"sync--replace","l":"Sync & Replace","p":["We consider it best practice for all workflows that interact with devices to start with the tasks \"Sync from network\" and \"Replace config with oper\". This ensures that the internal databases of the FRINX Machine are in sync with the latest configuration of the device. The input of these tasks is simply the name of the node (device)."]},{"l":"Read device data","p":["The next part is reading the device config. In the UNICONFIG_read_structured_device_data task, you can specify which part of the config you want to read with a URI. 
In this case, we leave the\"URI\" input field empty."]},{"l":"jsonJQ filter","p":["jsonJQ is one of our system tasks that is very useful for filtering data. We use the following query expression:","We search through the whole config, and under the Cisco-IOS-XR-ifmgr-cfg:interface-configurations model we find the interface with a description given by the user. The task returns the name of that interface."]},{"l":"Lambda","p":["Lambda is a generic task that can process any JS code. In this case, we use it to parse the output of the jsonJQ task. jsonJQ returns the name of the interface in a standard decoded format, e.g: \"TenGigE0/0/0/0\". However, we will be using that interface in URI, which means it must be encoded. We can achieve that using a simple JS script:","As an example, we take the interface name TenGigE0/0/0/0 and encode it to TenGigE0%2F0%2F0%2F0."]},{"i":"write--commit","l":"Write & commit","p":["Lastly, we use the output of the lambda task for the configuration. We apply a policy to the interface filtered based on its description."]},{"l":"Example input","p":["Input"]},{"l":"Execution flow"},{"l":"Run of the workflow","p":["Running the workflow","IOSXR653_1 test_map_custom"]}],[{"l":"FRINX UniConfig introduction","p":["The purpose of UniConfig is to manage configuration state and to retrieve operational state of physical and virtual networking devices. UniConfig provides a single API for many different devices in the network. UniConfig can be run as an application on bare metal in a VM or in a container, standalone or as part of our automation solution FRINX Machine. UniConfig has a built-in data store that can be run in memory or with an external database.","UniConfig features"]},{"l":"UniConfig key feature overview","p":["A 'Lazy CLI' feature to suspend and resume connections without having to maintain keepalives","Allows for diffs to be built between actual and intended execution of atomic configuration changes","Can execute commands in parallel on multiple devices","Can read and store proprietary data models from network devices that follow the YANG data model","Choose between NETCONF or RESTCONF to connect to devices","Data export and import via blacklist and whitelist functions","High availability","Offers the ability to do a dry-commit to evaluate the functionality of a configuration before it is executed on devices","Provides snapshots of previous configurations if you need to rollback","Provides subtree filtering capabilities in NETCONF","Provides templates for device configuration","Pushes configuration data to devices via NETCONF or CLI","Python microservices are used to integrate with the FRINX machine","Retrieves and stores current startup and running configuration from mounted network devices","Retrieves operational data from devices via NETCONF or CLI","Subscription to NETCONF notifications via web sockets","Support for 3-phase commit by using NETCONF confirmed-commit","Support for YANG 1.1 and Tail-f actions","Supports PostgreSQL as an external database","The ability to log specific devices as needed","The UniConfig client allows for simple, full-service access to the UniConfig features","The UniConfig UI allows users to interact with the network controller through a web-based user interface","Transactions can be managed on one or multiple devices","Translates between CLI, native model and standard data models (i.e. 
OpenConfig) via our open-source device library (https://github.com/FRINXio/cli-units)"]},{"i":"uniconfig-enables-users-to-communicate-with-their-network-infrastructure-via-four-options","l":"UniConfig enables users to communicate with their network infrastructure via four options:","p":["Execute & Read API- Unstructured data via SSH and Telnet","OpenConfig API– Translation provided by our open source device library","UniConfig Native API– Direct access to vendor specific YANG data models that are native to the connected devices as well as UniConfig functions (i.e. diff, commit, snapshots, etc.)","UniConfig Native CLI API– Programmatic access to the CLI without the need for translation units (experimental)","Execute & Read capable API: As with Ansible, TCL scripting or similar products, strings can be passed and received through SSH or Telnet via the REST API. UniConfig provides the authentication and transportation of data without interpreting it.","OpenConfig API: An API that is translated into device-specific CLI or YANG data models. The installation of \"translation units\" on devices is required. FRINX provides an open source library of devices from a variety of network vendors. The open source framework allows anyone to contribute or consume the contents of the expanding list of supported network devices.","UniConfig Native API: Vendor-specific YANG data models are absorbed by UniConfig to allow configuration of mounted devices. UniConfig maps vendor-specific \"native\" models into its data store to provide stateful configuration capabilities to applications and users.","UniConfig Native CLI API: Allows for programmatic interaction with a device's CLI through the API without the use of 'translation units'; only a schema file is needed. (This option is currently experimental; contact FRINX for more information.)","UniConfig solution"]},{"l":"UniConfig in a Docker container"},{"l":"Download and activate FRINX UniConfig","p":["Enter the following commands to download, activate and start UniConfig in a Docker container:"]},{"l":"Stop the container","p":["To stop the container type:"]},{"l":"UniConfig as a Java process in a VM or on a host"},{"l":"Download FRINX UniConfig","p":["Click on the link to download a zip archive of the latest FRINX UniConfig: uniconfig-5.0.7.zip. By downloading the file you accept the FRINX software agreement: EULA"]},{"l":"Activate FRINX UniConfig","p":["To activate UniConfig, unzip the file, open the directory and run the following command:","For more information on the different arguments, run the startup script with the -h flag"]},{"l":"OpenAPI","p":["UniConfig distributions contain a '.yaml' file that generates a list of all usable RPCs and their examples. You can view it locally or on our hosted version, which always shows the latest OpenAPI version.","The file can be found here:"]},{"l":"Offline Activation","p":["Please contact support@frinx.io for offline activation of UniConfig."]}],[{"l":"User Guide"},{"l":"Basic Concepts","p":["Explanation of basic concepts, principles and mechanisms that exist within UniConfig."]},{"l":"Device Installation","p":["Section that explains the device installation process. It covers the basic mechanisms that take place during installation and explains the parameters used, along with example install requests. 
It then covers differences between CLI and NETCONF API."]},{"l":"UniConfig Operations","p":["This section lists various APIs used interact with UniConfig."]},{"l":"UniConfig Procedures","p":["UniConfig operations are actions that are usually inherent to UniConfig and work on their own when set up properly."]},{"l":"SDK","p":["Uniconfig provides a full blown Java based SDK. All Uniconfig operations available over RESTconf are also available when using the SDK."]}],[{"l":"Basic Concepts","p":["UniConfig is a network controller that enables network operators to automate simple and complex procedures in their heterogeneous networks. UniConfig uses CLI, NETCONF and gNMI to connect to network devices and provides a RESTCONF interface on its northbound to provide an API to applications. UniConfig users use clients in various programming languages to communicate from their applications with the controller. FRINX provides a Java client and python workers to integrate with its workflow automation in FRINX Machine. Other clients can be generated from the OpenAPI documentation of the UniConfig API.","UniConfig is stateless and stores all state information before and after transactions in a PostgreSQL database. UniConfig provides transaction capabilities on its northbound API, so that multiple clients can interact with UniConfig at the same time in a well-structured way. In addition, transactions are also supported towards all network devices independent of the capabilities of these devices. Transactions can be rolled back on error automatically and on user demand by specifying a transaction ID from the transaction log. Clients can use an “immediate commit” model (changes sent to the controller get applied to the devices immediately) or a “build and commit” model (changes are staged on the controller until a commit operation pushes all changes in a transaction to one or multiple devices).","To support N+1 redundancy and horizontal scale (meaning adding more controller instances allows the system to serve more network devices and more clients) UniConfig can be deployed together with a load balancer(E.g.: Traefik). The combination of a state-less load balancer and multiple UniConfig instances achieves high availability and supports many network devices and client applications to configure the network.","An open-source device library allows users to connect UniConfig to CLI devices that do not support SDN protocols like NETCONF and gNMI. This library is open to users, independent software vendors and any 3rd party to contribute to and use to achieve their automation goals.","Finally, the UniConfig shell, allows users to interact with all UniConfig operations and the connected devices in a model driven way through CLI.","UniConfig runs in containers, VMs or as application and can be deployed stand-alone or as part of the \"FRINX Machine\" network automation solution."]}],[{"l":"Device installation"},{"i":"device-installation-1","l":"Device installation","p":["Guide explaining installation mechanisms along with both CLI and NETCONF examples."]},{"l":"UniConfig CLI","p":["The CLI southbound plugin enables the Frinx UniConfig to communicate with CLI devices that do not speak NETCONF or any other programmatic API. The CLI service module uses YANG models and implements a translation logic to send and receive structured data to and from CLI devices."]},{"l":"UniConfig Netconf","p":["NETCONF is an Internet Engineering Task Force (IETF) protocol used for configuration and monitoring devices in the network. 
It can be used to“create, recover, update, and delete configurations of network devices”.NETCONF operations are overlaid on the Remote Procedure Call(RPC) layer and may be described in either XML or JSON."]},{"l":"UniConfig-native CLI","p":["UniConfig-native CLI allows user configuration of CLI-enabled devices using YANG models that describe configuration commands. In UniConfig-native CLI deployment translation units are defined only by YANG models and device-specific characteristics that are used for parsing and serialization of commands. Afterwards, readers and writers are automatically created and provided to translation registry - user doesn’t write them individually. YANG models can be constructed by following of well-defined rules that are explained in Developer Guide.","Network management protocols are used in southbound API of UniConfig Lighty distribution for device installation and communication. Currently, following protocols are supported:","NETCONF (Network Configuration Protocol)","SSH / TELNET"]}],[{"l":"Device installation","p":["Installing is the process of loading device information into UniConfig database. This information is saved in PostgreSQL database and used whenever transaction occurs. When the transaction is finished the connection to device is closed again, until next transaction.","These are the steps of installation process:","creation of UniConfig transaction","creation of mountpoint - connection to device","loading configuration and metadata from mountpoint","closing mountpoint and connection to device","storing synced configuration and metadata to database","closing UniConfig transaction","Node can be installed only once (you will receive error if node has already been installed).","You can specify if you would like to install node on the UniConfig layer. Default value is 'true':","Only 1 node with the same node-id can be installed on UniConfig layer.","It is synchronous: it succeeds only after node is successfully installed it fails in other cases – max-connection-attempts is automatically set to value '1', if different value is not provided in RPC input, database or config file.","Following sections provide deeper explanation of parameters needed for installation, along with example install requests.","Overview of our OpenAPI along with all parameters and expected returns can be found here."]},{"l":"Default parameters","p":["All install parameters (CLI/NETCONF) are set in database when Uniconfig is initializing. Values of these parameters are equal to specific yang model default values. These parameters are used when they are missing in RPC request.","Priority of using install parameters :","Parameter set in install RPC request","Parameter set in database","Default parameter from yang model","Priority of initial writing default parameters into database:","Database already contains default parameters","User defines default parameters into config file","Default values from yang schema file will be saved","Default parameters can be managed (put/read/delete) by user using RESTCONF/Uniconfig-shell.","Definition of default parameters can be also done using config file default-parameters.json. It is placed in config subdirectory together with lighty-uniconfig-config.json.","RPC request - CLI default parameters:","RPC request - NETCONF default parameters:"]},{"l":"Installing CLI device","p":["Install node RPC","List of basic connection parameters that are used for identification of remote device. 
All of these parameters are mandatory.","node-id- Name of node that represents device in the topology.","cli-topology:host- IP address or domain-name of target device that runs SSH or Telnet server.","cli-topology:port- TCP port on which the SSH or Telnet server on remote device is listening to incoming connections. Standard SSH port is '22', standard Telnet port is '23'.","cli-topology:transport-type- Application protocol used for communication with device - supported options are 'ssh' and 'telnet'.","cli-topology:device-type- Device type that is used for selection of translation units that maps device configuration to OpenConfig models. Supported devices can be found","cli-topology:device-version- Version of device. Use a specific version or * for a generic one. * enables only basic read and write management without the support of OpenConfig models. Here.","cli-topology:username- Username for accessing of CLI management line.","cli-topology:password- Password assigned to username.","uniconfig-config:install-uniconfig-node-enabled- Whether node should be installed to UniConfig and unified layers. By default, this flag is set to 'true'."]},{"l":"Authentication parameters","p":["List of authentication parameters used for identification of remote user utilized for configuration of the device. Username and password parameters are mandatory.","cli-topology:username- Username for accessing of CLI management line.","cli-topology:password- Password assigned to username.","List of parameters that can be used for adjusting of reconnection strategy. None of these parameters is mandatory - if they are not set, default values are set. There are two exclusive groups of parameters based on selected reconnection strategy - you can define only parameters from single group. By default, keepalive strategy is used."]},{"l":"Connection parameters","p":["Following parameters adjust maintaining of CLI session state. None of these parameters are mandatory (default values will be used).","cli-topology:max-connection-attempts- Maximum number of initial connection attempts(default value: 1). If there are unstable devices in the network it might be useful to provide max-connection-attempts higher than the default value. It would try to connect n times before throwing an ssh connection exception.","cli-topology:max-connection-attempts-install- Maximum number of initial connection attempts during install process (default value: 1). If there are unstable devices in the network it might be useful to provide max-connection-attempts-install higher than the default value. It would try to connect n times before throwing an ssh connection exception.","cli-topology:max-reconnection-attempts- Maximum number of reconnection attempts(default value: 1). max-reconnection-attempts is not that necessary to set. Uniconfig does not keep idle sessions open longer than it is necessary."]},{"l":"Storing failed installations","p":["The following parameter allows the user to store the installation in case the device is in some way unreachable.","uniconfig-config:store-failed-installation- If enabled, it will ensure that even if the device is unreachable, it will be stored in the node table in the database. If not set, the default value is false.","When the user sets the flag to true, an additional column called installation-status will be populated with a boolean flag (either SUCCESSFUL for a successful installation, or FAILED for a failed one). 
This lets the user know that there has been some problem and that the device was not installed correctly. The mount-point information of that node will be stored (unlike with the default value). With this info already stored, the user does not need to reinstall the device, as all the connection information is present in the UniConfig database. Syncing the device or calling a GET Request will try to reconnect to the device and if it is successful, the configuration data will be saved in the datastore and the request will then finish. The installation-status will then change to SUCCESSFUL. The installed device will then behave normally as if the installation was successful in the first place. If the device is still unreachable, the flag will stay FAILED.","This is useful when many devices are being installed in batches and the user doesn't know if they are up or not."]},{"l":"Keepalive strategies","p":["1. Keepalive reconnection strategy","cli-topology:keepalive-delay- Delay between sending of keepalive messages over CLI session. The value should not be set higher than the execution of the longest operation. Default value: 60 seconds.","cli-topology:keepalive-timeout- This parameter defines how much time the CLI layer should wait for a response to keepalive message before the session is closed. Default value: 60 seconds.","cli-topology:keepalive-initial-delay- This parameter defines how much time CLI layer waits for establishment of new CLI session before the first reconnection attempt is launched. Default value: 120 seconds.","The keepalive parameters have two main functions:","keep the idle session open","timeout commands which would block the session forever"]},{"l":"Example of using the connection and keepalive parameters together","p":["For this example let us assume that we are dealing with a prod-like device, which would mean that some devices might have a large config. We would set these parameters:","Connection attempts would give us more flexibility if we work with unstable devices. It would try to ssh 3 times instead of 1 (default value). We should also keep in mind that the process of connecting to a device would take longer because of extra ssh attempts.","Keepalive commands can be set less than time of the installation, because keepalive commands can fit in between of the installation process. An important thing to keep in mind is to set sum of keepalive-delay and keepalive-timeout parameters higher than time of execution of the configuration show command. Otherwise, it could time out during writing out of the configuration to the console. For each type of device it is a different command ( configuration show brief for Ciena devices, show run for Cisco devices, etc.). Assumption is that it should not take more than 240 seconds (sum of keepalive params) to show the whole configuration. This can be appropriately adjusted to our circumstances.","2. Lazy reconnection strategy","command-timeout- Maximal time (in seconds) for command execution. If a command cannot be executed on a device in this time, the execution is considered a failure. Default value: 60 seconds.","connection-establish-timeout- Maximal time (in seconds) for connection establishment. If a connection attempt fails in this time, the attempt is considered a failure. Default value: 60 seconds.","connection-lazy-timeout- Maximal time (in seconds) for connection to keep alive. If no activity was detected in the session and the timeout has been reached, connection will be stopped. 
Default value: 60 seconds."]},{"l":"Journaling parameters","p":["The following parameters relate with tracing of executed commands. It is not required to set these parameters.","cli-topology:journal-size- Size of the cli mount-point journal. Journal keeps track of executed commands and makes them available for users/apps for debugging purposes. Value 0 disables journaling(it is default value).","cli-topology:dry-run-journal-size- Creates dry-run mount-point and defines number of commands in command history for dry-run mount-point. Value 0 disables dry-run functionality (it is default value).","cli-topology:journal-level- Sets how much information should be stored in the journal. Option 'command-only' stores only the actual commands executed on device. Option 'extended' records additional information such as: transaction life-cycle, which handlers were invoked etc."]},{"l":"Parsing parameters","p":["Parsing strategies are used for:","Recognizing of structure in cached device configuration that is represented in textual format.","Extraction of target sections from structured format of device configuration.","Parsing engine can be configured on creation of mountpoint by specification of parsing-engine leaf value. Currently, there are three supported CLI parsing strategies: tree-parser(default strategy), batch-parser and one-line-parser.","Both batch-parser and tree-parser depend on current implementation of'CliFlavour' which defines device-specific CLI patterns. For example, if 'CliFlavour' doesn't correctly specify format of 'show configuration' command, then neither batch-parser or tree-parser is applied and commands are sent directly to device."]},{"l":"Tree-parser","p":["It is set as default parsing engine in case you choose to not use'parsing-engine' parameter.","Running-configuration is mapped into the tree structure before the first command lookup is executed from translation unit. Afterwards, this tree can be reused in the same transaction for faster lookup process (for example, one 'sync-from-network' task is executed in one transaction).","Tree-parser is faster than batch-parser in most cases because device configuration must be traversed only once and searching for target section in parsed tree structure has only logarithmic time complexity. The longer the device configuration is, the better performance improvement is achieved using this parsing strategy.","Both batch-parser and tree-parser should be capable to parse the same device configurations (in other words, tree-parser doesn't have any functional restrictions in comparison to batch-parser)."]},{"l":"Batch-parser","p":["Running-configuration must be traversed from the beginning each time when new target section is extracted from the configuration (such lookup process is launched from CLI translation units).","Internally, this parser uses regular expressions to recognize structure of configuration and find target section. 
From this reason, if configuration is long, this batch-parser becomes ineffective to extract sections that are placed near the end of device configuration.","Batch-parser should be used only as fallback strategy in the case when tree-parser fails."]},{"l":"One-line-parser","p":["CLI parsing engine that stores configuration in the cache in the form of blocks and then uses grep function for parsing running-configuration"]},{"l":"Cisco IOX XR Example request"},{"l":"Junos Example request"},{"l":"Uninstalling CLI device","p":["Uninstall node RPC"]},{"l":"Example request"},{"l":"Installing Netconf device"},{"l":"Identification of remote device","p":["List of basic connection parameters that are used for identification of remote device. Only tcp-only parameter must not be specified in input of the request.","node-id- Name of node that represents device / mount-point in the topology.","netconf-node-topology:host- IP address or domain-name of target device that runs NETCONF server.","netconf-node-topology:port- TCP port on which NETCONF server is listening to incoming connections.","netconf-node-topology:tcp-only- If it is set to 'true', NETCONF session is created directly on top of TCP connection. Otherwise,'SSH' is used as carriage protocol. By default, this parameter is set to 'false'."]},{"i":"authentication-parameters-1","l":"Authentication parameters","p":["Parameters used for configuration of the basic authentication method against NETCONF server. These parameters must be specified in the input request.","network-topology:username- Name of the user that has permission to access device using NETCONF management line.","network-topology:password- Password to the user in non-encrypted format.","There are also other authentication parameters if different authentication method is used - for example, key-based authentication requires specification of key-id. All available authentication parameters can be found in netconf-node-topology.yang under netconf-node-credentials grouping."]},{"l":"Session timers","p":["The following parameters adjust timers that are related with maintaining of NETCONF session state. None of these parameters are mandatory(default values will be used).","netconf-node-topology:initial-connection-timeout- Specifies timeout in seconds after which initial connection to the NETCONF server must be established (default value: 20 s).","netconf-node-topology:request-transaction-timeout- Timeout for blocking RPC operations within transactions (default value: 60 s).","netconf-node-topology:max-connection-attempts- Maximum number of connection attempts (default value: 1).","netconf-node-topology:max-reconnection-attempts- Maximum number of reconnection attempts (default value: 0 - disabled).","netconf-node-topology:between-attempts-timeout- Initial timeout between reconnection attempts (default value: 2 s).","netconf-node-topology:reconnenction-attempts-multiplier- Multiplier between subsequent delays of reconnection attempts (default value: 1.5).","netconf-node-topology:keepalive-delay- Delay between sending of keepalive RPC messages (default value: 120 sec).","netconf-node-topology:confirm-commit-timeout- The timeout for confirming the configuration by \"confirming-commit\" that was configured by \"confirmed-commit\". Configuration will be automatically reverted by device if the \"confirming-commit\" is not issued within the timeout period. This parameter has effect only on NETCONF nodes. 
(default value: 600 sec)."]},{"l":"Capabilities","p":["Parameters related to capabilities are often used when NETCONF device doesn't provide list of YANGs. Both parameters are optional.","netconf-node-topology:yang-module-capabilities- Set a list of capabilities to override capabilities provided in device's hello message. It can be used for devices that do not report any yang modules in their hello message.","netconf-node-topology:non-module-capabilities- Set a list of non-module based capabilities to override or merge non-module capabilities provided in device's hello message. It can be used for devices that do not report or incorrectly report non-module-based capabilities in their hello message.","Instead of defining netconf-node-topology:yang-module-capabilities, we can just define folder with yang schemas netconf-node-topology:schema-cache-directory: folder-name. For more information about using the netconf-node-topology:schema-cache-directory parameter, see RST Other parameters."]},{"l":"UniConfig-native","p":["Parameters related to installation of NETCONF or CLI nodes with uniconfig-native support.","uniconfig-config:uniconfig-native-enabled- Whether uniconfig-native should be used for installation of NETCONF or CLI node. By default, this flag is set to 'false'.","uniconfig-config:install-uniconfig-node-enabled- Whether node should be installed to UniConfig and unified layers. By default, this flag is set to 'true'.","uniconfig-config:sequence-read-active- Forces reading of data sequentially when mounting device. By default, this flag is set to'false'. This parameter has effect only on NETCONF nodes.","uniconfig-config:whitelist- List of root YANG entities that should be read. This parameter has effect only on NETCONF nodes.","uniconfig-config:blacklist- List of root YANG entities that should not be read from NETCONF device due to incompatibility with uniconfig-native or other malfunctions in YANG schemas. This parameter has effect only on NETCONF nodes.","uniconfig-config:validation-enabled- Whether validation RPC should be used before submitting configuration of node. By default, this flag is set to 'true'. This parameter has effect only on NETCONF nodes.","uniconfig-config:confirmed-commit-enabled- Whether confirmed-commit RPC should be used before submitting configuration of node. By default, this flag is set to 'true'. This parameter has effect only on NETCONF nodes.","uniconfig-config:store-failed-installation- Whether the installation should be stored in the database if it fails (e.g. is unreachable). The node will be 'installed' even though it failed and the user has 2 options:","uninstall the device and reinstall it.","call sync-from-network to sync the data from the device."]},{"l":"Flags","p":["Non-mandatory flag parameters that can be added to mount-request.","netconf-node-topology:enabled-strict-parsing- Default value of enabled-strict-parsing parameter is set to 'true'. This may inflicts in throwing exception during parsing of received NETCONF messages in case of unknown elements. If this parameter is set to 'false', then parser should ignore unknown elements and not throw exception during parsing.","netconf-node-topology:enabled-notifications- Default value of enabled-notifications is set to 'true'. If it is set to 'true' and NETCONF device supports notifications, NETCONF mountpoint will expose NETCONF notification and subscription services.","netconf-node-topology:reconnect-on-changed-schema- Default value of reconnect-on-changed-schema is set to 'false'. 
If it is set to 'true', NETCONF notifications are supported by device, and NETCONF notifications are enabled ('enabled-notifications' flag), the connector would auto disconnect/reconnect when schemas are changed in the remote device. The connector subscribes (right after connect) to base netconf notifications and listens for netconf-capability-change notification","netconf-node-topology:streaming-session- Default value of streaming-session parameter is set to 'false'. NETCONF session is created and optimized for receiving of NETCONF notifications from remote server."]},{"l":"Other parameters","p":["Other non-mandatory parameters that can be added to mount-request.","netconf-node-topology:schema-cache-directory- This parameter can be used for two cases:","Explicitly set name of NETCONF cache directory. If it is not set, the name of the schema cache directory is derived from device capabilities during mounting process.","Direct usage of the 'custom' NETCONF cache directory stored in the UniConfig 'cache' directory by name. This 'custom' directory must exist, must not be empty and also can not use the 'netconf-node-topology:yang-module-capabilities' parameter, because capability names will be generated from yang schemas stored in the 'custom' directory.","netconf-node-topology:dry-run-journal-size- Creates dry-run mount-point and defines number of NETCONF RPCs in history for dry-run mount-point. Value 0 disables dry-run functionality (it is default value).","netconf-node-topology:custom-connector-factory- Specification of the custom NETCONF connector factory. For example, if device doesn't support candidate data-store, this parameter should be set to 'netconf-customization-alu-ignore-candidate' string (default value is \"default\").","netconf-node-topology:edit-config-test-option- Specification of the test-option parameter in the netconf edit-config message. Possible values are 'set', 'test-then-set' or 'test-only'. If the edit-config-test-option is not explicitly specified in the mount request, then the default value will be used ('test-then-set'). See RFC-6241 for more information about this feature.","netconf-node-topology:concurrent-rpc-limit- Defines maximum number of concurrent RPCs, where 0 indicates no limit (it is default value).","There are additional install parameters in our OpenAPI, they can all be found here."]},{"l":"Example netconf request"},{"l":"Uninstalling Netconf device"},{"i":"example-request-1","l":"Example request"},{"l":"Installing SNMP agent"},{"l":"Identification of remote agent","p":["List of basic connection parameters that are used for identification of remote agent.","node-id- Name of node that represents device / mount-point in the topology.","snmp-topology:host- IP address or domain-name of target device where SNMP agent is running.","snmp-topology:port- SNMP port on which SNMP agent is listening to incoming connections."]},{"l":"SNMP parameters","p":["snmp-topology:transport-type- UniConfig currently supports UDP for SNMP communication, with plans to add TCP support in the future.","snmp-topology:snmp-version- UniConfig currently supports V1 and V2c version of the SNMP, with plans to add V3 support in the future.","snmp-topology:connection-retries- Sets the number of retries to be performed before a request is timed out. Default value is 0.","snmp-topology:request-timeout- Timeout in milliseconds before a confirmed request is resent or timed out. 
Default value is 3000.","snmp-topology:get-bulk-size- The maximum number of values that can be returned in a single response to the get-bulk operation. Default value is 50."]},{"i":"authentication-parameters-2","l":"Authentication parameters","p":["snmp-topology:community-string- UniConfig currently supports only security string as authentication method that is used with V1 and V2c."]},{"l":"Others","p":["snmp-topology:mib-repository- Name of the MIB repository that contains MIB files."]},{"i":"example-request-2","l":"Example request"},{"l":"Uninstalling SNMP agent"},{"i":"example-request-3","l":"Example request"}],[{"l":"UniConfig CLI"},{"l":"Introduction","p":["The CLI southbound plugin enables the Frinx UniConfig to communicate with CLI devices that do not speak NETCONF or any other programmatic API. The CLI service module uses YANG models and implements a translation logic to send and receive structured data to and from CLI devices. This allows applications to use a service model or unified device model to communicate with a broad range of network platforms and SW revisions from different vendors.","Much like the NETCONF southbound plugin, the CLI southbound plugin enables fully model-driven, transactional device management for internal and external OpenDaylight applications. In fact, the applications are completely unaware of underlying transport and can manage devices over the CLI plugin in the same exact way as over NETCONF.","Once we have installed the device, we can present an abstract, model-based network device and service interface to applications and users. For example, we can parse the output of an IOS command and return structured data.","CLI southbound plugin"]},{"l":"Architecture","p":["This section provides an architectural overview of the plugin, focusing on the main components."]},{"l":"CLI topology","p":["The CLI topology is a dedicated topology instance where users and applications can:","install a CLI device,","uninstall a device,","check the state of connection,","read/write data from/to a device,","execute RPCs on a device.","This topology can be seen as an equivalent of topology-netconf, providing the same features for netconf devices. The topology APIs are YANG APIs based on the ietf-topology model. Similarly to netconf topology, CLI topology augments the model with some basic configuration data and also some state to monitor mountpoints."]},{"l":"CLI mountpoint","p":["The plugin relies on MD-SAL and its concept of mountpoints to expose management of a CLI device. By exposing a mountpoint into MD-SAL, it enables the CLI topology to actually access the device's data in a structured/YANG manner. Components of such a mountpoint can be divided into 3 distinct layers:","Service layer - implementation of MD-SAL APIs delegating execution to transport layer.","Translation layer - a generic and extensible translation layer. The actual translation between YANG and CLI takes place in the extensions. The resulting CLI commands are then delegated to transport layer.","Transport layer - implementation of various transport protocols used for actual communication with network devices.","The following diagram shows the layers of a CLI mountpoint:"]},{"l":"Translation layer","p":["The CLI southbound plugin is as generic as possible. However, the device-specific translation code (from YANG data -\\ CLI commands and vice versa), needs to be encapsulated in a device-specific translation plugin. E.g. 
Cisco IOS specific translation code needs to be implemented by Cisco IOS translation plugin before FRINX UniConfig can manage IOS devices. These translation plugins in conjunction with the generic translation layer allow for a CLI mountpoint to be created."]},{"l":"Device specific translation plugin","p":["Device specific translation plugin is a set of:","YANG models","Data handlers","RPC implementations","that actually","defines the model/structure of the data in FRINX UniConfig","implements the translation between YANG data and device CLI in a set of handlers","(optionally) implements the translation between YANG RPCs and device CLI","The plugin itself is responsible for defining the mapping between YANG and CLI. However, the translation layer into which it plugs in is what handles the heavy lifting for it e.g. transactions, rollback, config data storage etc. Additionally, the SPIs of the translation layer are very simple to implement because the translation plugin only needs to focus on the translations between YANG <-\\ CLI."]},{"l":"Units","p":["In order to enable better extensibility of the translation plugin and also to allow the separation of various aspects of a device's configuration, a plugin can be split into multiple units. Where a unit is actually just a subset of a plugin's models, handlers and RPCs.","A single unit will usually cover a particular aspect of device management e.g. the interface management unit.","Units can be completely independent or they can build on each other, but in the end (in the moment where a device is being installed) they form a single translation plugin.","Each unit has to be registered under a specific device type(s) e.g. an interface management unit could be registered for various versions of the IOS device type. When installing an IOS device, the CLI southbound plugin collects all the units registered for the IOS device type and merges them into a single plugin enabling full management.","The following diagram shows an IOS device translation plugin split into multiple units:","IOS translation plugin"]},{"l":"Transport layer","p":["For now, two transport protocols are supported:","SSH","Telnet","They implement the same APIs, which enables the translation layer of the CLI plugin to be completely independent of the underlying protocol in use. Deciding which transport will be used to manage a particular device is simply a matter of install-request configuration.","The transport layer can be specified using install-request'cli-topology:transport-type' parameter."]},{"l":"Data processing","p":["There are 2 types of data depending on data-store in which data is stored:","Config","Operational","This section details how these data types map to CLI commands.","Just as there are 2 types of data, there are 2 streams of data in the CLI southbound plugin:","It represents user/application intended configuration for the device.","Translation plugins/units need to handle this configuration in data handlers as C(reate), U(pdate) and D(elete) operations. 
R(ead) pulls this config data from the device and updates the cache on its way back.","Config data","It represents actual configuration on the device, optionally statistics from the device.","Translation plugins/units need to pull these data out of the device when R(ead) operation is requested.","Operational data","RPCs stand on their own and can encapsulate any command(s) on the device."]},{"l":"RPCs provided by CLI layer","p":["There are multiple RPCs that can be used to send commands to a CLI session and optionally wait for command output. The CLI layer also provides one additional RPC for computing configuration coverage by cli-units. To use all of these RPCs, it is required to have an installed CLI device in the 'Connected' state."]},{"i":"rpc-execute-and-read","l":"RPC: Execute-and-read"},{"l":"Description","p":["Execution of the sequence of commands specified in the input. These commands must be separated by the new line - then, each of the command is executed separately.","After all commands are executed, it is assumed, that the original command prompt (prompt that was set before execution of this RPC) appears on the remote terminal.","If the input contains only single command, output of this RPC will contain only output of this command. If input contains multiple commands separated by newline, output of this RPC will be built from command prompts (except the prompt of the first command), input commands and outputs returned from remote terminal."]},{"l":"Example","p":["Following RPC demonstrates listing of all interfaces with configured IP addresses plus listing of available routing protocols that can be enabled from global configuration mode. Since the last entered command is placed in configuration mode (for example, starting with'Router(config)#'), it is required to return back to Privileged EXEC mode (for example, starting with 'Router#') using 'end' command and'no' confirmation to not save changes. Also, 'wait-for-output-timer' is configured to 2 seconds - CLI layer waits for command output returned from device up to 2 seconds.","Remember that the last command prompt must equal to original prompt otherwise CLI session fails on timeout and CLI mountpoint must be recreated.","RPC reply with unescaped output string (output can be easily unescaped with 'printf' linux application):","Description of RPC-request input body fields:","command(mandatory) - The list of commands that are sent to device. Commands must be separated by newline character. Every command-line is executed separately.","wait-for-output-timer(optional) - By default (if this parameter is not set or set to 0), outputs from entered commands are collected after caught echo of the next typed command in CLI session (or command prompt, if the command is the last one from input sequence). Then, the collected output contains output of the previous command + echo of the current command that hasn't been executed by sending newline character yet. This process is simplified by setting'wait-for-output-timer' value. In this case,'waiting-for-command-echo' procedure is not applied, rather next command is executed only after specified number of seconds after which the reply from CLI session should already be available (if it won't be available, then command output will be read after execution of the next command - outputs can be messed up).","error-check(optional) - By default, UC does not check for errors in commands. 
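To make the input fields above concrete, the following is a minimal sketch of an 'execute-and-read' call against an installed CLI node. The RPC name and the 'command' and 'wait-for-output-timer' fields come from the description above; the RESTCONF operations path, the 'cli' topology identifier, the node name, the module prefix and the credentials are assumptions.

```bash
# Hedged sketch: invoke execute-and-read on an installed CLI node.
# The operations path and the 'cli-unit-generic' module prefix are assumptions;
# 'command' and 'wait-for-output-timer' are the input fields described above.
curl -X POST 'http://localhost:8181/rests/operations/network-topology:network-topology/topology=cli/node=IOSXR/yang-ext:mount/cli-unit-generic:execute-and-read' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "input": {
    "command": "show running-config interface\nshow ip interface brief",
    "wait-for-output-timer": 2
  }
}'
```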
If error-handling is enabled and an error occurs, the RPC fails."]},{"l":"Wait-for-echo behaviour","p":["The difference between the described wait-for-echo approaches can be demonstrated on the steps taken to process 2 command-lines:","'wait-for-output-timer' is not set or is set to 0","write command 1","wait for command 1 echo","hit enter","write command 2","wait for command 2 echo","read until command prompt appears","'wait-for-output-timer' is specified in request","read output until timeout expires","Even if 'wait-for-output-timer' is configured, the last output must equal the original command prompt."]},{"i":"rpc-execute-and-expect","l":"RPC: Execute-and-expect"},{"i":"description-1","l":"Description","p":["This is a form of the 'execute-and-read' RPC whose input may additionally contain 'expect(..)' patterns used to wait for specific outputs/prompts. It can be used to execute interactive commands that require multiple subsequent inputs with different preceding prompts.","The body of the 'expect(..)' pattern must be a Java-based regular expression placed between the brackets (see https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html","documentation about regular expressions used in the Java language).","The 'expect(..)' pattern can only be used to test the previous command-line output, including the next command prompt. For this reason, it is also a suitable tool for testing specific command prompts.","The 'expect(..)' pattern must be specified on a separate line. If multiple 'expect(..)' patterns are chained on neighbouring lines, all of them must match the previous output (patterns are joined using a logical AND operation).","The output of this RPC reflects the whole dialogue between the Frinx UniConfig client and the remote terminal, except for the initial command prompt.","The 'wait-for-output-timer' parameter can also be specified in this RPC","but in this case it applies only to non-interactive commands - commands that are not followed by an 'expect(..)' pattern. It is possible to mix interactive and non-interactive commands in the input command snippet.","If an 'expect(..)' pattern doesn't match the previous output, the execute-and-expect RPC fails on a fixed 3-second read timeout and the CLI session is dropped immediately."]},{"i":"example-1","l":"Example","p":["The following RPC request shows the execution of an interactive command that copies a file from a TFTP server. The CLI subsequently prompts for the source filename and the destination filename. These prompts are asserted by 'expect(..)' patterns. The last 'expect(..)' pattern just waits for the confirmation of the number of copied bytes. An illustrative sketch of such a request is shown below.","RPC reply with an unescaped output string (the output can easily be unescaped with the 'printf' Linux application):","Backslash is a special character that must be escaped in the JSON body. 
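The following is only an illustrative sketch of the interactive TFTP copy scenario described above. The device prompts, addresses and file names are hypothetical, and the operations path and module prefix are assumptions; the doubled backslashes demonstrate the JSON escaping discussed around this example.

```bash
# Hedged sketch: execute-and-expect with expect(..) patterns for an interactive TFTP copy.
# Prompts, addresses and file names are hypothetical; note the doubled backslashes
# needed to escape regular-expression constructs inside the JSON string.
curl -X POST 'http://localhost:8181/rests/operations/network-topology:network-topology/topology=cli/node=IOSXR/yang-ext:mount/cli-unit-generic:execute-and-expect' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "input": {
    "command": "copy tftp: disk0:\nexpect(Address or name of remote host)\n10.0.0.1\nexpect(Source filename)\nimage.bin\nexpect(Destination filename)\nimage.bin\nexpect(\\d+ bytes copied)"
  }
}'
```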
For this reason, the previous example contains two backslashes preceding each regular-expression construct.","If the 'execute-and-expect' command field doesn't contain any 'expect(..)' patterns, it is evaluated in the same way as the 'execute-and-read' RPC."]},{"i":"rpc-execute-and-read-until","l":"RPC: Execute-and-read-until"},{"i":"description-2","l":"Description","p":["This is a form of the 'execute-and-read' RPC that allows you to explicitly specify the 'last-output' that the CLI layer expects at the end of command execution (after the last command has been sent to the device).","If the explicitly specified last output is not found at the end of the output, the session is again dropped and recreated, similarly to the behaviour of the 'execute-and-read' RPC."]},{"i":"example-2","l":"Example","p":["The following request sends a configuration snippet that disables automatic network summarization (RIP routing protocol). After these commands are executed, the command prompt switches to 'RP/0/0/CPU0:XR5(config-rip)#', which is not the same as the initial command prompt 'RP/0/0/CPU0:XR5#'. For this reason, it is necessary to return to the initial command prompt either by sending additional commands or by specifying 'last-output', as demonstrated in this example.","RPC reply with an unescaped output string (the output can easily be unescaped with the 'printf' Linux application):","The configured 'last-output' is saved within the current CLI session - if you send another 'execute-and-read' RPC, it is assumed that both the initial and the last output equal the newly configured 'last-output'."]},{"i":"rpc-execute","l":"RPC: Execute"},{"i":"description-3","l":"Description","p":["Simple execution of one or more commands on the remote terminal. Multiple commands must be separated by newlines in the input. Command outputs are not collected - the output of this RPC contains only a status message.","This RPC can be used when it is not necessary to obtain the outputs of the entered commands.","After all commands are executed, the last output is not checked against an expected output."]},{"i":"example-3","l":"Example","p":["The following example demonstrates the 'execute' RPC by creating a simple static route and committing the change.","RPC reply - the output contains just a status message:"]},{"i":"rpc-config-coverage","l":"RPC: config-coverage"},{"i":"description-4","l":"Description","p":["This RPC reads the entire device configuration, determines how much of the configuration is covered by translation units, and returns either simple or complex output. The user can define the preferred output format in the RPC input. The default is simple output.","Simple output contains one string that consists of all lines of the device configuration. Each line starts with '+' if it is covered or '-' if not, and ends with a '\\n' marker.","Complex output contains a list of commands. Each entry in the list includes the following fields:","'covered', which indicates whether the entire command is covered or not. Can be either 'true' or 'false'.","'non-parsable-parts', which is visible only if the entire command is not covered. Contains a list of those command parts that are not covered. If no part of the command is covered, it contains only the word 'ALL'.","'command', which includes the entire command."]},{"l":"Simple output example","p":["RPC reply:"]},{"l":"Complex output example","p":["RPC reply:"]}],[{"l":"UniConfig NETCONF"},{"l":"Overview","p":["NETCONF is an Internet Engineering Task Force (IETF) protocol used for configuration and monitoring of devices in a network. 
It can be used to“create, recover, update, and delete configurations of network devices”. The base NETCONF protocol is described in RFC-6241.","NETCONF operations are overlaid on the Remote Procedure Call (RPC) layer and may be described in either XML or JSON."]},{"l":"NETCONF southbound plugin"},{"l":"Introduction to southbound plugin and netconf-connectors","p":["The NETCONF southbound plugin is capable of connecting to remote NETCONF devices and exposing their configuration/operational datastores, RPCs and notifications as MD-SAL mount points. These mount points allow applications and remote users (over RESTCONF) to interact with the mounted devices.","In terms of RFCs, the southbound plugin supports:","Network Configuration Protocol (NETCONF) - RFC-6241","NETCONF Event Notifications - RFC-5277","YANG Module for NETCONF Monitoring - RFC-6022","YANG Module Library - draft-ietf-netconf-yang-library-06","NETCONF is fully model-driven (utilizing the YANG modelling language) so in addition to the above RFCs, it supports any data/RPC/notifications described by a YANG model that is implemented by the device.","By mounting of NETCONF device a new netconf-connector is created. This connector is responsible for:","keeping state of NETCONF session between NETCONF client that resides on FRINX UniConfig distribution and NETCONF server (remote network device)","sending / receiving of NETCONF RPCs that are used for reading / configuration of network device","interpreting of NETCONF RPCs by mapping of their content using loaded device-specific YANG schemas","There are 2 ways for configuring a new netconf-connector: NETCONF or RESTCONF. This guide focuses on using RESTCONF."]},{"l":"Spawning of netconf-connectors while the controller is running","p":["To configure a new netconf-connector (NETCONF mount-point) you need to create a node in configuration data-store under 'topology-netconf'. Adding of new node under NETCONF topology automatically triggers data-change-event that at the end triggers mounting process of the NETCONF device. The following example shows how to mount device with node name 'example' (make sure that the same node name is specified in URI and request body under 'node-id' leaf).","This spawns a new netconf-connector with name 'example' which tries to connect to the NETCONF device at '192.168.1.100' and port '22'. Both username and password are set to 'test' and SSH is used as channel for transporting of NETCONF RPCs (if 'tcp-only' leaf is set to 'true', NETCONF application protocol is running directly on top of the TCP protocol).","Right after the new netconf-connector is created, NETCONF layer writes some useful metadata into the operational data-store of MD-SAL under the network-topology subtree. This metadata can be found at:","Information about connection status, device capabilities, etc. can be found there.","You can check the configuration of device by accessing of'yang-ext:mount' container that is created under every mounted NETCONF node. The new netconf-connector will now be present there. Just invoke:","The response will contain the whole configuration of NETCONF device. 
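As a rough sketch of the workflow described above, the first request spawns the 'example' netconf-connector and the second reads the mounted device configuration under 'yang-ext:mount'. The node name, address, port and 'test' credentials follow the description above; the RESTCONF base URL, UniConfig credentials and exact leaf names are assumptions.

```bash
# Hedged sketch: spawn the 'example' netconf-connector described above ...
curl -X PUT 'http://localhost:8181/rests/data/network-topology:network-topology/topology=topology-netconf/node=example' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "network-topology:node": [{
    "node-id": "example",
    "netconf-node-topology:host": "192.168.1.100",
    "netconf-node-topology:port": 22,
    "netconf-node-topology:username": "test",
    "netconf-node-topology:password": "test",
    "netconf-node-topology:tcp-only": false
  }]
}'

# ... then read the whole device configuration through the mount point.
curl -X GET 'http://localhost:8181/rests/data/network-topology:network-topology/topology=topology-netconf/node=example/yang-ext:mount?content=config' \
  -u admin:admin
```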
You can also fetch smaller slices of the configuration using more specific URLs under 'yang-ext:mount'."]},{"i":"authentification-with-privatepublic-key","l":"Authentication with private/public key","p":["This type of authentication is used when you want to connect to the NETCONF device with a private/public key pair instead of a password. The public key must be stored on the device, the private key must be imported into UniConfig, and the NETCONF mount-point must then be configured to connect with the SSH key rather than a password.","To accomplish that, follow these steps:","1. Generate a private/public key pair on your local machine","2. Convert the .pub format into the .bin format","3. Copy the public key into the device directory. The device password will be required.","4. (Optional) Check that the public key is on the device","5. Import the public key to the device","6. Log in to the device NETCONF subsystem with the private key. The key passphrase will be required.","7. Start UniConfig and insert a keystore entry with the private key into it.","RPC request:","8. Create the mount-point with the key-id","Delete public key","Log in to the device and remove the RSA public key. After that, the key can also be deleted from the device directory."]},{"l":"PKI Data persistence in NETCONF","p":["PKI data is used for authentication of NETCONF sessions with the provided RSA private key. The corresponding public key must be stored on the device side.","Keys are identified using a unique 'key-id'. This key identifier can be specified in the NETCONF installation request.","Keys can be managed using the 'remove-keystore-entry' and 'add-keystore-entry' operations. These RPC calls are part of the UniConfig transaction. Changes are not applied until they are committed by the user or the immediate commit model is used to invoke the operation.","Keys are stored in the UniConfig database. In a clustered environment, all nodes share the same set of keys."]},{"l":"Registration of the new key","p":["The following request demonstrates how to register a new RSA private key with a key-id of 'key1'. The private key must be specified in the PKCS#8 format. The passphrase is optional and must be specified only if the private key is encrypted.","Multiple keys can be registered at once if the user provides a list of the 'key-credential' in the input."]},{"l":"Removing of the existing key","p":["The following example shows how to remove the existing key 'key1' from UniConfig. It is possible to remove multiple keys at once."]},{"l":"Reading list of the existing keys","p":["The following example shows how to read the list of existing keys from UniConfig.","Note: Both 'passphrase' and 'private-key' are additionally encrypted by the UniConfig encryption system to protect confidential data."]},{"l":"Keepalive settings","p":["If the NETCONF session has not been created yet, UniConfig tries to establish the session only within the maximum connection timeout. If this timeout expires before the NETCONF session is established, the underlying NETCONF channel is closed (the reconnection strategy is not started). After the NETCONF session has been successfully created, there are two techniques for keeping the connection state alive:","TCP acknowledgements- NETCONF runs on top of the TCP protocol, which can handle dropped packets by decreasing the window size and resending lost TCP segments. 
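A minimal sketch of registering a private key, relating to the key-based authentication and keystore operations described above. The 'add-keystore-entry' operation, the 'key-credential' list and the 'key-id', 'private-key' and 'passphrase' fields come from the text; the 'crypto' module prefix, the operations path and the placeholder key material are assumptions.

```bash
# Hedged sketch: register an RSA private key under key-id 'key1'.
# The 'crypto' module prefix and exact input structure are assumptions;
# 'add-keystore-entry', 'key-credential', 'key-id', 'private-key' and
# 'passphrase' come from the description above. Key material is a placeholder.
curl -X POST 'http://localhost:8181/rests/operations/crypto:add-keystore-entry' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "input": {
    "key-credential": [{
      "key-id": "key1",
      "private-key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----",
      "passphrase": "secret"
    }]
  }
}'
```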
Working TCP connection doesn't imply working state of the application layer (NETCONF session) - keepalive messages are required too.","Explicit NETCONF keepalive messages- Keepalive messages test whether NETCONF server is alive - server responds to keepalive messages within NETCONF RPC timeout.","If TCP connection is dropped or NETCONF server doesn't respond within keepalive timeout, NETCONF launches reconnection strategy. To summarize it all, there are 3 configurable parameters that can be set in mount-request:","Initial connection timeout [seconds]- Specifies timeout in milliseconds after which initial connection to the NETCONF server must be established. By default, the value is set 20 s.","Keepalive delay [seconds]- Delay between sending of keepalive RPC messages to the NETCONF server. Keepalive messages test state of the NETCONF session (application layer) - whether remote side is able to respond to RPC messages. Default keepalive delay is 120 seconds.","Request transaction timeout [seconds]- Timeout for blocking RPC operations within transactions. Southbound plugin stops to wait for RPC reply after this timeout expires. By default, it is set to 60 s.","Example with set keepalive parameters at creation of NETCONF mount-point(connection timeout, keepalive delay and request timeout):"]},{"l":"Reconnection strategy","p":["Reconnection strategies are used for recovering of the lost connection to the NETCONF server. The behaviour of the reconnection can be described by 3 configurable mount-request parameters:","Maximum number of connection attempts [count]- Maximum number of initial connection retries; when it is reached, the NETCONF won't try to connect to device anymore. By default, this value is set to 1.","Maximum number of reconnection attempts [count]- Maximum number of reconnection retries; when it is reached, the NETCONF won't try to reconnect to device anymore. By default, this value is set to 1.","Initial timeout between attempts [seconds]- The first timeout between reconnection attempts in milliseconds. The default timeout value is set to 2000 ms.","Reconnection attempts multiplier [factor]- After each reconnection attempt, the delay between reconnection attempts is multiplied by this factor. By default, it is set to 1.5. This means that the next delay between attempts will be 3 s, then it will be 4,5 s, etc.","Example with set reconnection parameters at creation of NETCONF mount-point - maximum connection attempts, initial delay between attempts and sleep factor:"]},{"l":"Local NETCONF cache repositories","p":["The netconf-connector in OpenDaylight relies on'ietf-netconf-monitoring' support when connecting to remote NETCONF device. The 'ietf-netconf-monitoring' feature allows netconf-connector to list and download all YANG schemas that are used by the device. These YANG schemas are afterwards used by NETCONF southbound plugin for interpretation of RPCs. The following rules apply for maintaining of local NETCONF cache repositories:","By default, for each device type, the separate local repository is prepared.","All NETCONF repositories are backed up by separate sub-directory under 'cache' directory of UniConfig Distribution.","NETCONF device types are distinguished by unique set of YANG source identifiers - module names and revision numbers. 
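The keepalive and reconnection parameters described above map onto leaves of the NETCONF mount request. The sketch below mirrors the listed defaults; apart from 'keepalive-delay', which is referenced elsewhere in this documentation, the exact leaf names and the plain-patch style of the request are assumptions.

```bash
# Hedged sketch: keepalive and reconnection tuning for a NETCONF node.
# Leaf names other than 'keepalive-delay' are assumptions; values mirror the
# defaults described above (20 s connect, 120 s keepalive, 60 s request timeout,
# 1 connection attempt, 2 s initial delay between attempts, 1.5 sleep factor).
curl -X PATCH 'http://localhost:8181/rests/data/network-topology:network-topology/topology=topology-netconf/node=example' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "network-topology:node": [{
    "node-id": "example",
    "netconf-node-topology:connection-timeout-millis": 20000,
    "netconf-node-topology:keepalive-delay": 120,
    "netconf-node-topology:default-request-timeout-millis": 60000,
    "netconf-node-topology:max-connection-attempts": 1,
    "netconf-node-topology:between-attempts-timeout-millis": 2000,
    "netconf-node-topology:sleep-factor": "1.5"
  }]
}'
```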
For example, if 2 NETCONF devices differ only in revision of one YANG schema, these NETCONF devices are recognized to have different device types.","Format of the name of generated NETCONF cache directory at runtime is 'schema_id', where 'id' represents unique integer computed from hash of all source identifiers. This generation of cache directory name is launched only at mounting of new NETCONF device and only if another directory with the same set of source identifiers haven't been registered yet.","You can still manually provide NETCONF cache directories with another format before starting of UniConfig Distribution or at runtime - such directories don't have to follow 'schema_id' format.","The NETCONF repository can be registered in 3 ways:","Implicitly by mounting of NETCONF device that has NETCONF monitoring capability and another devices with the same type hasn't already been mounted.","At booting of FRINX UniConfig distribution, all existing sub-directories of 'cache' root directory are registered as separate NETCONF repositories.","At runtime, by invocation of 'schema-resources:register-repository' RPC.","Already registered schema repositories can be listed using following request:","It should return list of ODL nodes in cluster with list of all loaded repositories. Each repository have associated list of source identifiers. See the following example of GET request output:"]},{"l":"Local Netconf default cache repository","p":["Before booting of FRINX UniConfig, the user can put the 'default' repository in the ‘cache’ directory. This directory should contain the most frequently missing sources. As mentioned above, if the device supports ‘ietf-netconf-monitoring’ and there is no directory in the'cache' with all sources that the device requires, then NETCONF will generate directory with name ‘schema_id’, where ‘id’ represents unique integer. The generated repository may not contain all required schemas because device may not provide them. In such case, the missing sources will be searched in the 'default' repository and if sources will be located there, generated repository will be supplemented by the missing sources. In general, there are 2 situations that can occur:","Missing imports","The device requires and provides a resource which for its work requires additional resources that are not covered by provided resources.","Source that is not covered by provided sources","The device requires but does not provide a specific source.","note Using the 'default' directory in the 'cache' directory is optional."]},{"l":"Connecting to a device not supporting NETCONF monitoring","p":["NETCONF connector can only communicate with a device if it knows the set of used schemas (or at least a subset). However, some devices use YANG models internally but do not support NETCONF monitoring. Netconf-connector can also communicate with these devices, but you must load required YANG models manually. In general, there are 2 situations you might encounter:","NETCONF device does not support 'ietf-netconf-monitoring' but it does list all its YANG models as capabilities in HELLO message","This could be a device that internally uses, for example,'ietf-inet-types' YANG model with revision '2010-09-24'. 
In the HELLO message, that is sent from this device, there is this capability reported as the following string (other YANG schemas can be reported as capabilities in the similar format):","The format of the capability string is following:","[NAMESPACE] - Namespace that is specified in the YANG schema.","[MODULE_NAME] - Name of the YANG module.","[REVISION] - The newest revision that is specified in the YANG schema (it should be specified as the first one in the file). note Revision number is not mandatory (YANG model doesn't have to contain revision number) - then, the capability is specified without the'&' and revision too. For such devices you have to side load all device YANG models into separate sub-directory under 'cache' directory (you can choose random name for this directory, but directory must contain only YANG files of one device type).","NETCONF device does not support 'ietf-netconf-monitoring' and it does NOT list its YANG models as capabilities in HELLO message","Compared to device that lists its YANG models in HELLO message, in this case there would be no specified capabilities in the HELLO message. This type of device basically provides no information about the YANG schemas it uses so its up to the user of OpenDaylight to properly configure netconf-connector for this device. Netconf-connector has an optional configuration attribute called'yang-module-capabilities' and this attribute can contain a list of'yang-module-based' capabilities. By setting this configuration attribute, it is possible to override the 'yang-module-based' capabilities reported in HELLO message of the device. To do this, we need to mount NETCONF device or modify the configuration of existing netconf-connector by adding the configuration snippet with explicitly specified capabilities (it needs to be added next to the address, port, username etc. configuration elements). The following example shows explicit specification of 6 capabilities:","Remember to also put the YANG schemas into the cache folder like in the case 1."]},{"l":"Registration or refreshing of NETCONF cache repository using RPC","p":["This RPC can be used for registration of new NETCONF cache repository or updating of NETCONF cache repository. This is useful when user wants to add new NETCONF cache repository at runtime of FRINX UniConfig distribution for device that doesn't support 'ietf-netconf-monitoring' feature. It can also be used for refreshing of repository contents (YANG schemas) at runtime.","The following example shows how to register a NETCONF repository with name 'example-repository'. The name of the provided repository must equal to name of the directory which contains YANG schemas.","If the repository registration or refreshing process ends successfully, the output contains just set 'status' leaf with 'success' value:","On the other side, if the directory with input 'repository-name' does not exist, directory doesn't contain any YANG files, or schema context cannot be built using provided YANG sources the response body will contain 'failed' 'status' and set 'error-message'. For example, non-existing directory name produces following response:","Constraints:","Only the single repository can be registered using one RPC request.","Removal of registered repositories is not supported for now."]},{"l":"Reconfiguring netconf-connector while the controller is running","p":["It is possible to change the configuration of an already mounted NETCONF device while the whole controller is running. 
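A minimal sketch of the repository registration described above. The RPC name 'schema-resources:register-repository', the 'repository-name' input and the 'status' output come from the text; the RESTCONF base URL and credentials are assumptions.

```bash
# Hedged sketch: register (or refresh) a NETCONF cache repository at runtime.
# The RPC name and 'repository-name' input come from the text above; the
# RESTCONF base URL and credentials are assumptions.
curl -X POST 'http://localhost:8181/rests/operations/schema-resources:register-repository' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "input": {
    "repository-name": "example-repository"
  }
}'
# Expected reply on success, per the description above:
# {"output": {"status": "success"}}
```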
This example will continue where the last left off and will change the configuration for the existing netconf-connector after it was spawned. Using one RESTCONF request, we will change both username and password for the netconf-connector.","To update an existing netconf-connector you need to send following request to RESTCONF:","Since a PUT is a replace operation, the whole configuration must be specified along with the new values for username and password. This should result in a '2xx' response and the instance of netconf-connector called 'example' will be reconfigured to use username 'bob' and password'passwd'. New configuration can be verified by executing:","With new configuration, the old connection will be closed and a new one established."]},{"l":"Destroying of netconf-connector","p":["Using RESTCONF one can also destroy an instance of a netconf-connector - NETCONF connection will be dropped and all resources associated with NETCONF mount-point on NETCONF layer will be cleaned (both CONFIGURATION and OPERATIONAL data-store information). To do this, simply issue a request to following URL:","The last element of the URL is the name of the mount-point."]},{"l":"NETCONF TESTTOOL"},{"l":"Testtool overview","p":["NETCONF testtool is the Java application that:","Can be used for simulation of 1 or more NETCONF devices (it is suitable for scale testing).","Uses core implementation of NETCONF NORTHBOUND server.","Provides broad configuration options of simulated devices.","Supports YANG notifications.","NETCONF testtool is available at netconf repository of ODL( into config/ folder of FRINX UniConfig distribution, this file contains xml paths that should be ignored while removing duplicate nodes from the netconf message","Optional:","put file namespaceBlacklist.txt into config/ folder of FRINX UniConfig distribution, this file contains xml namespaces of the nodes that should be removed from the netconf message","Now UniConfig can be started."]},{"l":"Install SROS device","p":["To install the SROS device run:","Where:","sros: is the name of the device","10.19.0.18: is the IP address of the device","830: is the port number of the device","USERNAME: is the username to access the device","PASSWORD: is the respective password","\"uniconfig-config:uniconfig-native-enabled\": allows to enable installing through UniConfig Native","\"uniconfig-config:install-uniconfig-node-enabled\": allows to disable installing to uniconfig and unified layers","\"uniconfig-config:path\": allows to specify a list of root elements from models present on device to be ignored by UniConfig Native","In case of success the return code is 201."]},{"l":"Check if SROS device is connected","p":["To check if the device is properly connected run:","In case of success the return code is 200, and the response body contains something similar to:"]},{"l":"Check if SROS device configuration is available in UniConfig","p":["To check if the SROS device configuration has been properly loaded in the UniConfig config datastore, run:","In case of success the return code is 200 and the response body contains something similar to:"]}],[{"l":"UniConfig SNMP"},{"l":"Introduction","p":["The SNMP (Simple Network Management Protocol) southbound plugin enables Frinx UniConfig to communicate with an SNMP agent, which is a software module installed on network devices. 
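Relating to the SROS installation described above, the following sketch shows how such an install request might look. The 'uniconfig-config:*' flags, the connection details and the expected 201 return code come from the text; the endpoint, the 'node' wrapper and the basic 'netconf-node-topology:*' leaf names are assumptions.

```bash
# Hedged sketch: install the 'sros' device through UniConfig-native NETCONF.
# Endpoint and basic netconf-node-topology leaves are assumptions; the
# 'uniconfig-config:*' flags and connection details follow the text above.
curl -X PUT 'http://localhost:8181/rests/data/network-topology:network-topology/topology=topology-netconf/node=sros' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "network-topology:node": [{
    "node-id": "sros",
    "netconf-node-topology:host": "10.19.0.18",
    "netconf-node-topology:port": 830,
    "netconf-node-topology:username": "USERNAME",
    "netconf-node-topology:password": "PASSWORD",
    "uniconfig-config:uniconfig-native-enabled": true,
    "uniconfig-config:install-uniconfig-node-enabled": true
  }]
}'
# A 201 return code indicates success, as noted above.
```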
It collects information about the status, performance, and configuration of these devices.","The SNMP southbound plugin follows a fully model-driven approach, similar to CLI or NETCONF southbound plugins. However, the difference lies in the fact that it uses MIB (Management Information Base) for data modeling instead of YANG."]},{"l":"Architecture","p":["This section provides an architectural overview of the plugin, focusing on the main components."]},{"l":"SNMP topology","p":["The SNMP topology is a dedicated topology instance where users and applications can:","install an SNMP agent,","uninstall an agent,","read device configuration settings or performance metrics"]},{"l":"SNMP mountpoint","p":["The plugin relies on MD-SAL and its concept of mountpoints to expose information about a device. By exposing a mountpoint in MD-SAL, it enables the SNMP topology to access device information in a structured form."]},{"l":"Local SNMP MIB repositories","p":["It is necessary to provide /mibs directory that has to contain:","repository - it is directory that contains mib files. It is possible to use any name.","mib.metadata file - through this file, we inform UniConfig that we have added, removed, or modified some MIB file in the repository. Just insert the repository name and any arbitrary string and UniConfig will update the relevant context for particular repository."]},{"i":"example-of-mibmetadata-file","l":"Example of mib.metadata file"},{"l":"Example of requests","p":["UniConfig currently supports read operation, with plans to add write operation in the future."]},{"l":"GET request"}],[{"l":"Updating installation parameters"},{"l":"Overview","p":["During device installation UniConfig creates a mount-point for this device and stores it in the database. This mount-point contains all parameters set by the user in the installation request. UniConfig supports a feature to update mount-point parameters. It is possible to use it for both NETCONF and CLI nodes."]},{"l":"Show installation parameters","p":["Parameters of the installed devices can be displayed using a GET request on the node. It is necessary to use the right topology. It should return the current node settings. See the following examples:","By default, both NETCONF and CLI topologies have the password parameter encrypted. This can be changed in the corresponding yang schema by adding/removing the extension flag \"frinx-encrypt:encrypt\".","CLI node","Output:","NETCONF node"]},{"l":"Update installation parameters","p":["To update node installation parameters it is possible to use a PUT request with updated request body that is copied from the GET request from the previous section. It is also possible to update single parameter with direct PUT call to specific parameter.","If the password parameter is set to be encrypted, changing it will encrypt the input value.","CLI node","Update multiple parameters. Specifically:","host","dry-run-journal-size","journal-size","Update single parameter:","NETCONF node","keepalive-delay","After these changes, when we use the GET requests from the \"Show installation parameters\" section, then we can see that the parameters have actually been changed. It is also possible to use the GET request for single parameter."]}],[{"l":"UniConfig-native CLI"},{"l":"Introduction","p":["UniConfig-native CLI allows user configuration of CLI-enabled devices using YANG models that describe configuration commands. 
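A minimal sketch of the parameter-update flow described above: first display the installed node's parameters with a GET request, then update a single parameter ('keepalive-delay') with a direct PUT. The paths, credentials and the example value are assumptions.

```bash
# Hedged sketch: show installation parameters of a NETCONF node ...
curl -X GET 'http://localhost:8181/rests/data/network-topology:network-topology/topology=topology-netconf/node=example' \
  -u admin:admin

# ... then update a single parameter with a direct PUT, as described above.
curl -X PUT 'http://localhost:8181/rests/data/network-topology:network-topology/topology=topology-netconf/node=example/netconf-node-topology:keepalive-delay' \
  -H 'Content-Type: application/json' -u admin:admin \
  -d '{"netconf-node-topology:keepalive-delay": 90}'
```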
In UniConfig-native CLI deployment translation units are defined only by YANG models and device-specific characteristics that are used for parsing and serialization of commands. Afterwards, readers and writers are automatically created and provided to translation registry - user doesn't write them individually. YANG models can be constructed by following of well-defined rules that are explained in Developer Guide.","Summarized characteristics of UniConfig-native CLI:","modelling of device configuration using YANG models,","automatic provisioning of readers and writers by generic translation unit,","simple translation units per device type that must define device-characteristics and set of YANG models."]},{"l":"Installation","p":["CLI device can be installed as native-CLI device by adding'uniconfig-config:uniconfig-native-enabled' flag with 'true' value into the mount request (by default, this flag is set to 'false'). It is also required to use tree parsing engine that is enabled by default. All other mount request parameters that can be applied for classic CLI mountpoints can also be used in native-CLI configuration with the same meaning.","The following example shows how to mount Cisco IOS XR 5.3.4 device as native-CLI device with enabled dry-run functionality:","After mounting of CLI node finishes, you can verify CLI mountpoint by fetching its Operational datastore:","You can see that there are some native models included in the'available-capabilities' plus basic mandatory capabilities for CLI mountpoints. Number of supported native capabilities depends on number of written models that are included in native-CLI translation unit for IOS XR 5.3.4, in this case. The only common capability for all native-CLI mountpoints is' http://frinx.io/yang/native/extensions?module=cli-native-extensions'. Sample list of native capabilities:","The synced configuration on UniConfig layer can be verified in the same way as for all types of devices:","Since sample device configuration contains both ACL and interface configuration and native-CLI IOS XR 5.* covers this configuration, the synced data looks like the next output:","The previous sample output corresponds to the following parts of the configuration on the device:"]},{"l":"Architecture","p":["The following section describes building blocks and automated processes that take place in UniConfig-native CLI."]},{"l":"Modules","p":["The following UML diagram shows dependencies between modules from which UniConfig native-cli is built. The core of the system is represented by'native-cli-unit' module in CLI layer that depends on CLI API for registration of units and readers and writers API. On the other side there are CLI-units that extend 'GenericCliNativeUnit'.","Dependencies","Description of modules:","utils-unit and translation-registry-api/spi: CLI layer API which native-cli units depend on. It defines interface for CLI readers/writers, translation unit collector that can be used for registration of native-CLI unit, and common 'TranslateUnit' interface.","native-cli-unit: It is responsible for automatic provisioning and registration of readers and writers (handlers) based on YANG modules that are defined in specific translation units. Readers and writers are initialized only for root container and list schema nodes defined in YANG models. All specific native-CLI units must be derived from abstract class 'GenericCliNativeUnit'.","ios-xr-5-native and junos-17-native: Specific native-CLI units derived from 'GenericCliNativeUnit'. 
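The native-CLI installation described above might look roughly like the sketch below. The 'uniconfig-config:uniconfig-native-enabled' flag and the dry-run journal come from the text; the 'cli' topology identifier, the remaining 'cli-topology:*' leaf names and all connection details are assumptions.

```bash
# Hedged sketch: mount an IOS XR 5.3.4 device as a native-CLI node with dry-run
# enabled. Leaf names for device type/version and all connection details are
# assumptions; the native-CLI flag comes from the text above.
curl -X PUT 'http://localhost:8181/rests/data/network-topology:network-topology/topology=cli/node=IOSXR' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "network-topology:node": [{
    "node-id": "IOSXR",
    "cli-topology:host": "192.168.1.214",
    "cli-topology:port": 22,
    "cli-topology:transport-type": "ssh",
    "cli-topology:device-type": "ios xr",
    "cli-topology:device-version": "5.3.4",
    "cli-topology:username": "cisco",
    "cli-topology:password": "cisco",
    "cli-topology:journal-size": 150,
    "cli-topology:dry-run-journal-size": 150,
    "uniconfig-config:uniconfig-native-enabled": true
  }]
}'
```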
To make native-CLI unit working, it must implement methods that provides list of YANG modules, list of root data object interface, supported device versions, unit name, and CLI flavour."]},{"l":"Registration of handlers","p":["Registration of native-CLI handlers is described by following sequence diagram in detail.","Handlers","Description of the process:","Searching for root schema node: Extraction of the root list and container schema nodes from nodes that are augmented to UniConfig topology.","Building of device template information: Extraction of device template information from imported template YANG modules. This template contains command used for displaying of whole device configuration, format of configuration command, and format of delete command.","Initialization of handlers: Creation of native-CLI config readers and writers or native-CLI list readers and writers in case of list schema nodes.","Registration of handlers: Registration of readers and writers in reader and writer registries. Readers are registered as generic config readers, whereas writers are registered as wildcarded subtree writers.","Since native-CLI readers are not registered as subtree readers, it is possible to directly read only root elements from CLI mountpoint. This constraint is caused by unsupported wildcarded subtree readers in Honeycomb framework."]},{"l":"Functionality of readers","p":["Config readers and config list readers in UniConfig-native CLI are implemented as generic readers that parse device configuration into structuralized format based on registered native-CLI YANG models. These readers are initialized and registered per root data schema node that is supported in native-CLI. The next sequence diagram shows process taken by generic reader on calling 'readCurrentAttributes(..)' method.","Readers","Description of the process:","Creation of the configuration tree: It represents current device configuration by sending of 'show' command which is responsible for displaying of whole device configuration.","Transformation of configuration tree: It is transformed into binding-independent NormalizedNode using 'ConfigTreeStreamReader' component.","Conversion into binding-aware format: Conversion of binding-independent NormalizedNode into binding-aware DataObject and population of DataObject builder by fields from built DataObject.","Configuration is parsed into structuralized form before it is actually transformed into NormalizedNodes (step 1) because of more modular and easier approach. Configuration tree consists of 3 types of nodes:","Command nodes: They are represented by the last identifiers of the commands (command word). These nodes don't have any children nodes.","Section nodes: These nodes are represented by the command word / identifier that opens a new configuration section. Section nodes can have multiple children nodes.","Connector nodes: Connector nodes are similar to section nodes with identifier and multiple possible children nodes. 
However, they don't open a new configuration section; they represent just one intermediary word in command line.","Example - parsing of interface commands into the tree structure:","Parsing","Detailed description of algorithm for transformation of configuration tree into DOM objects:","Transformation","If some commands are not covered by native-CLI YANG models, the parsing of configuration in readers will not fail - unsupported nodes will be skipped."]},{"l":"Functionality of writers","p":["Config writers and config list writers are responsible for serialization of structuralized data from datastore into series of configuration or delete command lines that are compatible with target device. Native CLI writers are also registered only for root schema nodes on the same paths as readers. The next sequence diagram shows process taken by generic writer on calling 'writeCurrentAttributes(..)' or'deleteCurrentAttributes(..)' method.","Writers","Description of the process:","Conversion into binding-independent format: Conversion of binding-aware DataObject into binding-independent NormalizedNode format. Binding-independent format is more suited for automated traversal and building when the target class types of nodes are not known before compilation of YANG schemas is done.","Generation of command lines: NormalizedNode is serialized using stream writer into configuration buckets that are afterwards serialized into separated command lines. Conversion of configuration buckets into command lines can be customized by different strategies. Currently only the primitive strategy is used - it creates for each leaf command argument the full command line from top root - nesting into configuration modes is not supported. This step is described in detail by next activity diagram.","Generation of configuration or delete command lines: It is done by application of configuration or delete template on command line - for example, JUNOS devices use prefix 'set' for applying of the configuration and prefix 'delete' for removal of configuration from device.","Squashing of command lines into single snippet: This is only optimization step - all command lines are joined together with newline separator.","Sending of command to the device(blocking operation).","Configuration buckets are created as intermediary step because of the modularity and flexibility for application of different serialization strategies in future. There are 3 types of created buckets that are wired with respective schema nodes:","Leaf bucket: Bucket that doesn't have any children but it has a value in addition to the identifier. It is created from LeafNode.","Composite bucket: Bucket with identifier and possibly multiple children buckets. It can be used for following types of DOM nodes: ContainerNode or MapEntryNode.","Delegating bucket: Bucket that doesn't have any identifier, it just delegates configuration to its children buckets. It can be used for nodes that are described by ChoiceNode or MapNode.","Command serialization","The current implementation processes updates in default way - the whole actual configuration is removed and then the whole updated configuration is written back to device. 
This strategy can cause slow down of the commit operation in case of longer configuration and because of this reason it is addressed as one of the future improvements."]}],[{"l":"UniConfig Operations"},{"i":"sending-and-receiving-data-restconf","l":"Sending and receiving data (RESTCONF)","p":["RESTCONF represents REST API to access datastores and UniConfig operations."]},{"l":"UniConfig Node Manager API","p":["The responsibility of this component is to maintain configuration on devices based on intended configuration. Each device and its configuration is represented as a node in the uniconfig topology and the configuration of this node is described by using OpenConfig YANG models. The Northbound API of Uniconfig Manager (UNM) is RPC driven and provides functionality for commit with automatic rollback and synchronization of configuration from the network."]},{"l":"Device discovery","p":["This component is used to check reachable devices in a network. The manager checks the reachability via the ICMP protocol. Afterwards, the manager is able to check whether various TCP/UDP ports are open or not."]},{"l":"Dry-run Manager API","p":["The manager provides functionality showing CLI commands which would be sent to network element."]},{"l":"Snapshot Manager API","p":["The snapshot manager creates and deletes uniconfig snapshots of actual uniconfig topology. Multiple snapshots can be created in the system."]},{"l":"Subtree Manager API","p":["The subtree manager copies (merge/replace) subtrees between source and target paths."]},{"l":"Templates Manager API","p":["This component is responsible for application of templates into UniConfig nodes."]},{"l":"Transaction Log API","p":["This component is responsible for tracking transactions."]},{"l":"UniConfig Queries","p":["Using this component it is possible to invoke JSONB-path queries on top of the stored configuration."]},{"i":"dedicated-transaction-immediate-commit-model","l":"Dedicated transaction (Immediate Commit Model)","p":["The immediate commit creates new transactions for every call of an RPC. The transaction is then closed so no lingering data will occur."]},{"l":"Utilities","p":["This sub-directory contains UniConfig utilities."]}],[{"l":"JSONB Filtering","p":["Jsonb-filter is a query parameter that is used for filtering data based on one or more parameters. This filter is an effective mechanism for filtering a list of items. Using the jsonb-filter we can retrieve only those list items that meet the defined conditions.","Currently, we have two options of how to use the JSONB filtering functionality."]},{"l":"Database JSONB Filtering","p":["The query parameter is located in the URI. This option is faster because filtering is happening on the database side but this filtering has fewer features."]},{"l":"Application JSONB Filtering","p":["A new Content-Type is added. The query parameter is added in the body. Additional query parameters can be chained (sort by, limit, fields). This request is sent as a POST request. This filtering adds more features, but it is happening on the UniConfig application side which will be slower than the database filtering."]}],[{"l":"Application JSONB Filtering","p":["Application JSONB filtering supports either the dot notation:","or the bracket–notation:"]},{"l":"Jsonb-filter expression","p":["Every filter operation is sent using a POST request. Additionally, a new Content-Type header has been made for application JSONB Filtering. 
An example can be seen below:","The filter is located in the body of the request, not in the URI. Since it is located in the body, there is no need to escape characters. The body structure looks like this:","If the user wants to filter the list elements based on name, the query filter would look like this:","By default, the filter returns the same output structure as when calling a GET request. There is an option to add the whole parent structure, where the body will look like this:","This will filter out all the elements in the list whose name is foo."]},{"l":"Operators","p":["..",".","[?()]","['' (, '')]","[ (, )]","[start:end]","@","*","$","Array index or indexes.","Array slice operator.","Bracket-notated child or children.","Deep scan. Available anywhere a name is required.","Description","Dot-notated child.","Filter expression. Expression must evaluate to a boolean value.","Operator","Operators mentioned in the table below are used to construct a path.","The current node being processed by a filter predicate.","The root element to query. This starts all path expressions.","Wildcard. Available anywhere a name or numeric are required."]},{"l":"Functions","p":["add an item to the json path output array","append(X)","avg()","concat(X)","Description","Double","Functions can be called at the end of the query path. The input to the function is the output of the path expression. The function output is dictated by the function itself.","Integer","keys()","length()","like input","max()","min()","Operator","Output Type","Provides a concatinated version of the path output with a new item","Provides the average value of an array of numbers","Provides the length of an array","Provides the max value of an array of numbers","Provides the min value of an array of numbers","Provides the property keys (An alternative for terminal tilde ~)","Provides the standard deviation value of an array of numbers","Provides the sum value of an array of numbers","Set","stddev()","sum()"]},{"l":"Filter Operators","p":["!=","<","<=","==","=~",">",">=","A double quote: [?(@.name == \"foo\")]","A single quote: [?(@.name == 'foo')]","anyof","Description","empty","Filters are logical expressions used to filter arrays. A typical filter would be [?(@.age > 18)] where @ represents the current element being processed. More complex filters can be created with logical operators && and ||. 
String literals must be enclosed by:","in","left (array or string) should be empty","left does not exists in right","left exists in right [?(@.size in ['S', 'M'])]","left has an intersection with right [?(@.sizes anyof ['M', 'L'])]","left has no intersection with right [?(@.sizes noneof ['M', 'L'])]","left is a subset of right [?(@.sizes subsetof ['S', 'M', 'L'])]","left is equal to right (note that 1 is not equal to '1')","left is greater than or equal to right","left is greater than right","left is less or equal to right","left is less than right","left is not equal to right","left matches regular expression [?(@.name =~ /foo.*?/i)]","nin","noneof","Operator","size","size of left (array or string) should match right","subsetof"]},{"l":"Jsonb-filter examples","p":["$..interface[?(@.speed <= $['fast'])]","$..interface[?(@.type =~/.* Csmacd/i)]","$..name","$.ietf-interfaces:interfaces..type","$.ietf-interfaces:interfaces.*","$.ietf-interfaces:interfaces.interface.length()","$.ietf-interfaces:interfaces.interface[-2:]","$.ietf-interfaces:interfaces.interface[-2]","$.ietf-interfaces:interfaces.interface[:2]","$.ietf-interfaces:interfaces.interface[?(@.enabled)]","$.ietf-interfaces:interfaces.interface[?(@.speed >= 10)]","$.ietf-interfaces:interfaces.interface[*].name","$.ietf-interfaces:interfaces.interface[0,1]","$.ietf-interfaces:interfaces.interface[1:2]","$.ietf-interfaces:interfaces.interface[2:]","$.ietf-interfaces:interfaces.interface[2]","All interfaces from index 0 (inclusive) until index 2 (exclusive)","All interfaces from index 1 (inclusive) until index 2 (exclusive)","All interfaces matching regex (ignore case)","All interfaces that are not 'fast'","All interfaces that have the enabled element","All interfaces whose speed is greater or equal than 10","All names","All things under interfaces","Description","Interface number two from tail","JsonPath","Suppose we have the following data, and we want to do some filtering on them.","The first two books","The last two interfaces","The names of all interfaces","The number of interfaces","The second to last book","The third interface","The type of everything"]}],[{"l":"Database JSONB Filtering","p":["The example of using the jsonb-filter query parameter: parent-path?jsonb-filter=expression","PostgreSQL documentation: JSON Functions and Operators"]},{"l":"Jsonb-filter expression","p":["!","!=","{$/Cisco-IOS-XR-ifmgr-cfg:interface-configurations/interface-configuration=%28%23act,GigabitEthernet0/0/0/2%29}","{$/frinx-openconfig-interfaces:interfaces/interface=%28%23MgmtEth0/RP0/CPU0/0%29}","&&","<","<=","<>","==",">",">=","||","Absolute path","Boolean AND","Boolean NOT","Boolean OR","Composite key:","Description","Equality operator","exists","false","Greater-than operator","Greater-than-or-equal-to operator","In this case, a path must be prefixed with $. This path must start with a top-level parent container","In this case, the path must be prefixed with <@>. This path is relative to the parent-path","is unknown","Less-than operator","Less-than-or-equal-to operator","like_regex","Non-equality operator","Non-equality operator (same as !=)","null","Operator","Path","Relative path","Single key:","Sometimes especially absolute paths can contain a key of some item with special characters. In this case it is necessary wrap this key in a special syntax (#example-key-name) and also encode these wrapping symbols - %28%23example-key-name%29. If the key is a composite key, it is necessary to wrap the whole key with these symbols. 
If the user is not sure if the path contains special characters, it is always recommended to use this special syntax.","starts with","Tests whether the first operand matches the regular expression given by the second operand","The base expression must contain path, operator and value. The jsonb-filter can contain one or more expressions joined with AND(&&) or OR (||) operator. if the && operator is used it must be encoded.","The last element of the jsonb-filter expression is a value based on which the user wants to filter the data.","The path to the data that the users want to filter. The path can be:","true","Value","Value used to perform a comparison with JSON false literal","Value used to perform a comparison with JSON null value","Value used to perform a comparison with JSON true literal","Value/Predicate Description","When the path is constructed then the user can use one of the operators in the table below"]},{"l":"Jsonb-filter examples","p":["1. Examples of using the relative paths in the jsonb-filter","Example of filtering the list of interfaces based on the enabled parameter where the equality operator is used as the operator","Example of filtering the list of interfaces based on the mtu parameter where the less-than is used as the operator","Example of filtering the list of interfaces based on the name parameter where the like_regex is used as the operator","Example of filtering the list of interfaces where a combination of expressions is used","Example of filtering the list of interfaces where the exists operator is used","2. Example of using the absolute path in the jsonb-filter","Example of filtering the list of interfaces based on the name parameter where equality operator is used as the operator. Interface name\"GigabitEthernet0/0/0/2\" is a key value that contains slashes. For this reason, it is necessary to wrap this key into wrapping symbols(#GigabitEthernet0/0/0/) and also encode these symbols%28%23GigabitEthernet0/0/0/2%29."]}],[{"l":"Snapshot Manager","p":["The snapshot manager creates and deletes UniConfig snapshots of actual UniConfig topology. Multiple snapshots can be created in the system.","Snapshots may be used for manual rollback. Manual rollback enables simple reconfiguration of the entire network using one of the previous states saved in snapshots. That means that UniConfig nodes in config datastore are replaced with UniConfig snapshot nodes."]},{"l":"Create snapshot"},{"l":"Delete snapshot"},{"l":"Replace config with snapshot"},{"l":"Obtain snapshot metadata"}],[{"l":"Obtaining snapshots-metadata","p":["Snapshots metadata contains list of created snapshots with the date of creation and list of nodes."]}],[{"l":"RPC create-snapshot","p":["RPC creates a snapshot from the nodes in UniConfig topology. Later, this snapshot can be used for manual rollback. RPC input contains the name of the snapshot topology and nodes that the snapshot will contain. Output of the RPC describes the result of operation and matches all input nodes. You cannot call an RPC with empty target-nodes. If one node failed for any reason, RPC will be fail entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the name for the topology snapshot and nodes that the snapshot contains. RPC output contains the result of operation."]},{"l":"Failed Example","p":["The RPC input includes nodes that will be contained in the snapshot, but a snapshot name is missing. 
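A rough sketch of the create-snapshot call discussed above. The RPC name and the notion of target nodes come from the text; the 'snapshot-manager' module prefix, the 'name' leaf and the nested 'target-nodes'/'node' structure are assumptions.

```bash
# Hedged sketch: create a snapshot of two UniConfig nodes.
# The 'snapshot-manager' prefix, 'name' leaf and 'target-nodes'/'node'
# structure are assumptions; the RPC name comes from the text above.
curl -X POST 'http://localhost:8181/rests/operations/snapshot-manager:create-snapshot' \
  -H 'Content-Type: application/json' -u admin:admin -d '{
  "input": {
    "name": "snapshot1",
    "target-nodes": {
      "node": ["R1", "R2"]
    }
  }
}'
```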
RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains a name for the topology snapshot and a node that will be contained in the snapshot. One has not been mounted yet. RPC output contains the result of the operation."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input does not contain the target nodes, so the RPC can not be executed."]}],[{"l":"RPC delete-snapshot","p":["RPC removes the snapshot from CONFIG datastore of UniConfig transaction. RPC input contains the name of the snapshot topology which should be removed. RPC output contains result of the operation."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the name of the snapshot topology which should be removed. RPC output contains the results of the operation."]},{"l":"Failed example","p":["RPC input contains the name of the snapshot topology which should be removed. The input snapshot name does not exist. RPC output contains the results of the operation."]}],[{"l":"RPC replace-config-with-snapshot","p":["The RPC replaces the nodes in UniConfig topology in the CONFIG datastore with selected nodes from specified snapshot. The RPC input contains the name of the snapshot topology and the target nodes which should replace the UniConfig nodes in the CONFIG datastore. Output of the RPC describes the result of the operation and matches all input nodes. You cannot call an RPC with empty target-nodes. If one node failed for any reason, RPC will be fail entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from UniConfig topology in the CONFIG datastore and list of nodes from that snapshot. RPC output contains the result of the operation."]},{"l":"Failed Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from UniConfig topology in the CONFIG datastore and list of nodes from that snapshot. The snapshot with name (snapshot2) has not been created yet. RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from UniConfig topology in the CONFIG datastore and list of nodes from that snapshot. The snapshot name is missing in the RPC input. The RPC output contains the result of the operation."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains the name of the snapshot topology which should replace nodes from UniConfig topology in the CONFIG datastore and list of nodes from that snapshot. One node is missing in snapshot1 (IOSXRN). RPC output contains the result of the operation."]},{"i":"failed-example-3","l":"Failed Example","p":["RPC input does not contain the target nodes, so RPC can not be executed."]}],[{"l":"Subtree Manager","p":["The subtree manager copies (merge/replace) subtrees between source and target paths in Configuration or Operational datastore of UniConfig. When one of these RPCs is called, Subtree Manager (SM) reads the configuration from the source path and according to type of operation(merge / replace), copies the subtree data to target path. Target path is a parent path UNDER which data is copied. SM also distinguishes type of source / target datastore.","All RPCs support merging/replacing of configuration between two different schemas ('version drop' feature). 
This feature is handy, when it is necessary to copy some configuration between two mounted nodes that are described by slightly different YANG schemas. The following changes between schemas are tolerated:","Skipping non-existing composite nodes and leaves,","Adjusting namespace and revision in node identifiers, only name of nodes must match with target schema,","Moving nodes between choice and augmentation schema nodes,","Adjusting value format to target type definition of leaf or leaf-list schema node."]},{"l":"RPC copy-one-to-one","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC copy-one-to-many","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC copy-many-to-one","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC calculate-subtree-diff","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC calculate-subtree-git-like-diff","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC bulk-edit","p":["Applies multiple modifications to a list of target nodes. RPC bulk-edit"]}],[{"l":"RPC bulk-edit","p":["The bulk-edit operation can be used to modify multiple configuration subtrees under multiple target nodes from the 'uniconfig', 'templates' or 'unistore' topology (the same list of modifications are applied to all listed target nodes). The bulk-edit operation is executed atomically - either all modifications are applied on all target nodes successfully, or the operation fails and the configuration is not touched in the UniConfig transaction. This RPC also benefits from parallel processing of changes per target node."]},{"l":"RPC input","p":["RPC input specifies a list of target nodes and a list of modifications that must be applied under target nodes:","Description of input fields:","topology-id(mandatory): Identifier for the topology which contains all target nodes. Currently supported topologies: uniconfig, templates, unistore.","node-id(optional): List of target nodes identifiers residing in the specified topology. If this field is not specified or is empty, RPC is executed on all available nodes in the specified topology.","edit(mandatory with at least 1 entry): List of modifications. Each modification is uniquely identified by the 'path' key. Modifications are applied in the preserved user-defined order.","Description of fields in the edit entry:","path(mandatory): Path encoded using the RFC-8040 format. Specified as relative path to root'configuration' container. If this leaf contains a single character '/', the path points to the whole configuration. If this path contains a list node without key, the operation is applied to all list node elements.","operation(mandatory): Operation that must be executed on the specified path. Supported operations are 'merge', 'replace', and 'remove'. 
Operations 'merge' and 'replace' requires to also specify input 'data'.","data(optional): Content of the replaced or merged data without wrapping parent element(the last element of the path is not declared in the 'data', see examples on how to correctly specify content of this leaf in different use-cases).","Supported operations:","merge: Supplied value is merged with the target data node.","replace: Supplied value is used to replace the target data node.","remove: Delete target node if it exists."]},{"l":"RPC output","p":["RPC output contains the global status of the executed operation and per-node status.","Description of output fields:","overall-status: Status of operation. If RPC execution fails on at least one of the target nodes, the overall status is set to 'fail'. Otherwise, status is set to 'complete'.","error-message: \"Reason for the failure. Used if there is a structural error in the RPC input that does not relate to one specific target node.\"","node-result: Results of RPC execution divided per target node ('node-id' is the key of the list).","Description of fields in the node-result entry:","node-id: Identifier for the target node.","status: Status of bulk-edit operation on this node. This value is set to 'complete' only if all modifications have been successfully written into UniConfig transaction (including other nodes). Otherwise, the value is set to 'fail'.","error-message: Reason for the failure. This field appears in the output only if RPC execution failed on this target node.","error-type: Categorized error type."]},{"l":"RPC examples"},{"l":"Successful example","p":["The following request demonstrates the application of six (6) modifications to four (4) templates:","Replace the value of the 'description' leaf.","Remove the 'snmp' container.","Replace the whole 'ssh' container.","Merge the configuration of the 'routing-protocol' list entry.","Merge the whole 'tree' list with the specified multiple list entries.","Replace the leaf-list 'services' with the provided array of strings.","The response contains the overall status 'complete' and per-node status 'complete' - all modifications have been successfully written into the UniConfig transaction."]},{"l":"Failed example","p":["The following example demonstrates the execution of a bulk-edit operation that fails on parsing one of the paths using YANG schemas of the device 'dev02'.","The RPC response contains the overall status 'fail'. There is one error message in the result of 'dev02'. Note that the 'dev01' result also contains the 'fail' status, as modifications have not been written to this node since another node ('dev02') failed during execution of the operation."]}],[{"l":"RPC calculate-subtree-diff","p":["This RPC creates a diff between the source topology subtrees and target topology subtrees. Supported features:","Comparison of subtrees under same network-topology node.","Comparison of subtrees between different network-topology nodes that use same YANG schemas.","Comparison of subtrees with different revisions of YANGs schema that are syntactically compatible(for example, different software versions of devices).","RPC input contains data-tree paths ('source-path' and 'target-path') and data locations('source-datastore' and 'target-datastore'). Data location is the enumeration of two possible values, 'OPERATIONAL' and 'CONFIGURATION'. 
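Returning to the bulk-edit RPC described above, the sketch below applies one 'replace' and one 'remove' modification to two template nodes. The input fields 'topology-id', 'node-id' and 'edit' (with 'path', 'operation' and 'data') follow the field descriptions in the bulk-edit section; the URL, module prefix, node names, paths and data content are hypothetical.

```bash
# Illustrative bulk-edit call; everything below 'input' that is not a documented
# field name (paths, data values, template names) is a placeholder.
curl -u admin:admin -X POST \
  -H "Content-Type: application/json" \
  "http://localhost:8181/rests/operations/uniconfig-manager:bulk-edit" \
  -d '{
    "input": {
      "topology-id": "templates",
      "node-id": ["template_1", "template_2"],
      "edit": [
        { "path": "/system:system/ssh",
          "operation": "replace",
          "data": { "port": 22, "timeout": 30 } },
        { "path": "/system:system/snmp",
          "operation": "remove" }
      ]
    }
  }'
```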
The default value of 'source-datastore' is 'OPERATIONAL' and default value of 'target-datastore' is 'CONFIGURATION'.","RPC output contains a list of differences between source and target subtrees.","RPC calculate-subtree-dif"]},{"l":"RPC Examples"},{"i":"successful-example-computed-difference","l":"Successful example: Computed difference","p":["RPC calculate-subtree-diff input has a path to two different testtool devices with different YANG schemas. Output contains a list of statements representing the diff."]},{"i":"successful-example-no-difference","l":"Successful example: No difference","p":["The following output demonstrates a situation with no changes between specified subtrees."]},{"i":"failed-example-invalid-value-in-input-field","l":"Failed example: Invalid value in input field","p":["RPC calculate-subtree-diff has an improperly defined datastore (AAA) within the input. Output describes the Allowed values [CONFIGURATION, OPERATIONAL]."]},{"i":"failed-example-missing-mandatory-field","l":"Failed example: Missing mandatory field","p":["RPC input does not contain the mandatory source path."]}],[{"l":"RPC calculate-subtree-git-like-diff","p":["This RPC creates a diff between the source topology subtrees and target topology subtrees. Supported features:","Comparison of subtrees under same network-topology node.","Comparison of subtrees between different network-topology nodes that use same YANG schemas.","Comparison of subtrees with different revisions of YANGs schema that are syntactically compatible(for example, different software versions of devices).","RPC input contains data-tree paths ('source-path' and 'target-path') and data locations('source-datastore' and 'target-datastore'). Data location is the enumeration of two possible values, 'OPERATIONAL' and 'CONFIGURATION'. The default value of 'source-datastore' is 'OPERATIONAL' and default value of 'target-datastore' is 'CONFIGURATION'.","RPC output contains differences between source and target subtrees formatted in a git-like style. The changes are grouped by root entities in the configuration."]},{"l":"RPC Examples"},{"i":"successful-example-computed-difference","l":"Successful example: Computed difference","p":["RPC calculate-subtree-git-like-diff input includes the path to two interfaces on different nodes. Both data locations are placed in the CONFIGURATION datastore. Output contains a list of all the changes. Multiple changes that occur under the same root element are merged together."]},{"i":"successful-example-no-difference","l":"Successful example: No difference","p":["The following output demonstrates a situation with no changes between specified subtrees."]},{"i":"failed-example-missing-mandatory-field","l":"Failed example: Missing mandatory field","p":["RPC input does not contain the mandatory target path."]}],[{"l":"RPC copy-many-to-one","p":["RPC input contains:","type of operation - 'merge' or 'replace',","type of source datastore - CONFIGURATION / OPERATIONAL,","type of target datastore - CONFIGURATION / OPERATIONAL,","list of source paths in RFC-8040 URI formatting,","target path in RFC-8040 URI formatting (target path denotes parent entities under which configuration is copied).","Target datastore is optional input field. By default, it is the same as source datastore. Other input fields are mandatory, so it is forbidden to call RPC with missing mandatory field. Output of RPC describes result of copy to target path RPC. 
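A hedged sketch of a copy-many-to-one request matching the input fields just listed, modelled on the successful example described in this section ('snmp' and 'ntp' subtrees copied under the 'system:system' container of 'dev04'). The module prefix, the leaf names ('operation', 'source-datastore', 'target-datastore', 'source-paths', 'target-path') and the exact path spelling are assumptions; the text only prescribes the kinds of fields and the RFC-8040 path format.

```bash
# Illustrative: merge two source subtrees under dev04's system container.
curl -u admin:admin -X POST \
  -H "Content-Type: application/json" \
  "http://localhost:8181/rests/operations/subtree-manager:copy-many-to-one" \
  -d '{
    "input": {
      "operation": "merge",
      "source-datastore": "CONFIGURATION",
      "target-datastore": "CONFIGURATION",
      "source-paths": [
        "/network-topology:network-topology/topology=uniconfig/node=dev01/configuration/system:system/snmp",
        "/network-topology:network-topology/topology=uniconfig/node=dev02/configuration/system:system/ntp"
      ],
      "target-path": "/network-topology:network-topology/topology=uniconfig/node=dev04/configuration/system:system"
    }
  }'
```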
If one path failed for any reason, RPC will be failed overall and no modification will be done to datastore - all modifications are done in the single atomic transaction.","Description of RPC copy-many-to-one is on figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates execution of copy-many-to-one RPC with 3 source paths. Data that is described by these source paths('snmp', 'access', and 'ntp' containers under three different nodes) will be copied under root 'system:system' container ('dev04' node)."]},{"l":"Failed example","p":["The following example shows failed copy-many-to-one RPC. One of the source paths points to non-existing schema node ('invalid:invalid')."]}],[{"l":"RPC copy-one-to-many","p":["RPC input contains:","type of operation - 'merge' or 'replace',","type of source datastore - CONFIGURATION / OPERATIONAL,","type of target datastore - CONFIGURATION / OPERATIONAL,","source path in RFC-8040 URI formatting, list of target paths in RFC-8040 URI formatting (target paths denote parent entities under which configuration is copied).","Target datastore is optional input field. By default, it is the same as source datastore. Other input fields are mandatory, so it is forbidden to call RPC with missing mandatory field. Output of RPC describes result of copy to target paths RPC. If one path failed for any reason, RPC will be failed overall and no modification will be done to datastore - all modifications are done in the single atomic transaction.","Description of RPC copy-one-to-many is on figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates merging of ethernet interface configuration from single source into interfaces 'eth-0/2' (node'dev02'), 'eth-0/3' (node 'dev02'), 'eth-0/100' (node 'dev03'), and'eth-0/200' (node 'dev03')."]},{"l":"Failed example","p":["The next example shows failed copy-one-to-many RPC - both target paths are invalid since 'ext' list schema nodes doesn't contain'interfaces:interfaces' child container."]}],[{"l":"RPC copy-one-to-one","p":["RPC input contains:","type of operation - 'merge' or 'replace',","type of source datastore - CONFIGURATION / OPERATIONAL,","type of target datastore - CONFIGURATION / OPERATIONAL,","source path in RFC-8040 URI formatting,","target path in RFC-8040 URI formatting (target path denote parent entities under which configuration is copied).","Target datastore is optional input field. By default, it is the same as source datastore. Other input fields are mandatory, so there is forbidden to call RPC with missing mandatory field. Output of RPC describes result of copy to target path operation. If RPC failed for some reason, RPC will be failed and no modification will be done to datastore.","Description of RPC copy-one-to-one is on figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates coping of whole 'org:orgs' container from 'dev01' to 'dev02' node under 'uniconfig' topology. Replace operation is used."]},{"l":"Failed example","p":["The following example shows failed copy-one-to-one RPC. Input contains specified source datastore (target datastore is the same), merge operation, source path, and target path. In that example target path is invalid, because it doesn't contain 'org:orgs' container in the schema tree."]}],[{"l":"Transaction Log","p":["The transaction log consists of a transaction tracker and a revert-changes RPC. 
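For the copy-one-to-one RPC described above, here is a minimal hedged sketch based on its successful example (replacing the 'org:orgs' container of 'dev02' with the one from 'dev01'). As before, the module prefix, leaf names and path spelling are assumptions; note that the target path is the parent under which the copied data is placed.

```bash
# Illustrative: replace dev02's 'org:orgs' container with the one read from dev01.
curl -u admin:admin -X POST \
  -H "Content-Type: application/json" \
  "http://localhost:8181/rests/operations/subtree-manager:copy-one-to-one" \
  -d '{
    "input": {
      "operation": "replace",
      "source-datastore": "CONFIGURATION",
      "source-path": "/network-topology:network-topology/topology=uniconfig/node=dev01/configuration/org:orgs",
      "target-path": "/network-topology:network-topology/topology=uniconfig/node=dev02/configuration"
    }
  }'
```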
The transaction tracker stores information called transaction-metadata about performed transactions in the operational snapshot, whereas the revert-changes RPC can be used to revert changes that have been made in a specific transaction. The user only needs the ID of the transaction for that. One or more transactions can be reverted using one revert-changes RPC."]},{"l":"RPC revert-changes"},{"l":"Transaction tracker"}],[{"l":"RPC revert-changes","p":["This RPC reverts changes that were configured within one transaction. If a user wants to revert a single transaction or multiple transactions, they must find the transaction IDs and paste them into the body of the RPC. The transaction-id is part of the transaction-metadata that is created by the transaction tracker after a commit/checked-commit RPC.","RPC revert-changes updates data only in the CONFIGURATION Snapshot. To write the reverted data to the device, the commit RPC must be used after revert-changes."]},{"l":"Ignore non-existent nodes","p":["If a user wants to revert multiple transactions, the metadata of some transactions may contain nodes that do not currently exist in UniConfig. In this case, the RPC fails. The user has a choice of two options:","remove transactions that contain non-existent nodes from the request body","add the 'ignore-non-existing-nodes' parameter to the RPC request body with a value of 'true' (default: 'false')","If the user does not use the 'ignore-non-existing-nodes' parameter, the default value 'false' is automatically used."]},{"l":"RPC Examples"},{"l":"Successful examples","p":["Before reverting a transaction we need to know its ID. We will use the GET request to display all stored transaction-metadata.","Reverting changes of a single transaction.","Reverting changes of multiple transactions.","Reverting changes of multiple transactions, where the transaction with id '2c4c1eb5-185a-4204-8021-2ea05ba2c2c1' contains the non-existent node 'R1'. In this case 'ignore-non-existing-nodes' with a value of 'true' is used, and therefore the RPC will be successful."]},{"l":"Failed example","p":["This is a case when the revert-changes request contains a non-existent transaction in the request body.","Reverting changes of multiple transactions, where the transaction metadata with id '2c4c1eb5-185a-4204-8021-2ea05ba2c2c1' contains a non-existent node. In this case 'ignore-non-existing-nodes' with a value of 'false' is used, and therefore the RPC fails."]}],[{"l":"Transaction tracker"},{"l":"Introduction","p":["The transaction tracker is responsible for saving transaction-metadata to the operational snapshot after a successfully executed commit/checked-commit RPC. The transaction-metadata contains information about performed transactions, such as:","transaction-id - Identifier of the transaction.","type-of-commit-time - Timestamp of either 'last-commit-time', when the transaction was successful, or 'failed-commit-time', when the transaction failed. If multiple devices are configured, then the 'last-commit-time' will contain the timestamp of the last update on the last device.","metadata - Items in this field represent nodes that have been configured in one transaction. Each item contains a diff item with additional information.","diff - Items in this field are the specific changes. Each item contains the path to the change, the data before the change and the data after the change. In case of a failed transaction this information is not present.","topology - The topology on which the node is installed. 
Can be 'uniconfig' or 'unistore'.","Data-before is visible only if data was updated or deleted. Data-after is visible only if data was updated or created."]},{"l":"Configuration","p":["UniConfig stores transaction metadata only if the 'lighty-uniconfig-config.json' file contains a \"maxStoredTransactions\" parameter in the \"transactions\" container and its value is greater than 0. It is necessary to make this setting before running UniConfig, otherwise the \"maxStoredTransactions\" parameter keeps its default value '0' and the transaction log is disabled."]},{"l":"Show transaction-metadata","p":["The response to this GET request contains all stored transaction-metadata, their IDs and other items such as node-id and the data before and after each update."]}],[{"l":"UniConfig Node Manager","p":["An additional git-like diff RPC was created that shows all the changes grouped under root elements in a git-like style.","In the case where the configuration of one device fails, the UNM executes an automatic rollback where the previous configuration is restored on all modified devices.","RPC calculate-diff","RPC calculate-git-like-diff","RPC check-installed-nodes","RPC checked-commit","RPC commit","RPC compare-config","RPC get-installed-nodes","RPC health","RPC install-multiple-nodes","RPC is-in-sync","RPC replace-config-with-operational","RPC sync-from-network","RPC uninstall-multiple-nodes","RPC validate","Synchronization from the network reads configuration from devices and stores it as the actual state in the OPER DS.","The responsibility of this component is to maintain configuration on devices based on the intended configuration. Each device and its configuration is represented as a node in the uniconfig topology and the configuration of this node is described by using OpenConfig YANG models. The Northbound API of the UniConfig Manager (UNM) is RPC-driven and provides functionality for commit with automatic rollback and synchronization of configuration from the network.","When a commit is called, the UNM creates a diff based on the intended state from the CONFIG DS and the actual state from the OPER DS. This diff is used as the basis for device configuration. The UNM prepares a network-wide transaction which uses Unified mountpoints for communication with different types of devices."]}],[{"l":"RPC calculate-diff","p":["This RPC creates a diff between the actual UniConfig topology nodes and the intended UniConfig topology nodes. The RPC input contains a list of UniConfig nodes to calculate the diff. Output of the RPC contains a list of statements representing the diff. It also matches all input nodes. If the RPC is called with an empty list of target nodes, the diff is calculated for each modified node in the UniConfig transaction. If some node fails for any reason, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC calculate-diff input has two target nodes and the output contains a list of statements representing the diff."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC calculate-diff input does not contain the target nodes, calculate-diff will be invoked on top of all touched nodes in the transaction.","or"]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC calculate-diff input has a target node and there is no diff."]},{"l":"Failed Example","p":["The RPC calculate-diff input has a target node. Node 'R2' has not been installed yet. 
The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC calculate-diff input has two target nodes. One of the nodes,'R2', has not been installed yet. The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error.","or"]}],[{"l":"RPC calculate-git-like-diff","p":["This RPC creates a diff between the actual UniConfig topology nodes and the intended UniConfig topology nodes. The RPC input contains a list of UniConfig nodes to calculate the diff. Output of the RPC contains a list of statements representing the diff in a git-like style. It checks for every touched node in the transaction if target nodes are not specified in the input. If some node fails, the RPC will fail entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC calculate-git-like-diff input has two target nodes and the output contains a list of statements representing the diff."]},{"i":"successful-example-1","l":"Successful Example","p":["The RPC calculate-git-like-diff input has no target nodes specified, so it will look for all touched nodes in the transaction, and the output will contain a list of all changes on different paths. Multiple changes that occur under the same path are merged together."]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC calculate-git-like-diff input has target node and there is no diff."]},{"l":"Failed Example","p":["The RPC calculate-git-like-diff input has target node. Nodes 'R2' has not been installed yet. The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC calculate-git-like-diff input has two target nodes. One of the nodes,'R1', has not been installed yet. The output describes the result of the calculate-git-like-diff RPC."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC check-installed-nodes","p":["This RPC checks if devices included in the input are installed by looking for the database content of each device. If content is found, the device is installed."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a device while no devices are installed."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and device R1 is installed."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains devices (R1 and R2) and both devices are installed."]},{"l":"Failed Example","p":["RPC input does not specify any nodes."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input is missing the target-nodes container."]}],[{"l":"RPC checked-commit","p":["The trigger for execution of the checked configuration is RPC checked-commit. A checked commit is similar to an RPC commit, but it also checks if nodes are in sync with the network before it starts configuration. RPC fails if any node is out of sync. Output of the RPC describes the result of the commit and matches all modified nodes in the UniConfig transaction. 
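The node-scoped RPCs in this chapter (calculate-diff, checked-commit, sync-from-network, validate, compare-config and others) accept the same kind of target-nodes input. A minimal hedged sketch using calculate-diff is shown below; the '/rests/operations' URL, module prefix and the 'target-nodes'/'node' leaf names are assumptions, while the node identifiers are placeholders.

```bash
# Illustrative: calculate the diff for two explicitly listed nodes.
curl -u admin:admin -X POST \
  -H "Content-Type: application/json" \
  "http://localhost:8181/rests/operations/uniconfig-manager:calculate-diff" \
  -d '{ "input": { "target-nodes": { "node": ["R1", "R2"] } } }'
```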
If one node failed for any reason, RPC will fail entirely.","In comparison to commit RPC, there is one additional phase between 'lock and validate configured nodes' and 'write configuration into device' phases:","Lock and validate configured nodes","Check if nodes are in-sync with state on devices","Write configuration into device","Validate configuration","Confirmed commit","Confirming commit (submit configuration)","Following diagram captures check if configuration fingerprints in the transaction datastore and device are equal.","There is a difference between fingerprint-based validation in the phases 1 and 2. The goal of the first phase is validation if other transaction has already changed the same node by comparison of fingerprint in the UniConfig transaction and in the database. On the other side, the second phase validates if fingerprint in the transaction equals to fingerprint on the device - if another system / directly user via CLI has updated device configuration since the beginning of the transaction."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["Configuration of nodes 'R1' and 'R2' has been changed in the transaction. Both 'R1' and 'R2' are in-sync with actual state on the device. RPC checked-commit input invoke all touched nodes."]},{"l":"Failed Example","p":["Configuration of nodes 'R1' and 'R2' has been changed in the transaction. Both 'R1' and 'R2' are in-sync with actual state on the device. Node 'R1' has failed due to improper configuration. The output describes the result of the checked-commit RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["Configuration of nodes 'R1' has been changed in the transaction. Node 'R1' is in-sync with actual state on the device. Node 'R1' has failed on the changed fingerprint. The output describes the result of the checked-commit."]},{"i":"failed-example-2","l":"Failed Example","p":["Node 'R2' has lost connection."]},{"i":"failed-example-3","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC commit","p":["1. Lock and validate configured nodes","2. Write configuration into device","3. Validate configuration","4. Confirmed commit","5. Confirming commit (submit configuration)","Configuration phase","Confirmed commit","Confirmed commit - It is used for locking of device configuration, so no other transaction can touch this device. This phase can be skipped with \"do-confirmed-commit\" flag.","Confirming commit","Confirming commit (submit configuration) - Persisting all changes on devices and in the PostgreSQL database. UniConfig transaction is closed.","If one of the nodes uses a confirmed commit (phase 4), which does not fail, then it is necessary to issue the submitted configuration (phase 5) within the timeout period. Otherwise, the node configuration issued by the confirmed commit will be reverted to its state before the confirmed commit (i.e. confirmed commit makes only temporary configuration changes). 
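A hedged sketch of a commit request that uses the flags mentioned in this section ('do-confirmed-commit', 'do-validate', 'do-rollback'). Whether these flags sit directly under 'input', and the module prefix of the RPC, are assumptions; only the flag names and their meaning come from the text.

```bash
# Illustrative commit of all touched nodes: validation and rollback kept,
# confirmed-commit phase skipped.
curl -u admin:admin -X POST \
  -H "Content-Type: application/json" \
  "http://localhost:8181/rests/operations/uniconfig-manager:commit" \
  -d '{
    "input": {
      "do-confirmed-commit": false,
      "do-validate": true,
      "do-rollback": true
    }
  }'
```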
The timeout period is 600 seconds (10 minutes) by default, but the user can change it in the installation request.","Lock and validate configured nodes - Locking all modified nodes using PostgreSQL advisory locks and validation of fingerprints - if another transaction tries to commit overlapping nodes or different transaction has already changed one of the nodes, then commit will fail at this step.","Locking nodes","Next diagram describe the first phase of commit RPC - locking of changes nodes in the PostgreSQL database and verification if other transaction has already committed overlapping nodes.","Next diagrams describe all 5 commit phases in detail:","Rollback - It is used for restoring of configuration to previous state, if the configuration process fails. When configuring more devices in a single transaction and the process fails on one particular device, the rollback procedure will be applied to all touched devices. This is done by auto rollback procedure, which is by default turned on. It can be switched off by setting up'do-rollback' flag in input of Commit RPC request. Then only failed devices will be rollbacked.","Rollback operation","RPC commit Commit invoke all touched nodes in transaction. There are no target nodes in the RPC input.","The 'skip-unreachable-nodes' flag controls whether unreachable nodes are skipped when the RPC commit is sent. If set to 'true', nodes that are not reachable are skipped and others are configured. The default value is 'false'.","The configuration of nodes consists of the following phases:","The external application stores the intended configuration under nodes in the UniConfig topology. The trigger for execution of configuration is an RPC commit. Output of the RPC describes the result of the commit.","The last diagram shows rollback procedure that must be executed after failed commit on nodes that have already been configured and don't support 'candidate' datastore.","The third and fourth phases take place only on the nodes that support these operations. If one node failed in the random phase for any reason the RPC will fail entirely. After commit RPC, UniConfig transaction is closed regardless of the commit result.","Validate configuration - Validation of written configuration from the view of constraints and consistency. This phase can be skipped with \"do-validate\" flag.","Validation phase","Write configuration into device - Pushing calculated changes into device without committing of these changes."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["UniConfig commits nodes 'R1' and 'R2' that has been changed in the actual transaction."]},{"i":"successful-example-1","l":"Successful Example","p":["Nodes 'R1' and 'R2' has been changed. RPC commit input has the flag to disable confirmed-commit phase. UniConfig commits all touched nodes."]},{"i":"successful-example-2","l":"Successful Example","p":["If there are not any touched nodes, the request will finish successfully."]},{"l":"Failed Example","p":["Node 'R1' has failed because of failed validation phase."]},{"i":"failed-example-1","l":"Failed Example","p":["Node 'R1' has failed because the confirmed commit failed. 
Validation phase was skipped due to false \"do-validate\" flag."]},{"i":"failed-example-2","l":"Failed Example","p":["Node 'R1' has failed because of the time delay between the confirmed commit and the submitted configuration."]},{"i":"failed-example-3","l":"Failed Example","p":["Node 'R1' has failed due to improper configuration."]},{"i":"failed-example-4","l":"Failed Example","p":["Node 'R1' has lost connection."]},{"i":"failed-example-5","l":"Failed Example","p":["Node 'R1' has failed because of wrong configuration. In this case validation, confirm-commit and auto-rollback were switched off. Because auto-rollback is switched off, configuration of 'R1' device was successful. However, this can be done only if validation and confirm-commit phase were successful or skipped, otherwise configuration of 'R1' device would also fail."]},{"i":"failed-example-6","l":"Failed Example","p":["Configuration of nodes 'R1' nad 'R2' has been changed in the transaction and both are in-sync with actual state on the device. Then connection of node 'R2' has been lost. RPC commit input has the flag to skip unreachable nodes set to true. Result of the commit RPC describes success of 'R1' node and shows list of unreachable nodes."]}],[{"l":"RPC compare-config","p":["This RPC is a combination of the sync-from-network and calculate-diff RPCs. If one of those RPCs fails, this one also fails with no changes made.","The purpose of this RPC is to synchronize configurations from network devices to UniConfig nodes in the Configuration datastore of the UniConfig transaction.","The RPC input contains a list of UniConfig nodes which configuration should be compared to actual configuration in the transaction The output of the RPC describes the result of compare-config and matches all input nodes with a list of statements representing the diff."]},{"l":"RPC Examples"},{"l":"Successful Example"},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, configuration of all touched nodes in the transaction is compared to synced device configuration."]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC compare-config input has target node and there is no diff."]},{"l":"Failed Example","p":["The RPC compare-config input has two target nodes. One of the nodes,'R2', has not been installed yet. The output describes the result of the sync-from-network.","If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]},{"i":"failed-example-1","l":"Failed Example"}],[{"l":"RPC get-installed-nodes","p":["This RPC returns all installed devices from a specified topology.","If no topology is specified, the output may contain devices from multiple topologies (CLI, NETCONF, gNMI). In this case, devices must be installed with the install request parameter \"uniconfig-config:install-uniconfig-node-enabled\" set to \"true\". The RPC with no topology looks for nodes installed under the UNICONFIG topology by default."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The RPC contains no topology defined in input and device called 'R1' is installed in the NETCONF topology. With parameter\"uniconfig-config:install-uniconfig-node-enabled\":\"true\" in install request is installed under UNICONFIG topology."]},{"i":"successful-example-1","l":"Successful example","p":["The RPC input contains no topology and device called 'R1' is installed in the NETCONF topology. 
With parameter \"uniconfig-config:install-uniconfig-node-enabled\":\"false\" in install request is not installed under UNICONFIG topology."]},{"i":"successful-example-2","l":"Successful example","p":["The RPC input contains the GNMI topology and device called 'R1' is installed in the topology."]},{"i":"successful-example-3","l":"Successful example","p":["The RPC input contains the CLI topology, but no devices are installed in the topology."]}],[{"l":"RPC health","p":["This RPC checks if UniConfig is running. If database persistence is enabled it checks database connection too."]},{"l":"RPC Examples","p":["RPC health input is empty and RPC output contains result of operation.","Response when database persistence is disabled:","Response when database persistence is enabled and database connection is valid:","Response when database persistence is enabled and database connection is not valid:"]}],[{"l":"RPC install-multiple-nodes","p":["This RPC installs multiple devices at once. It uses the default install-node RPC. Devices are installed in parallel."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains two devices (R1 and R2)."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and R2 uses two different protocols."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains two devices (R1 and R2) and R2 is already installed using CLI protocol."]},{"l":"Failed Example","p":["RPC input does not specify node-id."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains two devices using the same node-id."]}],[{"l":"RPC is-in-sync","p":["This RPC can be used for verification whether the specified nodes are in-sync with the current state in the Operational datastore of UniConfig transaction. This verification is done by comparison of configuration fingerprints. The configuration fingerprint on the device is compared with the last configuration fingerprint saved in the Operational datastore. A fingerprint is usually represented by a configuration timestamp or the last transaction ID. The is-in-sync feature is supported only for device types that have implemented translation units for the 'frinx-configuration-metadata' OpenConfig module (using cli units, netconf units, or uniconfig-native metadata units).","The RPC input contains a list of UniConfig nodes for which the verification should be completed ('target-nodes' field). Response comprises the operation status for each of the nodes that was specified in the RPC input. If the operation failed it is because the specified node has not been successfully installed or connection has been lost or uniconfig doesn't have support for reading of configuration fingerprint from specific device type. 
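Returning to the install-multiple-nodes RPC described earlier in this chapter, the sketch below installs one NETCONF and one CLI device in parallel. Only the fact that the RPC wraps the standard install-node input for several devices comes from the text; the 'nodes' list, the per-protocol blocks and all host/credential values are illustrative assumptions.

```bash
# Illustrative parallel install of R1 (NETCONF) and R2 (CLI); field names below
# 'input' are assumptions modelled on the per-protocol install described here.
curl -u admin:admin -X POST \
  -H "Content-Type: application/json" \
  "http://localhost:8181/rests/operations/connection-manager:install-multiple-nodes" \
  -d '{
    "input": {
      "nodes": [
        { "node-id": "R1",
          "netconf": { "netconf-node-topology:host": "192.168.1.211",
                       "netconf-node-topology:port": 830,
                       "netconf-node-topology:username": "admin",
                       "netconf-node-topology:password": "admin" } },
        { "node-id": "R2",
          "cli": { "cli-topology:host": "192.168.1.212",
                   "cli-topology:port": 22,
                   "cli-topology:username": "admin",
                   "cli-topology:password": "admin" } }
      ]
    }
  }'
```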
Calling RPC with empty list of target nodes will result in invocation of RPC for each node that has been modified in the UniConfig transaction.","Possible RPC outputs per target node:","'status' field with value 'complete' with set 'is-in-sync' boolean flag; is-in-sync feature is supported and the configuration fingerprints have been successfully compared.","'status' field with value 'fail' with set 'error-type' to'no-connection' and corresponding 'error-message'; Unified mountpoint doesn't exist because the connection has been lost or the node has not been mounted yet.","'status' field with value 'fail' with set 'error-type' to'uniconfig-error' and corresponding 'error-message'; reading of the fingerprint from the Operational datastore or Unified mountpoint has failed, or the configuration metadata parsing is not supported for the device type.","Execution of the 'is-in-sync' RPC doesn't modify the Operational datastore. The configuration fingerprint that is stored in the Operational datastore is not updated. 'Sync-from-network' RPC must be used for updating the last configuration fingerprint and the actual configuration state."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["the RPC input contains valid nodes for which the synchronization status must be checked ('R1' is synced while 'R2' is not synced):"]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, all touched nodes will be invoked."]},{"l":"Failed Example","p":["RPC input contains invalid node, the 'R1' doesn't support comparison of fingerprints(metadata translation unit has not been implemented for this device)."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains 2 nodes, the first one 'R1' is valid and synced, the second one ('R2') has not been installed yet. If there is one invalid node, Uniconfig operation will fail with 1 error entry in the response."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC replace-config-with-operational","p":["RPC replaces the UniConfig topology nodes in the Config datastore with UniConfig topology nodes from the Operational datastore. The RPC input contains a list of the UniConfig nodes to replace from the Operational to the Config datastore of the UniConfig transaction. Output of the RPC describes the result of the operation and matches all input nodes. If RPC is invoked with empty list of target nodes, operation will be invoked for all nodes modified in the UniConfig transaction. If one node failed for any reason, RPC will fail entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC replace-config-with-operational input has 2 target nodes and the RPC output contains the result of the operation."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, configuration of all touched nodes will be replaced by operational state."]},{"l":"Failed Example","p":["RPC input contains a list of the target nodes. Node 'R1' has not been installed yet. 
The RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC sync-from-network","p":["The purpose of this RPC is to synchronize configuration from network devices to the UniConfig nodes in the Operational datastore of UniConfig transaction. The RPC input contains a list of the UniConfig nodes where the configuration should be refreshed within the network. Output of the RPC describes the result of sync-from-network and matches all input nodes. Calling RPC with empty list of target nodes results in syncing configuration of all nodes that have been modified in the UniConfig transaction. If one node failed for any reason, the RPC will fail entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains nodes where configuration should be refreshed.","If RPC input does not contain the target nodes, all touched nodes in the transaction will be synced."]},{"l":"Failed Example","p":["RPC input contains a list of nodes where the configuration should be refreshed. Node 'R2' has not been installed yet."]},{"i":"failed-example-1","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC sync-to-network","p":["This RPC is a combination of sync-from-network and commit RPCs. If one of these RPCs fails the RPC will fail without any changes made.","The purpose of this RPC is to synchronize configuration from the UniConfig nodes in the Configuration datastore of UniConfig transaction to network devices. The RPC input contains a list of the UniConfig nodes which are to be updated on a network device. Output of the RPC describes the result of sync-to-network and matches all input nodes. Calling RPC with empty list of target nodes results in syncing configuration of all nodes that have been modified in the UniConfig transaction. If some node fails, the RPC fails entirely.","It is necessary for admin-state of UniConfig nodes, specified in the input, to be set to \"unlocked\"."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains nodes which are to be updated on the corresponding network device."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, operation will be invoked on top of all touched nodes in the transaction."]},{"l":"Failed Example","p":["If one or more input nodes are not set to admin-state 'unlocked' the request will result in an error pointing out nodes with the wrong admin-state."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains only one node with bad admin-state."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains 2 nodes, the first one 'R1' is valid, the second one 'R2' has not been installed yet. If there is at least one invalid node, operation will fail."]},{"i":"failed-example-3","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"RPC uninstall-multiple-nodes","p":["This RPC uninstalls multiple devices at once. It uses the default uninstall-node RPC. 
Devices are uninstalled in parallel."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains two devices (R1 and R2)."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and R2 is installed using two different protocols."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains two devices (R1 and R2) and R2 has already been uninstalled using the CLI protocol."]},{"l":"Failed Example","p":["RPC input does not specify node-id."]}],[{"l":"RPC validate","p":["The external application stores the intended configuration under nodes in the UniConfig topology. This configuration can be checked to determine whether it is valid. The trigger for execution of configuration validation is the validate RPC. RPC input contains a list of UniConfig nodes whose configuration should be validated. Output of the RPC describes the result of the validation and matches all input nodes. It is valid to call this RPC with an empty list of target nodes - in this case, all nodes that have been modified in the UniConfig transaction will be validated.","The configuration of nodes consists of the following phases:","Open transaction to device","Write configuration","Validate configuration","Close transaction","If one node fails in the second (validation) phase for any reason, the RPC fails entirely.","The validation (second phase) takes place only on nodes that support this operation.","The validate RPC is shown in the figure below."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC validate input has 2 target nodes and the output describes the result of the successful validation."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, all touched nodes in the transaction will be validated."]},{"l":"Failed Example","p":["RPC validate input has 1 target node and the output describes the result of the validation. The node has failed because validation failed."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains 2 nodes: the first one, 'R1', is valid, and the second one, 'R2', has not been installed yet. If there is one invalid node, UniConfig evaluates the nodes as failed."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are not any touched nodes, the request will result in an error."]}],[{"l":"UniConfig properties","p":["UniConfig properties are application properties used to configure the application. They can be separated into three groups:","Runtime mutable properties can be modified at runtime (using the update-properties RPC), their changes take effect at runtime and the properties are persisted in the database.","Database persisted properties include all runtime mutable properties and some additional properties. These properties are stored in the database, which is always their primary source. With UniConfig Cloud Config, they remain constant across UniConfig instances in the same cluster and cannot be overridden via the application properties file.","Regular UniConfig properties comprise all the remaining properties. 
These properties can always be changed using the application.properties file and can differ between UniConfig instances.","Database persisted properties can be changed or read at application runtime without restarting UniConfig by using UniConfig Cloud Config and the following RPCs:","RPC read-properties","RPC update-properties"]}],[{"l":"RPC read-properties","p":["The read-properties RPC reads default properties from the database. If a specified property key does not exist in the database, the key is returned in the ignored keys section. The RPC works the same whether UniConfig Cloud Config is enabled or disabled.","read","If UniConfig Cloud Config is disabled, the read-properties RPC reads property values from the database. These values may differ from the values in the application instance."]},{"l":"RPC examples"},{"l":"Successful example","p":["RPC input contains default property keys."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains properties that are not default properties or are private (crypto keys and crypto types)."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input consists of properties that do not exist in the database."]}],[{"l":"RPC update-properties","p":["The update-properties RPC is used to update property values. If UniConfig Cloud Config is enabled, it also calls the Refresh Bus Endpoint to update properties at runtime for all connected UniConfig instances.","The RPC only updates default properties, except for crypto properties, for which there are separate RPCs (change-encryption-status and change-encryption-keys).","RPC sequence diagram with UniConfig Cloud Config enabled:","update-with-ucc","If UniConfig Cloud Config is disabled, the RPC only updates property values in the database. The application instance continues to use the old property values, which can cause confusion.","Additionally, if a new UniConfig instance is started after properties have been updated, that instance will use the updated property values from the database. UniConfig instances will therefore use different values for the same property, as described in the diagram below.","We recommend that you use this RPC with UniConfig Cloud Config. The exception is callbacks.access-token, which is always up to date.","RPC sequence diagram with UniConfig Cloud Config disabled:","update-without-ucc"]},{"l":"RPC examples"},{"l":"Successful example","p":["RPC input contains the default properties with correct values."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains the crypto default property."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains an incorrect property key."]},{"l":"Failed example","p":["RPC input contains default properties with incorrect values."]},{"i":"failed-example-1","l":"Failed example","p":["RPC input contains default properties with incorrect values."]}],[{"l":"Utilities","p":["Utilities are simple programs that are part of the UniConfig distribution. After unpacking and building the distribution, the utilities can be found in the 'utils' subdirectory."]},{"l":"YANG Packager"},{"l":"Difference between OpenAPI specifications"}],[{"l":"Difference between OpenAPI specifications"},{"l":"Introduction","p":["The UniConfig distribution includes a program for checking the difference between OpenAPI specifications. 
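A typical invocation of this tool, using the '--former', '--new' and '--output' options described in the Usage subsection that follows; the paths are placeholders and the command assumes it is run from the 'utils' directory of the unpacked distribution.

```bash
# Illustrative run of the OpenAPI diff helper; output is written as an HTML report.
cd utils
./show_swagger_diff.sh \
  --former /path/to/former/yaml/files \
  --new /path/to/new/yaml/files \
  --output /path/to/output
```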
After building and unpacking the distribution, you can find the program in the 'utils' directory as a shell script called called 'show_swagger_diff.sh'.","The program uses OpenAPI-diff to generate OpenAPI differences."]},{"l":"Usage","p":["The ./show_swagger_diff.sh script contains four arguments. Each one has its own identifier, so you can give arguments in any order. All arguments are optional as default values are included for each argument.","--former, -f /path/to/former/yaml/files- optional argument. Path to previous OpenAPI specifications (.yaml files). The default path is 'openapi_diff/old'.","--new, -n /path/to/new/yaml/files- optional argument. Path to new OpenAPI specifications (.yaml files). The default path is 'openapi_diff/new'.","--output, -o /path/to/output- optional argument. Path for the html output file with differences. The default path is 'openapi_diff'.","-s- optional argument. Silent printing, includes less information.","Bash script ./show_swagger_diff.sh also includes a simple help facility. There are two options for showing the help text:","./show_swagger_diff.sh -h","./show_swagger_diff.sh --help","The script only accepts YAML files."]},{"l":"Example use case"},{"l":"Default usage","p":["This example shows basic usage of the script with and without optional arguments. Open a terminal and the '../utils' directory, and run the following command:","OR"]},{"l":"Usage with non-existent input path","p":["This example shows basic usage of the script where some specified input directories do not exist. Open a terminal and the '../utils' directory, and run the following command:"]}],[{"l":"YANG packager"},{"l":"Introduction","p":["YANG packager is a simple program which is part of the UniConfig distribution. User can finds it in the utils/ directory after building and unpacking the UniConfig distribution. User can use it by simple shell script called'convertYangsToUniconfigSchema.sh'. YANG packager is responsible for:","validation of user-provided YANG files","copying valid YANG files to the user-defined directory","informing the user about conversion process"]},{"l":"Usage","p":["-d /path/to/default- optional argument. Sometimes some YANG files need additional dependencies that are not provided in source directories. In this case it is possible to use path to the 'default' directory which contains additional YANG files. If there is this missing YANG file, YANG packager will use it.","-enableSwagger- optional argument. Path to file that enables OpenAPI generation.","-g- optional argument. Path to directory where generated Java sources with constants from YANG elements are saved. By default, generation of Java files is disabled.","-i /path/to/sources- required argument. User has two options for where the path can be directed:","-jd- optional argument. Flag that enables to generate java documentation on data elements.","-o /path/to/output-directory- required argument. User can define path where he wants to save valid YANG files. If the output directory exists, it will be replaced by a new one.","-pn- optional argument. Custom package name of generated classes.","-px- optional argument. Flag that enables prefix for generated constants names inside generated classes.","-r- optional argument. Selection of repositories inside source directory with files or file with defined names of directories which contains files, from which constants will be generated.","-s /path/to/skip-list- optional argument. 
User can define YANG file names in text file that he does not want to include in conversion process. This file must only contain module names without revision and .yang suffix.","-to-file- optional argument. When user uses this flag, then YANG packager also saves the debug output to a file. This file can be found on a same path as output-directory. It will contain suffix '-info' in its name. If the output directory is called 'output-directory', then the file will be called 'output-directory-info'.","./convertYangsToUniconfigSchema --help","./convertYangsToUniconfigSchema -h","Bash script ./convertYangsToUniconfigSchema also includes simple help facility. There are two options how to show the help text:","If compilation process detected some invalid YANG files then output directory will not be created. In this case, user has to fix invalid YANG files or use a combination of \"-d\" and \"-s\" arguments.","Script ./convertYangsToUniconfigSchema contains four arguments. Each one has its own identifier so user can use any order of arguments. Two arguments are required, namely the path to resources that contain YANG files and the path to the output directory where user wants to copy all valid YANG files. Other three arguments are optional. First one is the path to the\"default\" directory which contains some default YANG files, second one is the path to the \"skip-list\" and last one is a \"-to-file\" flag, which user can use when he wants to write a debug output to file.","The user is responsible for the validity of YANG files in the default directory. These files are not checked by YANG package.","to the directory that contains YANG files and other sub-directories with YANG files","to the text-file that contains defined names of directories. These defined directories have to be stored on the same path as text-file."]},{"l":"Example use-case"},{"l":"Basic usage 1","p":["This is basic usage of the script where only mandatory arguments are used. In this case, there is a directory with YANG files used as source. All files in source directory are valid YANG files. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Basic usage 2","p":["This is basic usage of the script where only mandatory arguments are used. In this case, there is directory with YANG files used as source. Source directory also contains one invalid YANG file with missing import. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Basic usage 3","p":["This is basic usage of the script where only mandatory arguments are used. In this case, there is directory with YANG files used as source. Source directory also contains one non-yang file. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Usage with default directory","p":["This is usage with path to default directory that contains one YANG file openconfig-mpls. Source directory also contains one invalid YANG file 'cisco-xr-openconfig-mpls-deviations.yang' with missing import 'openconfig-mpls'. This missing import is loaded from default directory. Open a terminal, go to the ../utils directory and run command:"]},{"l":"Usage with skip-list","p":["This is usage with path to skip-list text file that contains one YANG file name cisco-xr-openconfig-mpls-deviations. This YANG file will not be included in the conversion process. 
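A hedged sketch of such an invocation, combining the mandatory '-i'/'-o' arguments with the optional default directory, skip-list and '-to-file' flag documented in the Usage section above; all paths are placeholders.

```bash
# Illustrative run from the 'utils' directory: convert YANG sources, pulling missing
# dependencies from a default directory and skipping the modules listed in skip-list.txt.
./convertYangsToUniconfigSchema.sh \
  -i /path/to/yang-sources \
  -o /path/to/output-directory \
  -d /path/to/default-yangs \
  -s /path/to/skip-list.txt \
  -to-file
```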
Open a terminal, go to the ../utils directory and run command:"]},{"l":"Usage with text-file as a source","p":["In this example a path to text-file with defined names of source directories is used.","Open a terminal, go to the ../utils directory and run command:"]},{"i":"usage-with--to-file-flag","l":"Usage with -to-file flag","p":["This is usage where output is also printed to file. User can find output information file on the path /path/to/output-info.","Open a terminal, go to the ../utils directory and run command:"]},{"i":"usage-with-text-file-as-a-source-and--to-file-flag","l":"Usage with text-file as a source and -to-file flag","p":["In this example a path to text-file with defined names of source directories is used and also flag for print outputs to files. User can find output information files on paths /path/to/output/directory-1-info and /path/to/output/directory-2-info","Open a terminal, go to the ../utils and run command:","Content of text-file"]},{"i":"usage-with--enableswagger-flag","l":"Usage with '-enableSwagger' flag","p":["In this example a path to a text-file with defined names of source directories is used. A flag to print outputs to files and a flag to enable swagger for OpenAPI files generation. The swagger configuration file is located at ../utils/config/swagger-config.json. Swagger output file / files are generated per directory, and they are located in the output directory. The user can find output information files on paths /path/to/output/directory-1-info and /path/to/output/directory-2-info.","Open a terminal, go to the ../utils directory. Run the command:","Additional parameters are available for swagger generation that further customise the OpenAPI file / files. These parameters are located at the beginning of the page.","The output then looks like this:"]},{"i":"error---source-directory-does-not-exist","l":"Error - source directory does not exist","p":["User-defined source directory does not exist.","Open a terminal, go to the ../utils directory and run command:"]},{"i":"error---source-directory-is-empty","l":"Error - source directory is empty","p":["User-defined source directory is empty. Open a terminal, go to the ../utils directory and run command:"]},{"i":"error---sources-defined-in-text-file","l":"Error - sources defined in text-file","p":["One directory defined in the text-file is empty and other one does not exist.","Open a terminal, go to the ../utils and run command:","Content of text-file"]}],[{"l":"Admin State","p":["Admin state is used to lock, unlock or southbound-lock devices. Modification of data on those devices is then allowed or forbidden accordingly. Currently, there are three states that are supported:","LOCKED - When a device is administratively locked, it is not possible to modify its configuration, and no changes are ever pushed to the device.","UNLOCKED - Device is assumed to be operational. All changes are attempted to be sent southbound. This is the default when a new device is created.","SOUTHBOUND_LOCKED - It is possible to configure the device, but no changes are sent to the device. Admin mode is useful when pre provisioning devices.","This state is automatically added to the device during installation. 
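For illustration only, the admin state can also be set explicitly when the device is installed; a minimal sketch of the relevant part of an install request body (the node name and the wrapping 'input' object are assumptions, only the leaf name comes from this page):
{ "input": { "node-id": "R1", "uniconfig-config:admin-state": "locked" } }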
The user can further specify what state the device should be in, via:","\"uniconfig-config:admin-state\": \"unlocked\"","The state variable should be from one of the above-mentioned options.","If the user wants to change the state after the installation, an RPC for changing that state is available."]},{"l":"RPC Example","p":["RPC input contains the device name and the state that it should be change to."]},{"i":"rpc-example-1","l":"RPC Example","p":["GET request to get the actual state of the device."]},{"l":"RPC Failed Example","p":["Device is in locked admin-state and the user tries to modify data on the device."]}],[{"l":"Build-and-Commit Model"},{"l":"Introduction","p":["Build-and-commit model is based on explicit creation of the transaction, invoking operations in the scope of this transaction and finally committing or closing transaction. The transaction represents a session between the client and the UniConfig instance.","Using explicitly created transactions has multiple advantages in comparison to Immediate Commit Model:","Multiple operations and modifications can be invoked in the single transaction while keeping transactions isolated.","Most of the UniConfig operations, such as calculate-diff and commit, doesn't have any usage in the Immediate Commit Model - they are valuable only if the Build-and-Commit Model is used.","The transaction allows a client to identify if it still communicates with the same UniConfig instance (this property is usable in the clustered deployment). If the UniConfig instance does not know about the transaction then the request will fail because transaction expired, is closed, or has never been created."]},{"l":"Configuration","p":["Configuration related to UniConfig transactions is placed in the'config/lighty-uniconfig-config.json' file under 'transactions' container. Note that build-and-commit model is enabled if'uniconfigTransactionEnabled' is set to 'true' value (default value)."]},{"l":"Optimistic locking mechanism","p":["Race condition between transactions that are committed in parallel and contain changes of same nodes (uniconfig, unistore, snapshot, or template nodes) is solved using optimistic locking mechanism. Configuration of same node can be modified in parallel from 2 transactions, however only the first committed transaction will succeed. Commit of the second transaction will fail.","UniConfig uses 2 different techniques for detection of conflicts during commit or checked-commit operation:","Comparison of configuration fingerprints - Fingerprint value is updated for altered node at the end of the commit operation - at the beginning of commit operation, UniConfig compares the value of actual fingerprint in database with value of fingerprint read before the first CRUD operation done in the transaction and the last synced fingerprint (updated after execution of sync-from-network RPC). If actual fingerprint from database equals to fingerprint read before the first CRUD operation or the last synced fingerprint, then commit operation can continue. Otherwise, error is returned without touching any devices on network.","Per-node advisory locks - Comparison of configuration fingerprints are reliable if transactions are committed one after another. However, such serialization cannot be achieved in the clustered environment because UniConfig instances are not coordinated. 
If 2 transactions are committed at the same time and both assume that the configuration fingerprints haven't been updated by the other transaction, both transactions may start to push changes to network devices at the same time. To prevent this scenario, UniConfig locks the node in the PostgreSQL database using transaction-level advisory locks at the beginning of the commit operation. If another transaction tries to lock the same node, the attempt fails and the second transaction does not enter the critical section - instead, it fails. Locks are automatically released at the end of the transaction (the commit RPC closes the transaction).","All possible scenarios are captured in the following diagrams.","Optimistic locking"]},{"l":"Dynamic mountpoints","p":["Mountpoints are created only when UniConfig needs to read / write some data from / to a device, and the lifecycle of a mountpoint is bounded by the lifecycles of the transactions that use it. If a mountpoint is not used by any transaction, UniConfig automatically closes it - the associated operational data on the southbound layer and the connection to the device are removed.","The first diagram demonstrates mounting of 2 devices which are used by 1 transaction - after this transaction is closed, both mountpoints are closed. The second diagram shows a scenario in which 2 transactions share 1 of 2 mountpoints - after the first transaction is closed, 1 of the mountpoints is not closed since the second transaction may still communicate with the corresponding device."]},{"l":"Creation of transaction","p":["A transaction can be created using the create-transaction RPC. The RPC doesn't specify an input body and also returns a response without a body. The response additionally contains a Set-Cookie header with the UNICONFIGTXID key and a corresponding value - a transaction identifier that conforms to the RFC-4122 Universally Unique IDentifier (UUID) format.","The process of transaction creation is depicted in the following sequence diagram.","create-transaction RPC","UniConfig performs the following steps after the create-transaction RPC is called:","Creation of connection to database system - The connection is created with auto-commit disabled, enabling transactional features. UniConfig uses the 'read committed' isolation level.","Creation of database transaction - It provides access to the remote PostgreSQL database. Using the database transaction it is possible to read committed data, read uncommitted changes created by this transaction and write modifications to the database. Data read at the first access to some resource is cached in the datastore transaction - when some component tries to access the same resource again, it is read only from the datastore transaction. Data is written to the database transaction at invocation of the commit/checked-commit RPC.","Creation of datastore read-write transaction - It provides access to the OPER and CONFIG datastores bound to this transaction. The datastore is used only as a cache between the application and the PostgreSQL database, and it resides only in the memory allocated to the UniConfig process. The datastore transaction is never committed - the cache is discarded at the end of the transaction's life.","Registration of transaction - A transaction is always bound to 1 specific UniConfig instance."]},{"l":"Successful example","p":["The following request shows successful creation of a UniConfig transaction. 
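A minimal sketch of such a request, assuming UniConfig listens on localhost:8181, RESTCONF is rooted at /rests and the RPC is exposed under the uniconfig-manager module (the module prefix is an assumption):
curl -i -X POST 'http://localhost:8181/rests/operations/uniconfig-manager:create-transaction' -H 'Accept: application/json'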
Response contains Set-Cookie header with UNICONFIGTXID key and value."]},{"l":"Failed example","p":["The most common reason for failed creation of UniConfig transaction is reached maximum number of open transactions that is limited by('maxDbPoolSize' - 'maxInternalDbConnections') database connection pool setting. In that case, UniConfig returns response with 500 status code."]},{"l":"Transaction idle-timeout","p":["Create-transaction RPC can be used with optional query parameter called timeout. This parameter is used to override global idle timeout for transaction created by this RPC call. After transaction inactivity for specified time transaction will be automatically cleaned. Value of this parameter is whole number and defines time in seconds."]},{"l":"Dedicated session to device","p":["By default, UniConfig shares southbound session to network device, if multiple UniConfig transactions use the same device via same management protocol. This behaviour can be disabled using 'dedicatedDeviceSession' query parameter which accepts boolean value. Afterwards, UniConfig transaction will create dedicated session to device which is used only by one transaction and closed immediately after committing or closing the transaction.","Dedicated sessions to device are useful when:","Device is not able to process requests in parallel via same session.","Device is able to process requests in parallel via same session, but it doesn't process them in parallel","decreasing processing performance."]},{"l":"Invocation of CRUD operation in transaction","p":["CRUD operations for modification or reading node configuration can be invoked in the specific transaction by appending UNICONFIGTXID (key) with UUID of transaction (value) to Cookie headers. In that case, operation will be invoked only in the scope of single transaction - changes are not visible to other transactions until this transaction is successfully committed.","Next diagram describes execution of CRUD operation from RESTCONF API. It shows also difference between datastore and database transaction - data is read from database only at the first access to some data (for example, node configuration). After that, this configuration is cached inside temporary datastore transaction - goal is to improve performance by limiting transferring data between UniConfig and PostgreSQL. Next access to same configuration can be evaluated under in-memory datastore.","Invocation of CRUD"]},{"i":"successful-example-1","l":"Successful example","p":["The following request demonstrates reading of some configuration from uniconfig topology, junos node in the transaction with ID'd7ff736e-8efa-4cc5-9d27-b7f560a76ff3'."]},{"i":"failed-example-1","l":"Failed example","p":["Trying to use non-existing UniConfig transaction results in 422 status code (Unprocessable Entity)."]},{"l":"Invocation of RPC operation in transaction","p":["RPC operation can be invoked in the specific transaction the same way as CRUD operation - by specification of UNICONFIGTXID in the Cookie header.","There are few differences between CRUD and RPC operations from the view of transactions:","Commit, checked-commit, and close-transaction RPCs can state of the transaction. 
Create-transaction RPC is reserved for creation of transactions.","Not all RPC operations that are exposed by UniConfig use dedicated transactions - in that case, these RPCs just ignore the explicitly specified transaction and either don't work with transactions at all or create a transaction internally (examples: install-node, uninstall-node RPC).","There are also transaction-aware operations that directly leverage properties of transactions. For example, if some UniConfig RPC is invoked with an empty list of target nodes, the operation is automatically applied to all modified nodes in the transaction (calculate-diff RPC with empty target nodes computes the diff for all modified nodes in the transaction).","The following diagram shows execution of an arbitrary RPC in the specified transaction.","Invocation of RPC"]},{"i":"successful-example-2","l":"Successful example","p":["Invocation of the calculate-diff RPC in a transaction which contains modifications done on the 'junos' node."]},{"i":"failed-example-2","l":"Failed example","p":["Invocation of the calculate-diff RPC with a transaction ID that has a wrong format."]},{"l":"Closing transaction","p":["There are two ways to close a transaction:","close-transaction RPC - Explicit closing of the transaction that results in dropping all changes done in the transaction.","commit/checked-commit RPC - After execution of the commit operation, the transaction is automatically closed (regardless of the commit result). The behaviour of the commit and checked-commit RPCs is described in more detail under the 'UniConfig Node Manager' section.","The close-transaction RPC doesn't contain a body, only a Cookie header with the UNICONFIGTXID property pointing to the transaction that the user would like to close. The response contains information on whether the transaction has been successfully closed.","The following sequence diagrams describe the close-transaction procedure. It is split into 2 diagrams to improve readability and to reuse some parts from other diagrams.","close-transaction RPC","Clean orphaned mountpoints","The most important actions, briefly:","Loading the UniConfig transaction from the registry by the provided transaction ID that is extracted from the Cookie header.","Closing the connection to the database.","Cancellation of the database transaction.","Cancellation of the datastore read-write transaction.","Unregistration of the transaction from the local registry.","Unmounting nodes that are not referenced by any UniConfig transaction - the connection to the device is closed and the representing southbound / Unified mountpoints are removed together with state data.","After a transaction is closed, it cannot be used by any other operation - the user must create a new transaction in order to use the build-and-commit model."]},{"i":"successful-example-3","l":"Successful example","p":["Closing an existing transaction using the close-transaction RPC. The response has no body, only status code 200."]},{"i":"failed-example-3","l":"Failed example","p":["If the transaction has already been closed, the user receives a response with a JSON body containing an error message."]},{"l":"Transaction cleaner","p":["The transaction cleaner is used for automatic closing of transactions that are open longer than the specified timeout value (the 'transactionIdleTimeOut' or 'maxTransactionAge' setting in the configuration). A transaction resets its 'transactionIdleTimeOut' timer after each CRUD or RPC operation and remains valid for the time specified by that setting. 
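For illustration, the relevant part of 'config/lighty-uniconfig-config.json' might look roughly like this (the values below are examples only, not documented defaults):
{ "transactions": { "uniconfigTransactionEnabled": true, "transactionIdleTimeOut": 300, "maxTransactionAge": 3600 } }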
This mechanism effectively suppresses application-level errors - open transactions are not closed at the end of the workflow.","Next sequence diagram describes cleaning process. Referenced diagram'Close transaction' is placed in the previous 'Closing transaction' section."]},{"l":"Use cases"},{"l":"Modification of different devices in separate transactions","p":["1. Installation of 2 devices - ‘xr6_1’ and ‘xr6_2’ (without transaction ID)","2. Creation of 2 uniconfig transactions: let’s name them TX1 and TX2","3. Modification of ‘xr6_1’ uniconfig configuration inside TX1","4. Modification of ‘xr6_2’ uniconfig configuration inside TX2","5. Verification if TX1 and TX2 are isolated","6. Committing TX1 and TX2 using uniconfig-manager:commit RPC","7. Verification of committed data","8. Verification if TX1 and TX2 are closed","All 3 responses - Status 200 OK with returned expected data. Similar verification can be done on 'xr6_2'.","Both responses should return Status 404 Not Found:","Creation of new Loopback79 interface - cookie header contains UNICONFIGTXID of TX2:","Creation of new Loopback97 interface in the TX1 - cookie header contains UNICONFIGTXID of TX1:","It is not required to specify target nodes in the input because UniConfig transaction tracks modified nodes:","Response - Status 422 Unprocessable Entity:","Response:","Since there aren't any conflicts between modifications in the committed transactions, both RPCs should succeed. Expected responses:","The first response contains transaction-id of TX1 that can be used in the subsequent requests that belong to TX1:","The first second contains transaction-id of TX2 that can be used in the subsequent requests that belong to TX2:","Trying to read some data in the TX1:","Trying to read some data in the TX2:","TX1 doesn't see modifications done in TX2 and vice-versa:","Verification if configuration was correctly committed to devices (direct read under yang-ext:mount) and if datastore was updated (GET request without transaction ID):","Verification if TX1 contains created interface (Cookie header contains UNICONFIGTXID of TX1):","Verification if TX2 contains created interface (Cookie header contains UNICONFIGTXID of TX2):"]},{"l":"Modification of sub-tree on same device in separate transactions","p":["1. Installation of device ‘xr6_1’","2. Preparation of configuration on 'xr6_1'","3. Creation of 2 uniconfig transactions: let’s name them TX1 and TX2","4. Modification of ‘xr6_1’ uniconfig configuration inside TX1","5. Modification of ‘xr6_1’ uniconfig configuration inside TX2","6. Commit TX1","7. Commit TX2","8. Verification of committed data in TX1 / non-committed data in TX2","9. 
Verification if TX1 and TX2 are closed","Changing description of interface Loopback97 to 'next loopback': - there is a conflict with TX1 which also tries to create/replace the configuration of the same interface:","Changing description of interface Loopback97 to 'test loopback':","Commit TX1 without target nodes - it should fail because the same node has already been modified by different transaction that has already been committed:","Commit TX2 without target nodes - it should pass:","Creation of Loopback97 interface with some initial description:","Creation of the uniconfig transaction TX1:","Creation of the uniconfig transaction TX2:","Respective responses:","Response - Status 200 OK with error message:","Response:","Trying to read some data in the transaction:","Verification if committed changes in TX1 were applied to datastore and device:"]}],[{"l":"Device Discovery","p":["\"addressCheckLimit\" specifies how many addresses are checked. If more addresses are specified in the request, the request will not be successful.","\"max-pool-size\" specifies the size of the executor that is used. If the amount of addresses in the request is high, consider raising the value.","\"network\": \"192.168.1.0/24\"","\"start-ipv4-address\": \"192.168.1.1\", \"end-ipv4-address\":\"192.168.1.254\"","/opt/uniconfig-frinx/config/application.properties","~/FRINX-machine/config/uniconfig/frinx/uniconfig/config/application.properties","Execute the ifconfig command in the terminal and look for an interface. If you are using a VPN, the interface is often called tun0. If not, look for a different interface. Copy inet from the interface and paste it into the file.","For testing, you need to add your IP address to the configuration JSON file. The configuration file is located under","If you specify the range using a network statement, the network address and broadcast address will not be included in the discovery process. If you specify the range via range statements, make sure that only hosts addresses are included in the specified range.","If you want to discover hosts and ports in listening state in a network, do not add the network and broadcast address of that network. For example, if you want to check the network \"192.168.1.0/24\", you can use one of the following:","initial-pool-size of the thread pool that is used by the executor.","kepalive-time specifies the time (in seconds) before the execution of a specified task is timed out.","RPC device-discovery is used to verify reachable devices in a network. You can either check a single IP address in IPv4 format, a network or a range of addresses. Additionally, you can also specify a port or range of ports (TCP or UDP) that are checked if they are open. The ICMP protocol is used to check the availability devices.","The input consists of a list of all IP addresses that should be checked(IPv4 or IPv6, a single IP address or a network with a prefix, or a range of IP addresses). Additionally, it contains the TCP/UDP ports that should be checked whether they are open or not on the given addresses.","The output of the RPC shows if the IP addresses are reachable via the ICMP protocol. For every IP address, a list of open TCP/UPD ports is also included.","The snippet contains two additional parameters.","When running UniConfig stand-alone, the config file is in the config folder:"]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a network with the prefix /29. Addresses in the network and desired ports are checked for availability. 
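A request of roughly this shape could be used (the RPC path and the exact input structure are assumptions for illustration; the 'network' field follows this page):
curl -X POST 'http://localhost:8181/rests/operations/device-discovery:discover' -H 'Content-Type: application/json' -d '{"input": {"address": [{"network": "192.168.1.0/29"}], "tcp-port": [{"port": 22}, {"port": 179}]}}'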
The output contains reachable addresses in the network and all open TCP/UDP ports."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains a range of addresses. The addresses and desired ports are checked for availability. The output contains reachable addresses and all open TCP/UDP ports."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains the host name and ports that are checked for availability. The output shows if the host is reachable as well as all open TCP/UDP ports."]},{"l":"Failed Example","p":["RPC input contains two addresses that are incorrectly wrapped."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains an IP range where the start point is greater than the end point."]},{"l":"Not supported operation Example","p":["RPC input contains a network in IPv6 format that is currently not supported."]}],[{"l":"Dry-run manager"},{"l":"RPC dryrun-commit","p":["The RPC will resolve the diff between actual and intended configuration of nodes by using UniConfig Node Manager. Changes for CLI nodes are applied by using cli-dryrun mountpoint which only stores translated CLI commands to the cli-dry-run journal. After all changes are applied, the cli-dryrun journal is read and an RPC output is created and returned. It works similarly with NETCONF devices, but it outputs NETCONF messages instead of CLI commands. RPC input contains a list of UniConfig nodes for which to execute the dry run. Output of the RPC describes the results of the operation and matches all input nodes. It also contains a list of commands, and NETCONF messages for the given nodes. If RPC is called with empty list of target nodes, dryrun operation is executed on all modified nodes in the UniConfig transaction. If one node failed for any reason the RPC will be failed entirely.","RPC dryrun commit"]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input does not contain target nodes, dryrun is executed with all modified nodes."]},{"l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called. One node does not support dry-run."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called. One node has a bad configuration."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to a device if the RPC commit or checked-commit was called. One node does not support dry-run (IOSXR) and one is not in the unified topology (IOSXRN). There is one extra node, which has not been mounted yet (AAA)."]},{"i":"failed-example-3","l":"Failed Example","p":["RPC input contains a target node and the output contains a list of commands which would be sent to a device if the RPC commit or checked-commit was called. 
One node has not been mounted yet (AAA)."]},{"i":"failed-example-4","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there weren't any touched nodes, the request will result in an error."]}],[{"l":"Immediate Commit Model","p":["The immediate commit creates new transactions for every call of an RPC. The transaction is then closed so no lingering data will occur.","For reading data (GET request), a sequential diagram was created for better understanding of how the whole process works.","Get Request","Similarly, a sequential diagram for putting data (PUT request) was created as well.","Put Request","The key difference in those diagrams is that editing data (PUT, PATCH, DELETE, POST) + RPC calls in the database need to be committed, so there is an additional call of the commit RPC. This commit ensures that the transaction is closed. For reading data, it is necessary to close the transaction differently, because no data were changed, so calling a commit would be unnecessary.","When calling the 'sync-from-network' RPC, it internally calls'replace-config-with-operational'. Note that this only works when using the Immediate Commit Model."]},{"l":"Configuration","p":["Configuration related to UniConfig transactions is placed in the'config/lighty-uniconfig-config.json' file under 'transactions' container. A user can turn off the Immediate Commit Model and use only the Build and Commit Model instead."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a new interface that will be added to the existing ones.","After putting the data into the database, they will be automatically committed and can be viewed."]},{"l":"Failed Example","p":["RPC input contains a value that is not supported."]}],[{"l":"Kafka Notifications"},{"l":"Introduction","p":["NETCONF devices produce NETCONF notifications. UniConfig can collect these and create its own UniConfig notifications about specific events. Notifications from both NETCONF devices and UniConfig are published using Kafka.","The following notification types are available:","NETCONF notifications","Notifications about transactions","Audit logs (RESTCONF notifications)","Data-change events","Connection notifications","Each notification type is stored in its own topic in Kafka. Additionally, all notifications are stored in one table in the database.","notifications-in-cluster"]},{"l":"Kafka","p":["Apache Kafka is a publish–subscribe-based, durable messaging system that sends messages between processes, applications and servers. Within Kafka, you can define topics (categories) and applications can add, process and reprocess records.","In our specific case, UniConfig publishes notifications. Each type of notification is stored in a separate topic and can therefore be subscribed to independently. The names of topics and connection data are configurable in the file lighty-uniconfig-config.json."]},{"l":"NETCONF notifications","p":["RFC 5277 defines a mechanism where the NETCONF client indicates an interest in receiving event notifications from a NETCONF server by subscribing to event notifications. The NETCONF server replies whether the subscription request was successful and, if so, starts sending event notifications to the NETCONF client as events occur within the system. Event notifications are sent until either the NETCONF session or the subscription is terminated.","NETCONF notifications are categorised as so-called streams. The subscriber must choose which streams to receive. 
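As a hedged sketch, this choice is expressed in the install request as a 'stream' list with a 'stream-name' parameter (both names appear later on this page; the exact nesting is an assumption): "stream": [ { "stream-name": "NETCONF" } ]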
The default stream is named NETCONF."]},{"l":"Notifications about transactions","p":["This type of notification is generated after each commit in UniConfig.","It contains the following:","transaction id","calculate diff result","commit result"]},{"i":"audit-logs-restconf-notifications","l":"Audit logs (RESTCONF notifications)","p":["Below are three examples of notifications with the response body and the calculation difference result.","body","http-method","It contains the following:","query-parameters","request data","response data","source-address","source-port","status-code","The first example is for created data:","The response body does not need to be included in notifications. It can be configured using the includeResponseBody parameter in the application.properties file. Also, the calculation difference result can be part of the notification if the parameter includeCalculateDiffResult parameter is set to true in the file application.properties.","The second example is for deleted data:","The third example is for updated data:","This type of notification is generated after each RESTCONF operation.","transaction id","uri","user-id"]},{"l":"Shell notifications","p":["This type of notification is generated after each shell operation.","It contains the following:","transaction id","request data","source-address","source-port","prompt","executed command","response data","output"]},{"l":"Data-change events","p":["A subscription step is required before data-change events are generated and published into Kafka. With the subscription, a user can specify observed subtrees against data changes. Afterwards, data-change events are generated by UniConfig instances when a transaction is committed and committed changes contain subscribed subtrees.","A sample data-change event captured by Kafka console consumer:","For data-change events, the streamName is always 'DCE' and the identifier for the YANG notification is 'data-change-event'. The body contains the following:","subscription-id: Identifier of the subscription that triggers generation of data-change-event. Subscription identifier makes association of subscriptions and received data-changes-events easier than using combination of multiple fields such as node identifier, topology identifier and subtree path.","transaction-id: Identifier of committed transaction that triggered data-change-event after commit or checked-commit UniConfig operations.","edit - List of captured modifications done in the committed transaction.","Edit entry fields:","subtree-path: Relative path to data-tree element at which data-change happened. Path is relative to subtree-path specified during subscription.","data-before: JSON representation of subtree data before done changes. If this field is not present, then 'data-after' represents created data.","data-after: JSON representation of subtree data including done changes. If this fields is not present, then'data-before' represents removed data.","operation: Operation type of the data change event.","node-id: Node identifier of the data change event.","topology-id: Topology where the node exists. Can be either 'uniconfig' or 'unistore'."]},{"l":"Connection notifications","p":["Connection notification are generated whenever the status of a node changes. 
For connection notifications, the streamName is always 'CONNECTION' and the identifier for the YANG notification is ' connection-notification'.","It contains the following:","topology id","node id","connection status","connection message","Supported topologies are cli, netconf and gnmi.","Sample connection notifications captured by Kafka console consumer:","CLI disconnect notification:","NETCONF connect notification:"]},{"l":"Database entities","p":["body - full notification body in JSON format","creation time - time when subscription was created","end time - time when notifications stop to be collected","event time - time when notification was generated","Example request for reading Kafka settings using RESTCONF:","Example request for reading notifications using RESTCONF:","Example request for reading subscriptions using RESTCONF:","identifier - name of the YANG notification","netconf-subscription","node id - id of the NETCONF node from which notifications should be collected","node id - node id of the NETCONF device for NETCONF notifications or identifier of UniConfig instance in case of other types of notifications","notification","Notifications are stored in the notification table. It contains the following columns:","settings","start time - time when notifications start to be collected","stream name - name of the notification stream - NETCONF stream name or UniConfig-specific stream name","stream name - NETCONF stream name","The following three tables in the database are related to notifications:","The netconf-subscription table is used to track NETCONF notification subscriptions. It contains the following columns:","The settings table contains two columns: identifier and config. Records with the identifier kafka contain configurations for Kafka that can be modified at runtime.","UniConfig instance id - instance id of UniConfig that is collecting notifications from the NETCONF device"]},{"l":"NETCONF subscriptions","p":["A subscription is required to receive NETCONF notifications from a NETCONF device. Subscriptions are created using an install request:","Subscriptions to notification streams are defined as a list with the name stream. There is one record for each stream. The only required parameter is stream-name. The following optional parameters are supported:","start-time- must be specified to enable replay and should start at the specified time.","stop time- used with the optional replay feature to indicate the newest notifications of interest. If stopTime is not specified, notifications will continue until the subscription is terminated. Must be used with and set to be later than start-time. Values in the future are valid.","The creation of a new subscription for the stream will terminate all existing subscriptions for the stream."]},{"i":"monitoring-system---processing-netconf-subscriptions","l":"Monitoring system - processing NETCONF subscriptions","p":["Inside UniConfig, NETCONF notification subscriptions are processed in an infinite loop within the monitoring system. An iteration of the monitoring system loop consists of following steps:","Check global setting for NETCONF notifications","If turned off, release all NETCONF subscriptions and end current iteration","Release cancelled subscriptions","Query free subscriptions from DB, and for each one:","Create a notification session (create mountpoint and register listeners)","Lock the subscription (set UniConfig instance)","There is a hard limit for the number of sessions that a single UniConfig node can handle. 
If the limit is reached, the UniConfig node refuses any additional subscriptions.","The loop interval, hard subscription limit and maximum number of subscriptions processed per interval can be set in the file lighty-uniconfig-config.json."]},{"l":"Dedicated NETCONF session for subscription","p":["A NETCONF device may have the interleave capability that indicates support for interleaving other NETCONF operations within a notification subscription. This means that the NETCONF server can receive, process and respond to NETCONF requests on a session with an active notification subscription. As not all devices include support for this capability, the common approach for devices 'with' and 'without' interleave capability is to track notifications with a separate NETCONF session. To support this functionality, UniConfig creates a separate NETCONF session with a separate mount-point for every subscription. These mount points and sessions are automatically destroyed when the corresponding subscription is closed.","monitoring-system"]},{"l":"Subscription to data-change events"},{"l":"Creating a new subscription","p":["'BASE': Represents only a direct change of the node on subtree-path, such as replacement of a node, addition or deletion.","'ONE': Represent a change (addition, replacement, or deletion) of the node on the subtree-path or one of its direct child elements.","'SUBTREE': Represents a change of the node or any of its child nodes, direct and nested. This scope is a superset of ONE and BASE. This is the default value.","captured data-change-events from whole node configuration.","data-change-scope: Data-tree scope that specifies how granular data-change-events should be captured and propagated to Kafka. There are three options:","deleting existing subscription","displaying information about created subscription using RPC","Example: Creating a subscription to the node device1 in the uniconfig topology, and to the whole configuration subtree '/interfaces'.","Example: Creating a subscription to the uniconfig topology and to the whole /interfaces configuration subtree.","node-id: Identifier for the node from which data-change-events are generated. This field is optional. If not given, a global subscription is created and data-change-events are generated for all nodes under the topology.","RPC input contains the following:","RPC output contains only the generated 'subscription-id' in the format of UUID. This subscription identifier represents a token that can be used for the following:","sorting received Kafka messages","Subscriptions to data-change events are created using the 'create-data-change-subscription' RPC. After the subscription is done, UniConfig listens to data-change events on selected nodes and subtrees and distributes the corresponding messages to a dedicated Kafka topic.","subtree-path: Path to the subtree from which the user would like to receive data-change-events. Default path is '/'","topology-id: Identifier for the topology where the specified node is placed."]},{"l":"Removing a subscription","p":["Existing subscriptions can be removed using the delete-data-change-subscription RPC and the provided subscription-id. After a subscription is removed, UniConfig stops generating new data-change events related to the subscribed path.","RPC input contains only subscription-id, a unique identifier for the subscription to data-change events. RPC output does not contain a body. 
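A hedged sketch of such a call (the 'data-change-events' module prefix is an assumption; the RPC name, input leaf and the example identifier follow this page):
curl -X POST 'http://localhost:8181/rests/operations/data-change-events:delete-data-change-subscription' -H 'Content-Type: application/json' -d '{"input": {"subscription-id": "8e82453d-4ea8-4c26-a74e-50d855a721fa"}}'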
The RPC returns 404 if no subscription exists for the provided identifier.","Example: Removing a subscription with the ID 8e82453d-4ea8-4c26-a74e-50d855a721fa."]},{"l":"Successful Example"},{"l":"Failed Example"},{"l":"Showing information about subscription","p":["The RPC show-subscription-data is used to display information about a created subscription.","RPC input contains the identifier of the target subscription.","RPC output for existing subscriptions contains 'topology-id', 'node-id', 'subtree-path' and 'data-change-scope' - the same fields that can also be specified in the 'create-data-change-subscription' RPC input.","If no subscription exists with the specified ID, the RPC returns a 404 status code with a standard RESTCONF error container.","Example: Displaying information"]},{"i":"successful-example-1","l":"Successful Example"},{"i":"failed-example-1","l":"Failed Example","p":["It is also possible to fetch all created subscriptions under a specific node or topology by sending a GET request to the data-change-subscriptions list under the node list item (operational data).","Example (there are two subscriptions under the device1 node):"]},{"l":"Configuration","p":["All notifications and the monitoring system can be enabled or disabled using the enabled flag.","All settings related to Kafka are grouped under kafka property. For authentication, there are the username and password properties. For the Kafka connection, there is the kafkaServers property. This contains a list of Kafka servers as a combination of brokerHost and brokerListeningPort. Broker host can be either an IP address or hostname.","archiveUrl - where to download kafka from","Audit logs settings are under auditLogs property. Currently there is only one flag includeResponseBody, which is used to enable or disable logging of the body of RESTCONF responses.","auditLogsEnabled","auditLogsTopicName - topic name for audit logs","blockingTimeout - How long the send() method and the creation of a connection for reading metadata methods will block (in ms).","cleanDataBeforeStart - if kafka config should be cleared before start","Configurations for notifications are in the lighty-uniconfig-config.json file, under the notifications property. The entire configuration looks like this:","dataChangeEventsEnabled","dataChangeEventsTopicName - topic name for data-change-events","dataDir - kafka data directory","deliveryTimeout - The upper bound on the time to report success or failure after a call to send() returns (in ms). Sets a limit on the total time that a record will be delayed prior to sending, the time to wait for acknowledgement from the broker (if expected) and the time allowed for retriable send failures.","enabled - flag that enables or disables embedded kafka","installDir - where should be kafka files placed","Kafka settings are also stored in the db. This way they can be changed at runtime using RESTCONF or UniConfig shell. Kafka setting are stored in the settings table.","maxAge - Maximum age of a record in the notifications table (in hours). Records older than this value are deleted. The default value is 100.","maxCount - Maximum number of records in the notifications table. If the number of records exceeds this value, the oldest record in the table is deleted. 
The default value is 10,000.","maxNetconfSubscriptionsHardLimit - Maximum number of subscriptions that a single UniConfig node can handle.","maxSubscriptionsPerInterval - The maximum number of free subscriptions that can be acquired in a single iteration of the monitoring system loop. If the number of free subscriptions is smaller than this value, all free subscriptions are processed. If the number of free subscriptions is larger than this value, only the specified number of subscriptions are acquired. The rest can be acquired during the next iterations of the monitoring system loop or by other UniConfing instances in the cluster. The default value is 10.","maxThreadPoolSize - The maximum thread pool size in the executor.","netconfNotificationsEnabled","netconfNotificationsTopicName - topic name for NETCONF notifications","optimalNetconfSubscriptionsApproachingMargin - The lower margin to calculate optimal range start. The default value is 0.05.","optimalNetconfSubscriptionsReachedMargin - The higher margin to calculate optimal range end. The default value is 0.10.","queueCapacity - The maximum capacity for the work queue in the executor.","rebalanceOnUCNodeGoingDownGracePeriod - Grace period for a UniConfig node going down. Other nodes will not restart subscriptions until the grace period has passed after a dead Uniconfig node was last seen. The default value is 120 seconds.","requestTimeout - How long the producer waits for acknowledgement of a request (in ms). If no acknowledgement is received before the timeout period is over, the producer will resend the request or, if retries are exhausted, fail it.","subscriptionsMonitoringInterval - How often the monitoring system loop is run and attempts to acquire free subscriptions. The value is given in seconds, the default value is 5.","These properties are under notificationDbTreshold. Both of these are implemented using database triggers. Triggers are running on inserts to notifications table.","Three (3) properties related to the monitoring system in clustered environments:","Three (3) properties related to the monitoring system:","Three (3) properties related to the timeout of messages to Kafka","transactionNotificationsEnabled","transactionsTopicName - topic name for transactions about notifications","Two (2) properties related to the thread pool executor required to send messages to Kafka","Two (2) properties used to limit the number of records in the notifications table in the database:","You can also to set up embedded Kafka. These setting are grouped under the embeddedKafka property:","You can configure the names of all topics for every notification type. The following flags are used for this:","You can enable or disable each type of notification independently of others. The following flags are used for this:"]},{"i":"kafka-client---example","l":"Kafka client - example","p":["To read notifications from kafka, you can use the command line consumer. Run the following command in the Kafka installation directory:","It is important to properly set up the hostname, port and topic name. Output after creation of NETCONF notification looks something like this:"]}],[{"l":"Operational data about transactions"},{"l":"Operational data about transactions","p":["To have a better overview of UniConfig transactions, there are operational data about all open transactions.","Data about transactions contain:","identifier (uuid)","creation time","last access time","idle timeout","hard timeout","list of changed nodes (incl. 
topologies)","additional context (random string, text column)","Data about transactions can be read using RESTCONF:","Example data about transactions:"]}],[{"l":"Templates Manager"},{"l":"Overview","p":["Templates can be used for reusing of some configuration and afterwards easier application of this configuration into target UniConfig nodes.","Basic properties of templates as they are implemented in UniConfig:","All templates are stored under 'templates' topology and each template is represented by separate 'node' list entry.","Whole template configuration is placed under'frinx-uniconfig-topology:configuration' container in the Configuration datastore. Because of this, configuration of template can be accessed and modified in the same way like modification of UniConfig node.","Templates are validated against single schema context. Schema context, against which validation is enabled, is selected at creation of template using 'uniconfig-schema-repository' query parameter. Value of the query parameter defines name of the schema repository that is placed under UniConfig distribution in form of the directory.","Currently implemented template features:","Variables- They are used for parametrisation of templates.","Tags- Tags can be used for selection of an operation that should be applied for the specific subtree at application of template to UniConfig node.","Schema validation of leaves and leaf-lists is adjusted, so it can accept both string with variables and original YANG type."]},{"l":"Latest-schema","p":["Latest-schema defines name of the schema repository of which built schema context is used for template validation. Latest-schema is used only if there is not 'uniconfig-schema-repository' query parameter when creating template. If 'uniconfig-schema-repository' query parameter is defined, latest-schema is ignored."]},{"l":"Configuration of the latest-schema","p":["Latest-schema can be set using PUT request. It will be placed in Config datastore. Name of directory has to point to existing schema repository that is placed under UniConfig distribution.","GET request can be used for check if latest-schema is placed in config datastore."]},{"l":"Auto-upgrading of the latest-schema","p":["Latest-schema can be automatically upgraded by UniConfig after installation of new YANG repository. YANG repository is installed after deploying of new type of NETCONF/GRPC device or after manual invocation of RPC for loading of new YANG repository from directory.","In order to enable auto-upgrading process, 'latestSchemaReferenceModuleName' must be specified in the'config/lighty-uniconfig-config.json' file:","After new YANG repository is installed, then UniConfig will look for revision of module'latestSchemaReferenceModuleName' in the repository. If found revision is more recent than the last cached revision, UniConfig will automatically write identifier of the fresh repository into 'latest-schema' configuration. Afterwards, 'latest-schema' is used by UniConfig the same way as it would be written manually via RESTCONF."]},{"l":"Variables","p":["Using variables it is possible to parametrise values in the template. 
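As a small illustration (the leaf name 'description' is hypothetical), a template leaf can carry a variable instead of a concrete value, for example "description": "{$desc}"; the variable is replaced by a value supplied when the template is applied.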
Structural parametrisation is not currently supported.","Properties:","Format of the variable: '{$variable-id}'.","Variables can be set to each leaf and leaf-list in the template.","Single leaf or leaf-list may contain multiple variables.","Key of the list can also contain variable.","Variables are substituted by provided values at the application of template to UniConfig node.","It is possible to escape characters of the variable pattern ('$','{', '}'), so they will be interpreted as value and not part of the variable.","Variable identifier may contain any UTF-8 characters. Characters'$', '{', '}' must be escaped, if they are part of the variable identifier."]},{"l":"Examples with variables","p":["A. Leaf with one variable","Application of following values to variables 'var-a' and 'var-b':'var-a' = ['10', '20', '30'], 'var-b' = ['50', '70', '60'].","Application of values - 'var-x': 'next', 'var-y': '7', 'var-1': '10','var-2': '9'. Leaf 'leaf-a' has 'string' type and 'leaf-b' has 'int32' type.","Application of values '10' and 'false' to 'var-1', and 'var-2'. Leaf'leaf-a' has 'int32' type and 'leaf-b' has 'boolean' type.","B. Leaf with multiple variables","Both variables must be substituted by the same number of values.","C. Leaf-list with one variable","D. Leaf-list with multiple variables","E. Leaf-list with entry that contains multiple variables","F. Leaves and leaf-lists with escaped special characters","If leaf-list is marked as \"ordered-by user\", then the order of leaf-list elements is preserved during substitution process.","It is possible to substitute both variables with one or multiple variables.","Leaf 'leaf-a' contains 2 variables and surrounding text that is not part of any variable.","Leaf 'leaf-b' contains 2 variable without additional text - substituted values of these variables are concatenated at application of template.","Leaf-list 'leaf-list-a' contains 2 variables inside one leaf-list entry: 'var-a' and 'var-b'.","Leaf-list 'leaf-list-a' contains 2 variables with identifiers'var-a' and 'var-2'. String \"str3\" represents constant value.","Leaf-list 'leaf-list-a' contains variable with identifier 'var-x'.","Substitution of 'var-1' by 'prefix' and 'var-{2}' by '10':","Substitution of 'var-a' with texts 'str1', 'str2' and 'var-b' with'str4' results in ('string' type):","Substitution of 'var-x' with numbers '10', '20', '30' results in('int32' type):","The following example demonstrates escaping of special characters outside of the variable identifier (leaf-list 'leaf-list-a') and inside of the variable identifier (leaf 'leaf-a').","The following example shows 2 leaves with 2 variables: 'var-1' and'var-2'.","This variable can be substituted by one or multiple values. If multiple values are provided in the apply-template RPC, they are'unwrapped' to the leaf-list in form of next leaf-list entries.","Unescaped identifier of the leaf 'leaf-a': 'var-{2}'."]},{"l":"Tags","p":["By default, all templates have assigned 'merge' tag to the root'configuration' container - if template doesn't explicitly define next tags in the data-tree, then the whole template is merged to target UniConfig node configuration at execution of apply-template RPC. 
However, it is possible to set custom tags to data-tree elements of the template.","Properties:","Tags are represented in UniConfig using node attributes with the following identifier: 'template-tags:operation'.","In RESTCONF, attributes are encoded using special notation that is explained in the 'RESTCONF' user guide.","Tags are inherited through the data-tree of the template. If data-tree element doesn't define any tag, then it is inherited from parent element.","Only single tag can be applied to one data node.","Tags can be applied to following YANG structures: container, list, leaf-list, leaf, list entry, leaf-list entry.","Currently, the following tags are supported:","merge: Merges with a node if it exists, otherwise creates the node.","replace: Replaces a node if it exists, otherwise creates the node.","delete: Deletes the node.","create: Creates a node. The node can not already exist. An error is raised if the node exists.","update: Merges with a node if it exists. If it does not exist, it will not be created."]},{"l":"Examples with tags","p":["A. Tags applied to container, list, and leaf","Template with name 'user_template' that contains 'merge', 'replace', and 'create' tags:","Description of all operations in the correct order that are done based on the defined tags:","Container 'configuration' will be merged to target UniConfig node(implicit root operation).","Container 'system:system' will be updated - its content is merged only, if it has already been created.","The whole list 'users' will replaced in the target UniConfig node.","Leaf named 'password' will be created at the target UniConfig node - it cannot exist under 'users' list entry, otherwise the error will be raised.","B: Tags applied to leaf-list, leaf-list entry, and list entry:","The following JSON represents content of sample template with multiple tags:","'replace' tag is applied to single list 'my-list' entry","'merge' tag is applied to whole 'leaf-list-a' leaf-list","'create' tag is applied to whole 'leaf-list-b' leaf-list","'delete' tag is applied to single leaf-list 'leaf-list-b' entry with value '10'"]},{"l":"Creation of template","p":["A new template can be created by sending PUT request to new template node under 'templates' topology with populated 'configuration' container. Name of the template equals to name of the 'node' list entry. This RESTCONF call must contain specified schema cache repository using the 'uniconfig-schema-repository' query parameter in order to successfully match sent data-tree with correct schema context (it is usually associated with some type of NETCONF device)."]},{"i":"example---creation-of-template","l":"Example - creation of template","p":["The following example shows creation of new template with name'interface_template' using 'schemas_1' schema repository. The body of the PUT request contains whole 'configuration' container."]},{"i":"readupdatedelete-template","l":"Read/update/delete template","p":["All CRUD operations with templates can be done using standard RESTCONF PUT/DELETE/POST/PLAIN PATCH methods. 
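For example, replacing the whole configuration of a template could look like the following sketch (the data path under /rests/data is an assumption; the query parameter, template name and schema repository name come from this page; a real body would carry the full 'configuration' container):
curl -X PUT 'http://localhost:8181/rests/data/network-topology:network-topology/topology=templates/node=interface_template/frinx-uniconfig-topology:configuration?uniconfig-schema-repository=schemas_1' -H 'Content-Type: application/json' -d '{"frinx-uniconfig-topology:configuration": {}}'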
As long as template contains some data under 'configuration' container, next RESTCONF calls, that work with templates, don't have to contain 'uniconfig-schema-repository' query parameter, since type of the device is already known."]},{"i":"examples---restconf-operations","l":"Examples - RESTCONF operations","p":["Reading specific subtree under 'interface_template' - unit with name'{$unit-id}' that is placed under interface with name'eth-0/{$interface-id}'.","Changing 'update' tag of the 'address' list entry to 'create' tag using PLAIN-PATCH RESTCONF method."]},{"l":"RPC get-template-info","p":["This RPC shows information about all variables in specified template. The RPC input has to contain template name."]},{"i":"creation-of-template-1","l":"Creation of template"},{"l":"Usage of RPC"},{"l":"RPC get-template-nodes","p":["This RPC returns all templates from the template topology. No input body is required."]},{"l":"Successful example","p":["There are no templates in the template topology."]},{"i":"successful-example-1","l":"Successful example","p":["There is a template called 'test-template' in the template topology."]},{"l":"Upgrading template to latest yang repository","p":["Template can be upgraded to latest YANG repository using 'upgrade-template' RPC. This procedure consists of:","Read template- Reading of template configuration from'templates' topology in Configuration datastore.","Version-drop- Conversion of template into target schema context that is created by specified yang-repository. Because of this feature, it is possible to change template between different versions of devices with different revisions of YANG schemas but with similar structure. Version-drop is also aware of 'ignoredDataOnWriteByExtensions' RESTCONF filtering mechanism.","Removal of previous template / writing new template- If'upgraded-template-name' is not specified in RPC input, previous template will be deleted and replaced by new one. If it is specified, previous template will not be deleted.","Description of input RPC fields:","template-name: Name of the existing input template. This field is mandatory.","upgraded-template-name: Name of upgraded/new template. This field is optional.","yang-repository: Name of YANG repository against which version-dropping is used. This field is optional. If no yang-repository is specified, latest yang repository will be used.","Description of fields in RPC response:","No fields are used, only HTTP response codes [200 - OK, 404 - Fail]"]},{"i":"usage-of-rpc-1","l":"Usage of RPC"},{"l":"Auto-upgrading of templates","p":["This feature is used to automatically upgrade all stored templates using the old YANG repository to the latest YANG repository with help from the version-drop procedure. For the auto-upgrading process to work, the latest YANG repository must already be configured. The upgrade process must be explicitly enabled in the configuration file and occurs when UniConfig is started.","There is also an option to back up templates before the upgrade with the standard rotation procedure. The names of backed-up templates follow the pattern ' backup', where '' represents the name of the original template and '' represents the backup index. The most recent backup index is always '0' and older ones are rotated by incrementing the corresponding index. 
If a backed-up template reaches the configured limit (maximum number of backups), it is permanently removed from the database.","Overview of available settings ('config/lighty-uniconfig-config.json'):","enabledTemplatesUpgrading- Enables the auto-upgrading process at UniConfig startup. If disabled, the other setting is ignored.","backupTemplatesLimit- Maximum number of stored backup templates. If exceeded, older templates are removed during the rotation procedure. If set to 0, templates are not backed up at all."]},{"l":"Application of template","p":["Application of tags- Data-tree of the template is streamed and data is applied to target UniConfig node based on set tags on data elements, recursively. UniConfig node configuration is updated only in the Configuration datastore.","Description of fields in RPC response:","Description of input RPC fields:","error-message(optional): Description of the error that occurred during application of template.","error-type(optional): Type of the error.","leaf-list-values: List of values - it can be used only with leaf-lists. Special characters ('$', '{', '}') must be escaped.","leaf-value: Scalar value of the variable. Special characters('$', '{', '}') must be escaped.","node-id: Target UniConfig node identifier (key of the list).","node-result: Per target UniConfig node results. The rule is following - all input UniConfig node IDs must also present in the response.","overall-status: Overall status of the operation as the whole. If application of the template fails on at least one UniConfig node, then overall-status will be set to 'fail' (no modification will be done in datastore). Otherwise, it will be set to 'complete'.","Processing template configuration","Read template- Reading of template configuration from'templates' topology in Configuration datastore.","RPC apply-template","status: Status of the operation: 'complete' or 'fail'.","String-substitution- Substitution of variables by provided values or default values, if there aren't any provided values for some variables and leaf/leaf-list defines a default values. If some variables cannot be substituted (for example, user forgot to specify input value of variable), an error will be returned.","Template can be applied to UniConfig nodes using 'apply-template' RPC. This procedure does following steps:","template-node-id: Name of the existing input template.","The following sequence diagram and nested activity diagram show process of 'apply-template' RPC in detail.","uniconfig-node-id: Target UniConfig node identifier.","uniconfig-node: List of target UniConfig nodes to which template is applied ('uniconfig-node-id' is the key).","variable-id: Unescaped variable identifier.","variable: List of variables and substituted values that must be used during application of template to UniConfig node. Variables must be set per target UniConfig node since it is common, that values of variables should be different on different devices. Leaf'variable-id' represents the key of this list.","Version-drop- Conversion of template into target schema context that is used by target UniConfig node. This component also drops unsupported data from input template. Because of this feature, it is possible to apply template between different versions of devices with different revisions of YANG schemas but with similar structure. 
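As a rough illustration of the RPC input described above, the following is a hedged Python sketch of an apply-template call. The RESTCONF base URL and credentials are placeholders, the module prefix in the RPC path is hypothetical (list the registered RPCs with GET /rests/operations to confirm it), and the variable values are illustrative; only the input field names come from the description above.

```python
import requests

AUTH = ("admin", "admin")  # assumption: replace with real credentials

# Hypothetical module prefix - check GET /rests/operations for the exact
# identifier of the apply-template RPC in your distribution.
url = "http://localhost:8181/rests/operations/uniconfig-manager:apply-template"

body = {
    "input": {
        "template-node-id": "service_group",      # existing template
        "uniconfig-node": [
            {
                "uniconfig-node-id": "dev1",       # target UniConfig node
                "variable": [
                    # illustrative variable names and values
                    {"variable-id": "interface-id", "leaf-value": "0"},
                    {"variable-id": "unit-id", "leaf-list-values": ["10", "20"]},
                ],
            }
        ],
    }
}

resp = requests.post(url, json=body, auth=AUTH)
print(resp.json()["output"]["overall-status"])  # 'complete' or 'fail'
```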
Version-drop is also aware of 'ignoredDataOnWriteByExtensions' RESTCONF filtering mechanism."]},{"i":"examples---apply-template-calls","l":"Examples - apply-template calls","p":["Successful application of the template 'service_group' to 2 UniConfig nodes - 'dev1' and 'dev2'.","Failed application of the template 'temp1' - template doesn't exist.","Failed application of the template 'service_group' to 2 UniConfig nodes","'dev1' and 'dev2' - user hasn't provided values for all required variables.","Failed application of the template 'redundancy_template' to UniConfig node 'dev1' - type of the substituted variable value is invalid (failed regex constraint)."]},{"l":"RPC create-multiple-templates","p":["One or more new templates can be created by this RPC. Templates are parsed and written in parallel for better performance. If specified templates already exist, their configuration is replaced. Execution of RPC is atomic - either all templates are successfully created or no changes are made in the UniConfig transaction.","Description of input RPC fields:","template-name:Name of the created template.","yang-repository: YANG schema repository used for parsing of template configuration. Default value: 'latest'.","template-configuration: Whole template configuration.","tags: List of template tags that are written on the specified paths in all created templates. Specified tag type must be prefixed with 'template-tags' module name based on RFC-8040 formatting of identityref.","Only template-name and template-configuration are mandatory fields."]},{"l":"Examples","p":["Successful creation of templates.","Failed to find YANG schema repository.","Failed to parse template configuration.","Creation of 2 templates with separately specified template tags - 'replace' tag is added to '/acl/category' and'/services/group=default/types' elements, while 'create' is added to '/services' element."]}],[{"i":"uniconfig---sending-and-receiving-data-restconf","l":"UniConfig - Sending and receiving data (RESTCONF)"},{"l":"Overview","p":["RESTCONF is described in RESTCONF RFC 8040. Put simply, RESTCONF represents a REST API for accessing datastores and UniConfig operations."]},{"l":"Datastores","p":["There are two datastores:","Config: Contains data representing the intended state. Possible to read and write via RESTCONF.","Operational: Contains data representing the actual state. Possible only to read via RESTCONF.","Each request must start with the URI /rests/. By default, RESTCONF listens on port 8181 for HTTP requests."]},{"l":"REST Operations","p":["RESTCONF supports: OPTIONS, GET, PUT, POST, PATCH, and DELETE operations. Request and response data can be either in the XML or JSON format.","XML structures according to YANG are defined at: XML-YANG.","JSON structures are defined at: JSON-YANG.","Data in the request must set the Content-Type field correctly in the HTTP header with the allowed value of the media type. The media type of the requested data must be set in the Accept field. Get the media types for each resource by calling the OPTIONS operation.","Most of the paths use Instance Identifier. is used in the explanation of the operations and must adhere to these rules:","Identifier must start with :> where is a name of the YANG module and is the name of a node in the module. If the next node name is placed in the same namespace as the previous one, it is sufficient to just use after the first definition of:. 
Each has to be separated by /."," can represent a data node which is a list node, container, leaf, or leaf-list YANG built-in type. If the data node is a list, there must be defined ordered keys of the list behind the data node name, for example, =,. ..","The following example shows how reserved characters are percent-encoded within a key value. The value of \"key1\" contains a comma, single-quote, double-quote, colon, double-quote, space, and forward slash (,'\":\" /). Note that double-quote is not a reserved character and does not need to be percent-encoded. The value of \"key2\" is the empty string, and the value of \"key3\" is the string \"foo\".","Example URL: /rests/data/example-top:top/list1=%2C%27\"%3A\"%20%2F,,foo","The format : has to be used in this case as well. Module A has node A1. Module B augments node A1 by adding node X. Module C augments node A1 by adding node X. For clarity, it has to be known which node is X (for example: C:X)."]},{"l":"Mount point","p":["The purpose of yang-ext:mount container is to access southbound mountpoint, when the node is already installed in Uniconfig (After install-node RPC). It exposes operations for reading device data which can only be done under connection-specific topology (cli/netconf) with defined node-id in URI. In this case, the URI has to be in the format/ yang-ext:mount/. The first is the path to a mount point and the second is the path to subtree behind the mount point. An URI can end in a mount point itself by using /yang-ext:mount. In this case, if there is no content parameter, whole operational and configuration data will be read.","Examples of retrieving data behind yang-ext:mount","In this request, we are using parameter content=config, this means we are reading candidate NETCONF datastore. Value config of parameter content is translated into get-config NETCONF RPC.","In this request we are using parameter content=nonconfig, which means that we are reading running NETCONF datastore. Value nonconfig is translated into get NETCONF RPC. We can compare it with data directly from device using show running-config command.","Examples of invocation of yang actions behind yang-ext:mount.","Invocation of yang action -> List available firmware packages on disk","Invocation of yang action -> Erase running-config-then load","To completely understand installing of node see Device installation."]},{"l":"HTTP methods"},{"i":"options-rests","l":"OPTIONS /rests","p":["Returns the XML description of the resources with the required request and response media types in Web Application Description Language (WADL)."]},{"i":"get-restsdataidentifiercontentconfig","l":"GET /rests/data/?content=config","p":["Returns a data node from the Config datastore."," points to a data node that must be retrieved.","Value 'config' represents default value of content query parameter - it doesn't have to be specified, if user would like to read intended/uncommitted changes from Config datastore.","Request GET '/rests/data/' would return the same data."]},{"i":"get-restsdataidentifiercontentnonconfig","l":"GET /rests/data/?content=nonconfig","p":["Returns the value of the data node from the Operational datastore."," points to a data node that must be retrieved."]},{"i":"get-restsdataidentifiercontentall","l":"GET /rests/data/?content=all","p":["Returns a data node from both Config and Operational datastores. 
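A small Python sketch of the three content modes covered in this section; the node and path in the URL are illustrative, and the default port 8181 plus placeholder credentials are assumptions.

```python
import requests

AUTH = ("admin", "admin")  # assumption: replace with real credentials
BASE = "http://localhost:8181/rests/data"

# Illustrative identifier - any valid path under /rests/data behaves the same way.
url = (f"{BASE}/network-topology:network-topology/topology=uniconfig"
       "/node=dev1/frinx-uniconfig-topology:configuration")

# 'config' reads intended state, 'nonconfig' reads actual state,
# 'all' returns the merge of both datastores.
for content in ("config", "nonconfig", "all"):
    resp = requests.get(url, params={"content": content}, auth=AUTH)
    print(content, resp.status_code)
```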
The outputs from both datastores are merged into one output."," points to a data node that must be retrieved."]},{"i":"put-restsdataidentifier","l":"PUT /rests/data/","p":["Updates or creates data in the Config datastore and returns the state about success."," points to a data node that must be stored.","Content type does not have to be specified in URI - it can only be the Configuration datastore."]},{"i":"post-restsdataidentifier","l":"POST /rests/data/","p":["Creates the data if it does not exist in the Config datastore, and returns the state about success."," points to a data node where data must be stored.","The root element of data must have the namespace (data is in XML) or module name (data is in JSON)."]},{"i":"post-restsdata","l":"POST /rests/data","p":["Creates the data if it does not exist under data root.","In the following example, the 'toaster' module is the root container in YANG (it doesn't have any parent). This example also makes it clear that URI doesn't contain 'toaster' node in comparison to a PUT request that must contain the name of the created node in URI."]},{"i":"delete-restsdataidentifier","l":"DELETE /rests/data/","p":["Removes the data node in the Config datastore and returns the state about success."," points to a data node that must be removed."]},{"i":"patch-restsdataidentifier","l":"PATCH /rests/data/","p":["The patch request merges the contents of the message-body with the target resource in the Configuration datastore (content-type query parameter is not specified)."," points to a data node on which PATCH operations is invoked.","This request is implemented by Plain PATCH functionality, see more details on the following page: RFC-8040 documentation - Plain PATCH operation.","Plain patch can be used to create or update, but not delete, a child resource within the target resource. Any pre-existing data which is not explicitly overwritten will be preserved. This means that if you store a container, its child entities will also merge recursively.","The following example shows the PATCH request used for modification of Ethernet interface IP address and two connection settings. 
Note that other settings under system:system container are left untouched including other leaves under 'connection' container and 'ethernet' list item."]},{"i":"patch-restsdataidentifierapply-tagstrue","l":"PATCH /rests/data/?apply-tags=true","p":["The patch request with parameter apply-tags=true allows to use tags.","Tags allows us to use differrent operation for separate elements instead of merging whole content as without tags.","The following tags are supported: merge, replace, delete, create and update.","Usage of these tags are explained in Templates manager : here.","The following example shows PATCH request used for modification of interfaces on IOS XE device including creating, deleting, and replacing interface configuration."]},{"i":"post-restsoperationsmodulenamerpcname","l":"POST /rests/operations/:","p":["Invokes RPC on the specified path.",": - is the name of the module and is the name of the RPC in this module.","The Root element of the data sent to RPC must have the name “input”.","The result has the status code and optionally retrieved data having the root element “output”.","The answer from the server could be:","GET /rests/operations request can be used to retrieve all available RPCs that are registered in distribution.","More information is available in the RESTCONF RFC 8040."]},{"i":"post-restsdatapath-to-operation","l":"POST /rests/data/","p":["Invokes action on the specified path in the data tree.","Placeholder represents data path to operation definition that is specified under composite data schema node in YANG (only containers and lists may contain action definition).","Content query parameter doesn't have to be specified (it will be ignored), action is represented equally in Operational and Config datastore.","Both RFC-8040 (YANG 1.1) and TAIL-F actions are supported. TAIL-F actions can be placed in both YANG 1.0 and YANG 1.1 schemas. There aren't any differences in the invocation of these types of actions using RESTCONF API.","The body of the action invocation request may contain a root 'input' container. If the action definition has no specified input container, it is not required to specify the body in the request.","The response contains the status code and optionally retrieved data having the root element 'output'.","Currently, FRINX UniConfig only supports invocation of actions under NETCONF mountpoint, must contain'yang-ext:mount' container.","Structure of 'input' and 'output' elements are the same as the structure of these containers when we invoke YANG RPC.","Assume the following YANG snippet with root container named'interfaces':","Invocation of the action named 'compute-stats' that is placed under the'interfaces' container of NETCONF mountpoint:","Difference between RPCs and actions: Actions are bound to a data tree and they can be placed under containers and lists (they cannot be specified as root entities in YANG schema). RPCs are not placed in the data tree and for this reason, they can only be specified as root entities in the YANG schema."]},{"l":"Selecting Data","p":["For selecting and identifying data is good to use query parameter fields. This parameter has to be used only with the GET method. 
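Ahead of the detailed rules below, a minimal Python sketch of a GET request narrowed with the fields parameter; the device path and interface model are illustrative, and the endpoint and credentials are assumptions.

```python
import requests

AUTH = ("admin", "admin")  # assumption: replace with real credentials
BASE = "http://localhost:8181/rests/data"

# Illustrative path; 'name;config/type' selects each interface name together
# with the 'type' leaf under its 'config' container.
url = (f"{BASE}/network-topology:network-topology/topology=uniconfig"
       "/node=dev1/frinx-uniconfig-topology:configuration"
       "/frinx-openconfig-interfaces:interfaces/interface"
       "?fields=name;config/type")

resp = requests.get(url, auth=AUTH)
print(resp.json())
```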
The response body is output filtered by field-expression as the value of fields parameter."]},{"l":"Fields","p":["The response body is the output filtered by the field-expression as a value of the fields parameter.","The example of using the fields parameter: path?fields=field_expression","There are several rules, that need to be followed:","For filtering more than one field of the same parent, \";\" needs to be used. Example : path?fields=field1;field2, where field1 and field2 has the same parent, which is the very last part of the path.","For nesting, \"/\" needs to be used. Example : path?fields=field1;pathField/field2, where field1 and field2 has not the same parent, but pathField is on the same level as field1.","This is a different approach to do nesting, however, the difference between \"(\" and \"/\" is that once we use \"/\" for specifying some field, we cannot identify another field from the upper layers.","This is the case where pathField1 and pathField2 have the same parent, this is not allowed, because once we use \";\" it is expected to specify fields on the same layer as field1","Examples: With 2 approaches (nesting, sub-selecting)","Example of filtering the entire configuration of all interfaces (name, with the config):","Example of filtering all names of interfaces and all names of configs of interfaces:","Example of filtering all names of interfaces with type from the config of interfaces:"]},{"l":"Filtering Data","p":["For filtering data based on specific value is good to use jsonb-filter query parameter. This parameter has to be used only with the GET method."]},{"l":"Jsonb-filter","p":["JSONB filtering"]},{"l":"Pagination","p":["To further extend the ability to filter data according to our needs, we can use pagination in the GET method.","There are 3 pagination parameters that can be used individually or in combination with each other :","offset : This parameter lets us choose on which list entry value we want data to start rendering.","limit : Limit gives us the option to control how many node values are going to be displayed in our GET request.","fetch=count : Used to obtain the amount of children nodes in a specific node.","Beware that pagination works only for list nodes.","The example of using individual pagination parameter:","The example of using two pagination parameters simultaneously:","The example of using fetch count parameter:","The response body of fetch count parameter with a path from the previous example:"]},{"l":"Sorting","p":["This utility helps us to sort list data from GET request according to our needs in ascending or descending order.","To sort some data, use a query parameter called sortby that will include at least one identifier of child leaf and sort direction. The first part of the value represents leaf identifier, the second part enclosed in brackets represents sort direction ('asc' or 'desc'). If there are multiple leaves based on which sorting is done, they are separated by semicolon.","Sorting, just like pagination, can only be used on list nodes.","The example of using sortby parameter with 1 value (sorting by the value of 'name' leaf):","The example of using sortby parameter with 2 values (sorting by values of 'name' and 'revision' leaves, in that order):","The example of using sortby and pagination simultaneously:","It is possible to specify module-name as part of the leaf identifier. Module-name must be specified only if there are multiple children leaves with the same identifier but specified from different namespaces. 
Example:","In the case of union types specified on leaf nodes, sorting is done in the blocks that are ordered by the following strategy:","leaves without value","empty type","boolean type","random numeric type","types that can be represented by JSON string"]},{"l":"Inserting"},{"l":"Insert query parameter","p":["The 'insert' query parameter can be used to specify how an item should be inserted within an list or leaf-list. This parameter is only supported for the POST and PUT methods. It is also only supported if the target list or leaf-list is marked as 'ordered-by user' in YANG model.","The allowed values for 'insert' query parameter:","Value","Description","first","Insert the new item as the new first entry.","last","Insert the new item as the new last entry (default value).","before","Insert the new item before the insertion point, as specified by the value of the 'point' query parameter.","after","Insert the new data after the insertion point, as specified by the value of the \"point\" parameter.","If the values 'before' or 'after' are used, then a 'point' query parameter for the 'insert' query parameter MUST also be present."]},{"l":"Point query parameter","p":["The 'point' query parameter is used to specify the insertion point for an item that is being created or moved within an'ordered-by user' list or leaf-list. Like the 'insert' query parameter, 'point' query parameter is only supported for the POST and PUT methods and also if the target list or leaf-list is marked as 'ordered-by user' in YANG model. The value of the 'point' query parameter is a string that indicates the key of the insertion point item. If the key is composite, the key items must be separated by a comma."]},{"l":"Examples","p":["Next five examples show usage of 'insert' and 'point' query parameters for leaf-list. First example shows how leaf-list looks before update. There are no differences in the use of the list and leaf-list."]},{"l":"List before update"},{"l":"Insert item at the top of the list"},{"l":"Insert item at the bottom of the list"},{"l":"Insert item after specific item"},{"l":"Insert item before specific item"},{"l":"Retrieving data"},{"l":"With-defaults query parameter","p":["All data nodes are reported, including any data nodes with YANG default in scheme, which are not set by client are reported.","Data nodes set to its YANG schema default value are not reported.","Data nodes set to its YANG schema default value by the client are reported.","Description","Example Data Set By User:","Example YANG Module:","explicit","report-all","The 'with-defaults' query parameter is used to specify how information about default data nodes is returned in response to GET requests on data resources. The response body is output filtered by value of with-defaults parameter.","The allowed values for 'with-defaults' query parameter:","The example of using the with-defaults query parameter: path?with-defaults or path?with-defaults=value","trim","Using with-defaults without value is equivalent to value 'report-all'.","Value","Value Explicit","Value Report-All or Without Value","Value Trim"]},{"l":"JSON Attributes","p":["Node attributes can be encoded in JSON by wrapping all the attributes in the '@' container and values or arrays in the '#' JSON element. 
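Before the concrete attribute examples below, a short sketch of this wrapping using a hypothetical 'mtu' leaf and attribute name; only the '@'/'#' convention itself is taken from the description above. Either form can be sent as a normal JSON body with PUT/POST/PLAIN PATCH.

```python
# A hypothetical 'mtu' leaf: attributes move into the '@' wrapper and the
# original leaf value moves into the '#' element.
leaf_without_attributes = {"mtu": 1500}

leaf_with_attributes = {
    "mtu": {
        "@": {"m1:attribute-1": "some-value"},  # attribute identifier -> value
        "#": 1500,                              # original leaf value
    }
}
```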
This notation is inspired by one that is used in the 'js2xmlparser' open-source tool (conversion between JSON and XML structures): js2xmlparser","RESTCONF supports both serialization and deserialization of attributes, GET response shows all set attributes in the read data-tree and PUT/POST/PLAIN PATCH methods can be used for the writing of data nodes with attributes. Warning: attributes cannot be directly addressed using RESTCONF URI that would contain the '@' element in the path, because attributes are always bound to some data node, they are not represented by distinct nodes in the data-tree.","Reserved '@' container may contain multiple attributes. Each attribute is encoded in the same fashion as leaf nodes, there is an identifier of the attribute and attribute value.","Format of the attribute that is defined in the [module]:","Format of the attribute that is defined in the same module as the parent data entity:"]},{"i":"example---leaf-with-attributes","l":"Example - leaf with attributes","p":["Leaf without attributes:","The same leaf with set 2 attributes: 'm1:attribute-1' and'm1:attribute-2':"]},{"i":"example-container-with-attributes","l":"Example: Container with Attributes","p":["A container without attributes:","The same container with set 2 attributes: 'm1:switch' and'm2:multiplier':"]},{"i":"example-leaf-list-with-attributes","l":"Example: Leaf-list with Attributes","p":["Leaf-list without attributes:","The same leaf with set 1 attribute: 'mx:split':"]},{"i":"example-leaf-list-entry-with-attributes","l":"Example: Leaf-list Entry with Attributes","p":["Leaf-list without attributes:","Two leaf-list entries, leaf-list entry with value '10' has one attribute with identifier 'm1:prefix'. The second leaf-list entry '20' doesn't have any attributes assigned."]},{"i":"example-list-with-attributes","l":"Example: List with Attributes","p":["List without attributes:","The same list with applied single attribute: 'constraints:length'."]},{"i":"example-list-entry-with-attributes","l":"Example: List Entry with Attributes","p":["List with two list entries without attributes:","The same list entries, the first list entry doesn't contain any attribute, but the second list entry contains 2 attributes: 'm1:switch' and 'm2:multiplier'."]},{"l":"Device Schema Filters","p":["By default, all input and output data produced by RESTCONF for the selected device is fully compliant with its YANG models. Any violation of the YANG schema definitions will result in an error. Some of these restrictions can be addressed by adding the 'schemaFilters' configuration parameter for the RESTCONF."]},{"l":"Configuration Options Overview","p":["Following configuration options for 'schemaFilters' make RESTCONF processing less restrictive:"]},{"l":"Configuration Example","p":["The following example demonstrates how to enable schema filters for selected extensions and make RESTCONF ignore unknown definitions and definitions with a 'deprecated status' attribute."]},{"i":"unhide-parameter-for-readwrite-operations","l":"Unhide Parameter for READ/WRITE Operations","p":["RESTCONF supports the 'unhide' query parameter for the GET requests to include hidden definitions into the response and for PUT/POST/PATCH requests to accept hidden definitions in the input. 
This parameter value can be populated with a comma-separated list of extensions to unhide or the keyword 'all' to include all possible hidden definitions in the response.","Example of using the 'unhide' parameter for the GET and PUT/POST/PATCH requests.","Using unhide with a list of extensions","Using unhide parameter to unhide all hidden definitions"]},{"l":"Leafref validation","p":["According to YANG standard there are constraints for leafrefs. These constraints are not validated by default. Leafref validation can be enabled using checkForReferences query parameter with value set to true."]},{"i":"example","l":"Example:"},{"l":"Using leafref validation"},{"l":"Example output of failed validation","p":["If checkForReferences parameter is set to false or is not provided UniConfig will not perform leafref validation and there will be no leafref validation error."]},{"l":"Hide Empty Data Nodes","p":["Query parameter 'hideEmptyDataNodes' is used to hide empty composite data-tree nodes in response to GET call. Data nodes that contain only attribute tag are considered to be empty too. Default value is 'false' - empty nodes are displayed in the GET response."]},{"i":"example-1","l":"Example"},{"l":"Escaping keys in URI","p":["Following characters must be escaped, if they are contained in a list key value:':', '/', '?', '#', '[', ']', '@', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '='.","There are 2 ways how to escape special characters in a key value: by encoding reserved UTF-8 characters using '%HH' patten or using key delimiter."]},{"l":"Encoding reserved characters","p":["RESTCONF RFC-8040 natively allows to specify reserved characters in a key value, if they are encoded using'%HH' pattern, where 'HH' refers to hexadecimal representation of UTF-8 character.","The following request demonstrates encoding of special characters in the 'ge0/0/1' interface name.","Mappings between special characters and UTF-8 codes can be found on following site: https://www.urlencoder.org/"]},{"l":"Demarcation of key using delimiter","p":["UniConfig allows to specify delimiter used for demarcation of list key value. Afterwards, all special characters inside key are automatically escaped.","By default, key delimiter is disabled. It must be specified in the 'config/lighty-uniconfig-config.json' file:","The following request demonstrates demarcation of interface name 'ge0/0/1' using '%22' delimiter."]},{"l":"Hide Attributes","p":["Query parameter 'hideAttributes' is used to hide composite data-tree nodes attributes in response to GET call. Default value is 'false' - nodes attributes are displayed in the GET response."]},{"i":"example-2","l":"Example"},{"i":"callbacks-http-client","l":"Callbacks (http-client)","p":["Callbacks include sending GET (call-point) and POST (action) requests to the remote server. They are implemented mainly for UniConfig Shell, but can also be used by RESTCONF for UniStore nodes by using the URI prefix:"]},{"i":"examples-1","l":"Examples","p":["Example - call-point invocation in RESTCONF","Response:","Example - action invocation in RESTCONF","Callbacks must be configured before use. For more details, see Callbacks."]}],[{"l":"UniConfig Queries","p":["This module is responsible for execution of queries on the configuration of some device, template, UniStore node, or snapshot."]},{"l":"RPC query-config","p":["UniConfig exposes filtering and selection API using RPC 'query-config'. 
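A hedged Python sketch of a query-config call: the module prefix in the RPC path is hypothetical (check GET /rests/operations for the exact identifier), the node name and JSONB-path are illustrative, and the input field names follow the description below.

```python
import requests

AUTH = ("admin", "admin")  # assumption: replace with real credentials

# Hypothetical module prefix - confirm the exact RPC identifier in your distribution.
url = "http://localhost:8181/rests/operations/uniconfig-query:query-config"

body = {
    "input": {
        "topology-id": "uniconfig",
        "node-id": "dev01",
        # the JSONB-path must start from the root 'configuration' container;
        # quoting identifiers avoids problems with ':' and '-' in PostgreSQL
        "jsonb-path-query": '$."frinx-uniconfig-topology:configuration"."ssh"',
    }
}

resp = requests.post(url, json=body, auth=AUTH)
print(resp.json()["output"]["config"])
```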
Filtering and selection of configuration is done only on the database side - UniConfig receives already narrowed configuration with only selected data. Since query is evaluated by the database, this feature works only with already committed data (operational data).","The following sequence diagram captures the whole process of RPC execution in detail.","Execution of RPC query-config"]},{"l":"RPC input fields","p":["topology-id: Identifier of network-topology/topology list entry. Currently, supported topologies, under which this RPC can be used, are: 'uniconfig', 'templates', 'unistore', and snapshot topologies.","node-id: Identifier of specific network-topology/node list entry whose configuration is filtered using specified jsonb-path-query.","jsonb-path-query: JSONB-path query used for selection and filtering of subtrees in the node configuration stored in the PostgreSQL. JSONB-path must start from root \"frinx-uniconfig-topology:configuration\" container(it is always represented by absolute path).","JSONB-path query syntax is specified by PostgreSQL. You can find detailed description of all features with examples on the following link (version 14): https://www.postgresql.org/docs/14/functions-json.html#FUNCTIONS-SQLJSON-PATH"]},{"l":"RPC output fields","p":["config: List of selected and filtered JSON objects. Note that database may return multiple list entries, if the last element in the JSONB-path is represented by list/leaf-list YANG schema node. In other cases, only one or no JSON object is displayed on output based on fulfilling the filtering and selection criteria."]},{"i":"example-selection-of-json-object","l":"Example: selection of JSON object","p":["The following request demonstrated execution of simple selection query under the 'dev01' from 'uniconfig' topology. Response contains 1 JSON object - 'ssh' container.","JSONB-path query should always start with $.\"frinx-uniconfig-topology:configuration\" pattern because 'configuration' represents wrapping element for all root data elements that are stored in database.","Be aware that PostgreSQL requires escaping of special characters in the identifiers of JSON elements. For example,':' and '-' represent special characters. Because of this behaviour, it is always safer to put double quotes around all identifiers as it is done in this example."]},{"i":"example-filtering-list-of-json-objects","l":"Example: filtering list of JSON objects","p":["The next query demonstrates filtering of 'address' JSON objects using predicate based on 'ipv4-address'(the first octet must have a value '80'). Addresses under all 'controller' list entries are filtered. In this example, response contains multiple JSON objects representing 'address' list entries."]},{"i":"example-selection-of-leaf-list-content","l":"Example: selection of leaf-list content","p":["The next request shows selection of all addresses under ethernet interfaces with type 'vxlan' and 'enabled' flag set to 'true'. 
Response will contain aggregated array of strings, because 'address' is represented by leaf-list."]},{"i":"example-non-existing-node","l":"Example: non-existing node","p":["If node with specified identifier doesn't exist under target topology, RPC will return 400 with corresponding error message."]},{"i":"example-syntax-error","l":"Example: syntax error","p":["In case of invalid form of input 'jsonb-path-query', UniConfig will return 400 status code with error-message describing syntax error."]}],[{"l":"UniConfig Shell","p":["UniConfig shell is a command-line interface for Uniconfig.","Accessible over SSH, it allows users to interact with Uniconfig features including the following:","Read operational data of devices","Manipulate device configuration","Manipulate configuration templates","Manipulate data stored in Unistore","Invoke device or UniConfig operations","Manipulate global UniConfig settings","As Uniconfig shell is model-driven, its interface is mostly auto-generated from YANG schemas (e.g., tree structure of data-nodes or available RPC/action operations)."]},{"l":"Configuration","p":["By default, UniConfig shell is disabled. To enable it, set the configuration parameter cliShell/sshServer/enabled to true in the config/lighty-uniconfig-config.json file.","All available settings and descriptions are listed below:","After starting UniConfig, the SSH server will listen for connections on port 2022 and the loopback interface."]},{"l":"Navigating in the shell","p":["Every command line starts with a command prompt that ends with the character. The identifier of the command prompt changes based on the current shell mode and the state of execution in this mode.","The commands exit and quit are available in all shell modes:","exit returns the state to the parent state","quit returns the state to the nearest parent mode (e.g., configuration mode, root mode, operational show mode). If the current state of the shell represents some mode, 'quit' and 'exit' have the same effect of returning to the parent mode.","Typed commands are sent to UniConfig using the ENTER key. UniConfig processes the command and may send a response to the console depending on the command behaviour. All commands are processed synchronously, meaning that multiple commands cannot be executed in parallel in the same SSH session.","CTRL-A and CTRL-E move the cursor to the beginning or end of the current line.","CTRL-L clears the shell screen.","Arrow keys UP/DOWN are used to load previous commands in the command history.","CTRL-C cancels the current line and moves to a new blank line.","TAB loads suggestions in the current context. Hit TAB again to navigate through suggested commands using the arrow keys and select using ENTER. Leave the submode with suggestions using the shortcut CTRL-E. The text in brackets contains a description of the next command.","If the output is longer than the length of the command-line window, the output is displayed with scrolling capability. Use ENTER to display the next line and SPACE to display the next page. Use the q key to leave scrolling mode. 
You can only scroll only in one direction, towards the end of the output.","Scrolling through long output"]},{"l":"Root mode","p":["Root mode is the initial mode after successful authentication.","Example: Log into UniConfig shell:","The exit command is used to exit the UniConfig shell interface altogether (disconnecting SSH client).","Example - Exit UniConfig shell:","Currently, only username/password single-user authentication is supported as configured in the application.properties file."]},{"l":"Accessing sub-modes","p":["Root mode acts as a gateway to open the configuration and show modes.","Example - Switch to configuration mode:"]},{"l":"Show command history","p":["The show-history command is used to display a list of N last invoked commands. This command is also available in configuration mode.","Example - Show the last five executed commands:","Note that the list of invoked commands persists across UniConfig restarts and SSH connections."]},{"l":"Unhide and hide operations","p":["The following commands are used to unhide and hide attributes in application properties:","unhide-get is used to unhide an attribute hidden in application properties for read purposes.","unhide-set is used to unhide an attribute hidden in application properties for write purposes.","hide-get is used to hide attributes that were unhidden with unhide-get.","hide-set is used to hide attributes that were unhidden with unhide-set.","When unhide is set for a GET or SET operation, the request URL for the operation contains the unhide query parameter. In the following example, the unhide parameter is set to all:","http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=vnf21/configuration?unhide=all","The command also gives confirmation that the attribute was added to or removed from the unhidden list.","When unhide-get or hide-get are called without parameters, the output contains a list of all unhidden parameters. The same applies to unhide-set and hide-set.","When used with the parameter all, the unhide operation applies to all parameters defined in application properties for read or write purposes."]},{"l":"Configuration mode","p":["Configuration mode provides access to the following:","CRUD operations on top of persisted UniConfig, UniStore and template nodes","CRUD operations on top of persisted UniConfig settings","UniConfig RPC operations such as commit or calculate-diff","After opening configuration mode, a new UniConfig transaction is created. All operations invoked in configuration mode are executed in the scope of the created transaction. The transaction is automatically closed after leaving configuration mode ( exit or quit command).","If commit or checked-commit are invoked, the transaction is automatically refreshed. The user stays in configuration mode with a newly created transaction.","Commands like SET / SHOW / DELETE are now available only on a specific device and are not accessible in root configuration mode."]},{"l":"Show configuration","p":["The show operation can be used to display selected subtrees.","The subtree path can be constructed interactively with the help of shell suggestions / auto-completion mechanism. 
Construction of the path works the same way for SET / SHOW/ DELETE operations.","Example - Display the configuration of a selected container:","First move into a specific topology on a specific device:","After this, the show operation is available:"]},{"l":"Delete configuration","p":["The delete operation removes a selected subtree.","Example - Remove a container:","First move to a specific topology on a specific device:","After this, the delete operation is available:","Quit to configuration mode, commit using request mode and return to the device on the topology:"]},{"l":"Set configuration","p":["The set operation can be used for the following:","Set the value of a single leaf.","Set the values of multiple leaves in a single shell operation.","Set a list of values for a leaf-list.","Replace the entire subtree using a JSON snippet.","Example - Set the value of a single leaf:","Example - Set values for multiple leaves under the 'hold-time' container:","A JSON snippet can be written to a selected data-tree node by entering the json sub-mode. In this sub-mode, you can type multiple lines that must represent a well-formed JSON document. At the end, confirm the set operation using the pattern 'w!' + newline, or cancel the set operation with the pattern 'q!' + newline.","Example - Replace configuration of an interface using a JSON snippet:","Example - Leave json sub-mode without executing set operation:"]},{"l":"Execute UniConfig operation","p":["The request command is used to execute UniConfig operations such as commit or calculate-diff in the UniConfig transaction:","The command is available in configuration mode.","You can fill in input parameters and values interactively or via provided JSON snippet.","Example - Execute UniConfig RPCs in the scope of the open UniConfig transaction:"]},{"l":"Request operational mode","p":["This command has been merged with request configuration mode and is now only available in configuration mode.","Request mode allows users to:","Invoke selected UniConfig requests that read or alter UniConfig settings.","Invoke RPCs or actions that are provided by network devices or other southbound mount-points.","Input parameters and values can be filled in interactively or via a provided JSON snippet. The transaction is passed from configuration mode.","Example - Invoke RPC execute-and-read with typed input parameters:","Example - Execute the same RPC 'execute-and-read' using input JSON:","UniConfig shell does not support interactive typing of input arguments for an RPC/action that contains the list YANG element. Such operations must be executed using input JSON."]},{"l":"Show operational mode","p":["Show mode allows users to:","Display operational data about UniConfig itself (e.g., logging status, list of open transactions or list of acquired subscriptions).","Display operational data of network devices.","After opening show mode, a new UniConfig transaction is opened. The transaction is closed when you leave this mode.","Example - Display configuration of selected subtree:","Example - Display selected system configuration:"]},{"l":"Pipe operations","p":["UniConfig shell supports pipe operations similar to Unix shell/bash pipes. 
When a command is followed by the pipe sign |, the output of the command is passed to the selected pipe operation.","Example:","Supported pipe operations are:","grep - Show only lines that match supplied regex","match - Same as grep, but can be used with optional parameters to also show lines before and after matched lines","context-match - Same as grep, but also shows parent structure","brief - Display root elements in short table format","hide-empty-data-nodes - Hide data nodes without child nodes","hide-attributes - Hide attributes of data nodes"]},{"l":"Redirecting output","p":["The output of an executed command can be redirected to a file using the sign followed by a filename.","Example:","In this case, output in the console is empty but the content of the output.txt file is a follows:"]},{"l":"Aliases","p":["You can define aliases in UniConfig shell. For this purpose, there is a json file named shell-aliases in the UniConfig distribution. After unpacking the UniConfig distribution, the file can be found under Uniconfig/distribution/packaging/zip/target/uniconfig-x.x.x/config. The file contains some sample aliases."]},{"l":"Alias creation","p":["Aliases cannot be created dynamically, only before Uniconfig is started. The following rules apply:","The alias name must be unique and cannot contain whitespaces.","The command can contain a wildcard (*). In this case, the user is prompted to add a value.","The alias is only visible in the mode where it was defined.","Example - Execute the alias 'diff xr5':","Example: Execute the alias 'lbr':","Example - Execute the alias 'shh':"]},{"l":"Callbacks","p":["Callbacks include sending POST and GET requests to the remote server and invoking user scripts from the UniConfig shell.","The following is required to use callbacks:","Necessary YANG modules - YANG modules that are required by the callbacks.","Configuration - Enable callbacks in config/application.properties and set the remote server and access token.","Update repository - Add the necessary YANG modules from step 1 into at least one YANG repository in the cache directory, and either define remote endpoints and scripts in a YANG file or create a new one for callbacks. For a definition of remote endpoints, use the frinx-callpoint@2022-06-22.yang extension.","UniStore node - Create a UniStore node using the YANG repository containing the necessary YANG modules from step 1 and a YANG file with defined endpoints and scripts.","In UniConfig shell, step 4 is optional as UniConfig creates dummy UniStore nodes for all repositories that meet the conditions in step 3. In this case, the dummy UniStore node name is identical to the YANG repository name.","In RestConf, step 4 is mandatory."]},{"l":"Necessary YANG modules","p":["The following YANG modules are required:","frinx-callpoint@2022-06-22.yang(not needed for scripts)","tailf-common@2018-11-12.yang","tailf-meta-extensions@2017-03-08.yang","tailf-cli-extensions@2018-09-15.yang"]},{"i":"configuration-1","l":"Configuration","p":["By default, callbacks are disabled and the host and port for the remote server are empty in config/lighty-uniconfig-config.json.","To enable callbacks, set the configuration parameter callbacks/enabled to true. It is also necessary to set the host and port for the remote server and store an access token in the UniConfig database.","The host and port for the remote server can be set in three ways:","Before starting Uniconfig, in the config/application.properties file. 
The port number is optional:","After starting UniConfig, with a PUT request:","After starting UniConfig, with cli-shell:","The access token can be stored in the UniConfig database in one of two ways:","Available settings and descriptions for callbacks are listed below:"]},{"l":"Update repository","p":["First, create or update the YANG repository by using the frinx-callpoint@2022-06-22.yang extension displayed in the following snippet. There is only one extension, url, with the argument point."]},{"i":"add-call-point-get-request","l":"Add call-point (GET request)","p":["The following snippet shows how to create a call-point in the frinx-test YANG file by using the frinx-callpoint@2022-06-22.yang extension.","The argument of the 'url' extension is '/data/from/remote', which is appended to the end of the remote server URI configured in 'config/lighty-uniconfig-config.json'. Thus the final address for the remote call-point is'https://remote.server.io/data/from/remote'."]},{"i":"add-action-post-request","l":"Add action (POST request)","p":["The following snippet shows how to create an action in the frinx-test YANG file by using the frinx-callpoint@2022-06-22.yang extension. You must also import tailf-common.yang.","The action consists of:","The action name, defined by tailf:action.","The suffix for the remote endpoint, defined by fcal:url.","The input that contains body of the request. This part is optional."]},{"l":"Add script","p":["The following snippet shows how to create a script in the frinx-test YANG file by using tailf-common.yang. It is not necessary to import the frinx-callpoint@2022-06-22.yang extension.","The script consists of:","The script name, defined by tailf:action.","The path to the script, defined by tailf:exec.","Arguments for the script, defined by tailf:exec.","Arguments can be dynamic (i.e., the user can pass values to them) or static (flags). Follow these conventions when creating arguments:","Each argument must contain a name (for example, -n, -j).","Dynamic arguments must be enclosed in $(...)(for example, $(name)).","Flags are simple words without whitespace (for example, VIP, UPPER, upper)."]},{"l":"UniStore node","p":["A UniStore node can be created by RestConf or UniConfig shell. If a repository is explicitly defined by the query parameter ?uniconfig-schema-repository=repository-name, this repository must contain all necessary YANG modules. If a repository name is not defined when the UniStore node is created, all necessary YANG modules must be in the latest schema repository."]},{"l":"Examples","p":["Example - Invoke callpoint in shell:","Example - Invoke action in shell:","Example - Execute user script in shell:"]}],[{"l":"UniStore API"},{"l":"Introduction","p":["UniStores nodes are used for storing and management of various settings/configuration inside UniConfig. The difference between UniStore and UniConfig nodes is that UniConfig nodes are backed by a(real/network) device whereas UniStore nodes are not reflected by any real device. 
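Ahead of the step-by-step example below, a minimal Python sketch of creating a UniStore node over RESTCONF; the endpoint, credentials and the exact path of the 'configuration' container are assumptions, while the 'unistore' topology, the 'global' node name, the 'system' repository and the 'uniconfig-schema-repository' query parameter come from the walk-through that follows.

```python
import requests

AUTH = ("admin", "admin")  # assumption: replace with real credentials
BASE = "http://localhost:8181/rests/data"

# UniStore nodes live under the dedicated 'unistore' topology; the YANG repository
# used to parse the payload is only required when the node is first created.
url = (f"{BASE}/network-topology:network-topology/topology=unistore"
       "/node=global/frinx-uniconfig-topology:configuration")

body = {
    "frinx-uniconfig-topology:configuration": {
        # placeholder: content must conform to the chosen YANG repository
    }
}

resp = requests.put(url, params={"uniconfig-schema-repository": "system"},
                    json=body, auth=AUTH)
print(resp.status_code)
```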
In case of UniStore nodes, UniConfig is used only for management of the configuration and persistence of this configuration into PostgreSQL DBMS.","Summarized characteristics of UniStore nodes:","UniStore nodes are not backed by 'real' devices / southbound mount-points - they are used only for storing some configuration - configuration is only committed to PostgreSQL DBMS.","Configuration of UniStore node can be read, created, removed, and updated the same way as it is done with UniConfig topology nodes - user can use the same set of CRUD RESTCONF operations and supported UniConfig RPCs for operation purposes.","UniStore nodes are placed in a dedicated 'unistore' topology under network-topology nodes. The whole configuration is placed under'configuration' container.","UniStore configuration is modelled by user-provided YANG schemas that can be loaded into UniConfig - at creation of UniStore node, user must provide name of the YANG repository, so UniConfig known how to parse configuration (query parameter'uniconfig-schema-repository').","UniConfig operations that are supported for UniStore nodes:","all RESTCONF CRUD operations","commit / checked-commit RPC","calculate-diff RPC (including git-like-diff flavour)","subtree-manager RPCs","replace-config-with-oper RPC","revert-changes RPC (transaction-log feature)","Node ID of UniStore node must be unique among all UniConfig and UniStore nodes."]},{"l":"Commit operation","p":["Actions performed with UniStore nodes during commit operations:","Configuration fingerprint verification - if another UniConfig transaction has already changed one of the UniStore nodes touched in the current transaction, then commit operation must fail.","Calculation of diff operation across all changed UniStore nodes.","Writing intended configuration into UniConfig transaction.","Rebasing actual configuration by intended in the UniConfig transaction.","Updating last configuration fingerprint to the UUID of committed transaction.","Writing transaction-log into transaction.","Committing UniConfig transaction - cached changes are sent to PostgreSQL DBMS."]},{"l":"Example use-case"},{"l":"Preparation of YANG repository","p":["User must feed UniConfig with YANG repository, that will be used for modeling of UniStore node configuration. The same UniStore node can me modeled only by 1 YANG repository, however, different nodes can track next different YANG repositories. YANG repository can be provided to UniConfig by copying directory with YANG files under 'cache' parent directory. Afterwards it is loaded either at startup or in runtime using'register-repository' RPC.","For demonstration purposes, let's assume that cache contains YANG repository 'system' with simple YANG module:"]},{"l":"Creation of UniStore node","p":["The next request shows creation of new UniStore node 'global' using provided JSON payload and name of the YANG repository that is used for parsing of the provided payload (query parameter'uniconfig-schema-repository'). Note that this yang repository must be specified only at the initialization of UniStore node."]},{"l":"Reading content of UniStore node","p":["The following sample shows reading of UniStore node content using regular GET request. 
Query parameter 'content' is set to 'config' to point out the fact that UniStore node is cached only in the Configuration data-store of transaction (Operational data-store is at this time empty)."]},{"i":"calculate-diff-rpc-created-node","l":"Calculate-diff RPC (created node)","p":["Calculate-diff operation is also supported for UniStore nodes. the following request shows difference of all touched nodes in the current transaction including UniStore nodes. Since UniStore node has only been created, diff output only contains 'created-data' with whole root'settings' container."]},{"l":"Persistence of UniStore node","p":["In case of UniStore nodes, commit RPC is used for confirming done changes and storing them into PostgreSQL DBMS. As it was explained in the previous section, commit operation causes storing of UniStore node configuration and transaction-log in the DBMS, operation doesn't touch any network device.","It is possible to combine changes of UniStore and UniConfig nodes in the same transaction and commit them at once."]},{"l":"Reading committed configuration","p":["The configuration is also visible in the Operation data-store of newly created transaction since it was committed in the previous step. The actual state can be shown by appending 'content=nonconfig' query parameter to GET request as it is shown in the next example."]},{"l":"Verification of configuration fingerprint","p":["Configuration fingerprint is used as part of the optimistic locking mechanism - by comparison of the configuration fingerprint from the beginning of the transaction and at commit operation it is possible to find out if other UniConfig transaction has already changed affected UniStore node. In case of UniStore nodes, fingerprint is always updated to the value of transaction-id (UUID) of the last committed transaction that contained the UniStore node."]},{"l":"Modification of configuration","p":["The same RESTCONF CRUD operations that can be applied to UniConfig nodes are also relevant within UniStore nodes. The following request demonstrates merging of multiple fields using PATCH operation."]},{"i":"calculate-diff-rpc-updated-node","l":"Calculate-diff RPC (updated node)","p":["The second calculate-diff RPC shows more granular changes done into existing UniStore node - it contains 'create-data' and 'updated-data' entries."]},{"l":"Commit made changes","p":["Persistence of made changes under UniStore node can be done using commit RPC."]},{"l":"Displaying content of transaction-log","p":["Committed transactions including all metadata (e.g serialized diff output or transaction ID) can be displayed by reading of'transactions-metadata' container in the Operational data-store. It also displays information about successfully committed UniStore nodes. Afterwards, user can leverage this information and revert some changes using transaction-id that is shown in the transaction-log."]},{"l":"Removal of UniStore node","p":["UniStore node can be removed by sending DELETE request to whole 'node' list entry, 'configuration' container, or by removing of all children'configuration' entities. In all cases, UniStore node will be removed after confirming of changes using commit RPC."]}],[{"l":"YANG Patch Operations","p":["Yang Patch is used for modification of subtrees under configuration. 
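Before the individual operation examples below, a minimal Python sketch of a YANG Patch request; the target node, paths and list entry are illustrative, while the 'ietf-yang-patch:yang-patch' envelope and the application/yang-patch+json media type follow RFC 8072.

```python
import requests

AUTH = ("admin", "admin")  # assumption: replace with real credentials
BASE = "http://localhost:8181/rests/data"

# Illustrative target resource; the patch below creates one entry in a 'tvi' list.
url = (f"{BASE}/network-topology:network-topology/topology=uniconfig"
       "/node=dev1/frinx-uniconfig-topology:configuration")

patch = {
    "ietf-yang-patch:yang-patch": {
        "patch-id": "add-tvi-entry",
        "edit": [
            {
                "edit-id": "edit1",
                "operation": "create",
                "target": "/interfaces/tvi=tvi-0",      # hypothetical path
                "value": {"tvi": [{"name": "tvi-0"}]},  # hypothetical entry
            }
        ],
    }
}

headers = {
    "Content-Type": "application/yang-patch+json",
    "Accept": "application/yang-data+json",
}

resp = requests.patch(url, json=patch, headers=headers, auth=AUTH)
print(resp.status_code)
```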
Advantages of YANG Patch in comparison to other RESTCONF operations:","YANG Patch may contain multiple edits with different operations applied to different subtrees","all edits inside YANG Patch are applied atomically - either all edits are successful or PATCH operation will fail and configuration will not be modified","supported reordering of lists (move operation) and inserting of list entry to specific position in the list(insert operation)","UniConfig supports all RFC-specified operations inside edits:","CREATE","REPLACE","MERGE","MOVE","INSERT","DELETE","REMOVE","RENAME","Using these operations, the user is able to reorder lists, create new data, remove data, or update specific data.","For more information, please refer to the official documentation of the RFC YANG patch"]},{"l":"RPC Examples"},{"l":"Creation of list entries","p":["The request creates new list entries in the tvi list. If the data exist, return an error."]},{"l":"Moving list entry","p":["The request moves an existing list entry on a user defined position."]},{"l":"Inserting new list entry","p":["The request inserts new list entries on a user defined position."]},{"l":"Inserting new leaf-list entry","p":["The request inserts a new leaf-list entry on a user defined position."]},{"l":"Replacing list entry","p":["The request replaces an existing value in a list entry."]},{"l":"Merging configuration","p":["The request merges an existing value in a list entry."]},{"l":"Delete list entry","p":["The request deletes a list entry. If the data is missing, returns an error."]},{"l":"Removing list entry","p":["The request removes a list entry."]},{"l":"Renaming list entry","p":["The request renames a list entry key."]},{"l":"Failed deleting of list entry","p":["The request to delete a list entry that is not present."]},{"l":"Sending Patch request with invalid structure","p":["The request is missing some data."]}],[{"l":"Operational Procedures"},{"l":"Logging","p":["The UniConfig distribution uses Logback as its logging framework. Logback is the successor to the log4j framework with many improvements, such as more options for configuration, better performance, and context-based separation of logs. Context-based separation of logs is used widely in UniConfig to achieve per-device logging based on the set marker in the logs."]},{"l":"TLS","p":["TLS is a widely adopted security protocol designed to facilitate privacy and data security for communications over the Internet. TLS authentication is disabled in the default version of UniConfig."]},{"l":"TLS for Postgres database","p":["By default, UniConfig communicates with the database without TLS and traffic is therefore unencrypted. When the database is deployed separately from UniConfig, we recommend that you enable TLS encryption."]},{"l":"OpenAPI","p":["The UniConfig distribution contains a '.yaml' file that generates list of all usable RPCs with examples. You can view it either locally or on our hosted version, which always shows the latest OpenAPI version."]},{"l":"Data Security Models","p":["UniConfig supports encryption and hashing of values in RESTCONF and UniConfig shell API, as well as managing confidential data during transfers between the UniConfig database and network devices."]},{"l":"UniConfig Clustering","p":["The UniConfig stateless architecture allows deployment of the system in a cluster to ensure horizontal scalability and high-availability properties."]},{"l":"Thread pools","p":["UniConfig uses thread pools in several places. 
They can be configured in the application.properties file."]},{"i":"data-flows--transformations","l":"Data flows & transformations","p":["There are multiple paths and transformations of data within Uniconfig. The following section provides more information on some of the more common paths.","Thread pools"]}],[{"l":"Data flows and transformations","p":["Architecture","Flows","CLI, direct to device, plaintext interface","CLI, direct from device, configuration data read","CLI, direct from device, operational data read","CLI, direct to device, configuration data write","Netconf, direct from device, configuration data read","Netconf, direct from device, operational data read","Netconf, direct to device, configuration data write","Uniconfig, cached intent configuration, data read","Uniconfig, cached applied configuration, data read","Uniconfig, applying intent to a device","Uniconfig, synchronizing applied configuration from network"]},{"l":"Architecture","p":["CLI- Southbound plugin for managing devices over CLI (SSH).","Data flow architecture","gNMI- Southbound plugin for managing devices over gNMI (SSH).","JSON YANG RFC provides information on how JSON is used.","NETCONF- Southbound plugin for managing devices over Netconf (SSH).","Northbound","Restconf RFC provides detailed information on YANG-based REST API specifics.","Restconf- REST API for Uniconfig.","SNMP- // TBD","Southbound","The following diagram outlines the basic architecture for this purpose. It gives a simplified overview and only includes a subset of components, but serves as a baseline for illustrating various data flows.","The following main components are included:","The Uniconfig core consists of many different components/features. A good place to start is the build-and-commit model in Uniconfig.","Uniconfig CLI shell- CLI interface for Uniconfig. Similar capabilities as RESTCONF, but intended for users who prefer CLI access.","Uniconfig core","Uniconfig java SDK documentation provides an overview.","Uniconfig Java SDK- Java SDK for Uniconfig. 
Uses Restconf internally.","Uniconfig restconf documentation provides an overview.","Uniconfig shell documentation provides an overview."]},{"l":"Flows"},{"i":"cli-direct-to-device-plaintext-interface","l":"CLI, direct to device, plaintext interface","p":["Flow for reading/writing arbitrary commands to a CLI device.","The User sends an HTTP POST REST (rpc) request to Uniconfig.","URL specifies Uniconfig defined execute-and-read or execute-and-expect RPC.","URL must specify the following:","topology=cli- CLI-managed device","node=nodeID- specific managed device","Restconf invokes an asynchronous RPC on the southbound layer, but blocks until it completes.","The CLI layer invokes a generic implementation of the plaintext access RPC and returns output from the device as is.","Restconf receives the data from the CLI layer and completes the request.","Restconf example:","To send an arbitrary command to a device and receive a response:","To send a sequence of commands (ssh expect style) and receive a response:","Flow diagram:","Data flow architecture"]},{"i":"cli-direct-from-device-configuration-data-read","l":"CLI, direct from device, configuration data read","p":["?content=config- to specify only configuration data must be read from device (if not present, defaults to config)","CLI readers send specific commands to the device and parse the output into an internal DOM data structure.","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based) configuration data from a device over CLI. The data is always retrieved from the device with no cache involved.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against openconfig YANG models.","Restconf example:","Restconf invokes an asynchronous read from the southbound layer, but blocks until it completes.","Restconf receives the data from CLI layer, serializes them into JSON and completes the request.","The CLI layer finds appropriate an CLI driver (cli units) and invokes all readers registered for a specific path provided in the URL.","The user sends an HTTP GET REST request to Uniconfig.","To get configuration data for all interfaces:","topology=cli- CLI-managed device","URL must conform to openconfig data models used for all CLI devices.","URL must specify the following:"]},{"i":"cli-direct-from-device-operational-data-read","l":"CLI, direct from device, operational data read","p":["Flow for reading structured (YANG-model based) operational data from a device over CLI. The data is always retrieved from the device with no cache involved.","This flow is identical to CLI, direct from device, configuration data read flow. The difference is that this READ returns a combination of configuration and operational data ! To invoke operational data read, use ?content=nonconfig in the URL, the rest of URL is no different","Warning! Be careful when requesting operation data from devices. The data can be massive and the act of reading such data can cause issues on device itself. 
Always be as specific as possible, i.e., use the most specific (longest) URL possible.","Restconf example:","To get configuration and operational data for all interfaces:","Flow diagram:","Data flow architecture"]},{"i":"cli-direct-to-device-configuration-data-write","l":"CLI, direct to device, configuration data write","p":["?content=config- only configuration data is read from the device","CLI writers send specific commands to the device and check the output for errors.","Data flow architecture","Flow diagram:","Flow for writing structured (YANG-model based) configuration data to a device over CLI. The data is transformed and sent directly to a device.","node=nodeID- specific managed device","Note: We do not recommend writing directly to a device. The preferred option is to use Uniconfig core to build an intent and commit the changes to the network.","Restconf component parses the URL and the payload and validates them against openconfig YANG models.","Restconf example:","Restconf invokes an asynchronous write on the southbound layer, but blocks until it completes.","Restconf receives a success or failed response from the CLI layer and maps it to the appropriate status code.","The CLI layer finds the appropriate CLI driver (cli units) and invokes all writers registered for a specific path provided in the URL.","The payload must contain valid JSON that correspondos to the URL points within the YANG model.","The user sends an HTTP PUT or POST REST request into Uniconfig.","To configure a new Loopback999 interface:","topology=cli- CLI-managed device","URL and payload need to conform to openconfig data models used for all CLI devices.","URL must specify the following:"]},{"i":"netconf-direct-from-device-configuration-data-read","l":"Netconf, direct from device, configuration data read","p":["?content=config- only configuration data is read from the device (if not given, defaults to config)","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based) configuration data from a device over Netconf. The data is always retrieved from the device with no cache involved.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against vendor-specific YANG models.","Restconf example:","Restconf invokes an asynchronous read from the southbound layer, but blocks until it completes.","Restconf receives the data from the Netconf layer, serializes them into JSON and completes the request","The Netconf layer serializes the path (URL) into a get-config request with a filter, sends it to the device and parses the output into an internal DOM data structure.","The User sends an HTTP GET REST request to Uniconfig.","To get Loopback999 interface configuration using IOS XR vendor models:","topology=topology-netconf- Netconf-managed device","URL must conform to vendor-specific YANG data models used by the device.","URL must specify the following:","Which models are used depends on the device. Many vendor-specific models can be found on GitHub."]},{"i":"netconf-direct-from-device-operational-data-read","l":"Netconf, direct from device, operational data read","p":["Flow for reading structured (YANG-model based) operational data from a device over Netconf. The data is always retrieved from the device with no cache involved.","This flow is identical to the Netconf, direct from device, configuration data read flow. The difference is that this READ returns a combination of configuration and operational data by using get netconf RPC instead of get-config! 
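To make the config/operational distinction concrete, the sketch below performs the same GET twice against a directly mounted NETCONF node, once with content=config and once with content=nonconfig. The host, credentials and the vendor-specific interface path are illustrative assumptions and will differ per device.

```python
import requests

# Assumed host/credentials; the path below is only an example of a
# vendor-specific (IOS XR style) model and may differ on your device.
BASE = "http://127.0.0.1:8181/rests/data"
NODE = "network-topology:network-topology/topology=topology-netconf/node=xr-1"
PATH = "yang-ext:mount/Cisco-IOS-XR-ifmgr-cfg:interface-configurations"
AUTH = ("admin", "admin")

for content in ("config", "nonconfig"):
    resp = requests.get(
        f"{BASE}/{NODE}/{PATH}",
        params={"content": content},   # config -> get-config, nonconfig -> get
        headers={"Accept": "application/json"},
        auth=AUTH,
        timeout=60,
    )
    print(content, resp.status_code)
```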
To invoke operational data read, use ?content=nonconfig in the URL","Warning! Be careful when requesting operation data from devices. The data can be massive and the act of reading such data can cause issues on device itself. Always be as specific as possible, i.e., use the most specific (longest) URL possible.","Restconf example:","To get operational data for all interfaces using IOS XR vendor models:","Flow diagram:","Data flow architecture"]},{"i":"netconf-direct-to-device-configuration-data-write","l":"Netconf, direct to device, configuration data write","p":["Flow for writing structured (YANG-model based) configuration data to a device over Netconf. The data is transformed and sent directly to a device.","Note: We do not recommend writing directly to a device. The preferred option is to use Uniconfig core to build an intent and commit the changes to the network.","Restconf example:","To configure Loopback999 interface configuration using IOS XR vendor models:","Flow diagram:","Data flow architecture"]},{"i":"uniconfig-cached-intent-configuration-data-read","l":"Uniconfig, cached intent configuration, data read","p":["?content=config- only intent data is read for a device (if not given, defaults to config == intent)","An ad-hoc uniconfig transaction is started.","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based), cached intent configuration data (not applied to network) for a device, regardless of its management protocol. The data is retrieved from in-memory cache (or a database, if not available in memory).","For more information about transactions, see Build and commit mode or Immediate commit model.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against device-specific YANG models.","Restconf example:","Restconf invokes an asynchronous read from uniconfig core, but blocks until it completes.","Restconf receives the data from Uniconfig core, serializes them into JSON and completes the request.","The ad-hoc transaction is closed.","The user sends an HTTP GET REST request to Uniconfig.","This is typically a very quick operation compared to reading directly from a device.","To get Loopback999 interface cached intent configuration using IOS XR vendor models:","To get Loopback999 interface cached intent configuration using openconfig models for a device over CLI:","topology=uniconfig- device cached in uniconfig","Transactions can be started automatically by Uniconfig or controlled by the user.","Uniconfig core reads in-memory cached intent (or loads the latest version of data from the database).","URL must conform to models used for that specific device, whether standard or vendor-specific models.","URL must specify the following:"]},{"i":"uniconfig-cached-applied-configuration-data-read","l":"Uniconfig, cached applied configuration, data read","p":["?content=nonconfig- only applied data is read for a device (if not given, defaults to config == intent)","An ad-hoc uniconfig transaction is started.","Data flow architecture","Flow diagram:","Flow for reading structured (YANG-model based), cached configuration data (already applied to the network) for a device, regardless of its management protocol. 
The data is retrieved from in-memory cache (or a database, if not available in memory).","For more information about transactions, see Build and commit mode or Immediate commit model.","node=nodeID- specific managed device","Restconf component parses the URL and validates it against device-specific YANG models.","Restconf example:","Restconf invokes an asynchronous read from Uniconfig core, but blocks until it completes.","Restconf receives the data from Uniconfig core, serializes them into JSON and completes the request.","The ad-hoc transaction is closed.","The user sends an HTTP GET REST request to Uniconfig.","This is typically a quick operation compared to reading directly from a device.","To get Loopback999 interface cached intent configuration using IOS XR vendor models:","To get Loopback999 interface cached intent configuration using openconfig models for a device over CLI:","topology=uniconfig- device cached in uniconfig","Transactions can be started automatically by Uniconfig or controlled by the user.","Uniconfig core reads in-memory cached, already applied configuration (or loads the latest version of data from a database).","URL must conform to models used for that specific device, whether standard or vendor-specific models.","URL must specify the following:"]},{"i":"uniconfig-applying-intent-to-a-device","l":"Uniconfig, applying intent to a device","p":["Flow for writing structured (YANG-model based) configuration data to Uniconfig's intent. Intent is typically modified for multiple devices. When modifications are completed, a commit is issued to apply the changes to the network. Automated rollback may kick in when a failure occurs.","For more information on this flow, see Build and commit mode or Immediate commit model.","Note: Uniconfig core builds on top of \"direct to device data flows\" and everything south of Uniconfig core is identical to those (direct to device) data flows. For example, Uniconfig core uses the Netconf, direct to device, configuration data write flow to apply configurations to Netconf devices when performing a commit.","Restconf example:","To configure two devices in a single transaction:","Flow diagram:","Data flow architecture"]},{"i":"uniconfig-synchronizing-applied-configuration-from-network","l":"Uniconfig, synchronizing applied configuration from network","p":["An ad-hoc uniconfig transaction is started.","Data flow architecture","Flow diagram:","Flow for synchronizing/updating an applied configuration from a network device. This is useful especially when the configuration is changed in the network directly (outside of Uniconfig). 
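A minimal sketch of triggering such a synchronization from an application is shown below. The RPC path, module prefix and input structure are illustrative assumptions based on the description in this section; take the exact operation definition from the OpenAPI specification for your UniConfig version.

```python
import requests

# Illustrative endpoint and payload - the RPC module prefix and the exact input
# structure are assumptions; consult the published OpenAPI file.
URL = "http://127.0.0.1:8181/rests/operations/connection-manager:sync-from-network"
AUTH = ("admin", "admin")

payload = {"input": {"target-nodes": {"node": ["R1", "R2"]}}}  # devices to synchronize

resp = requests.post(URL, json=payload, auth=AUTH,
                     headers={"Accept": "application/json"})
resp.raise_for_status()
print(resp.json())
```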
Once the configuration is synchronized, those direct changes can be accepted or reverted in Uniconfig.","For more information about transactions, see Build and commit mode or Immediate commit model.","For more information on this flow, see Sync from network.","Payload must specify a list of devices to be synchronized.","Restconf example:","Restconf invokes an asynchronous RPC in Uniconfig core, but blocks until it completes.","Restconf receives a success or failed response from Uniconfig core and maps it to the appropriate status code and response.","The ad-hoc transaction is committed.","The user sends an HTTP GET REST request to Uniconfig.","To synchronize two devices from the network:","Transactions can be started automatically by Uniconfig or controlled by the user.","Uniconfig core performs direct from device configuration data read flows for each device in parallel.","Uniconfig core stores the new configuration in the applied configuration cache and in the database.","Uniconfig has a mechanism to verify whether a device is out of sync based on the last commit timestamp. A full configuration is performed only if out of sync.","URL specifies Uniconfig defined sync-from-network RPC."]}],[{"l":"Data Security Models","p":["UniConfig supports encryption and hashing of leaf/leaf-list values on SSH and RESTCONF API. Following sections describe supported security models in depth."]},{"l":"Data encryption","p":["UniConfig uses asymmetric encryption for ensuring confidentiality of selected leaf and leaf-list values. Currently, only RSA ciphers are supported (both global UniConfig and device-level key-pairs). Encryption is supported in 'uniconfig', 'unistore', and 'templates' topologies."]},{"l":"Global-device encryption architecture","p":["Both UniConfig and device uses PKI for encryption of data:","UniConfig side: All selected leaves are encrypted using global public key when this data enters UniConfig via RESTCONF API or UniConfig SSH shell API. Afterwards, data is stored in database in the encrypted format. UniConfig has also access to private key which is used internally for decryption of already encrypted data.","Device side: Device exposes public key and UniConfig uses this key for re-encryption of data before it is sent to device ('commit'/'checked-commit' operations). However, device doesn't expose its private key - UniConfig is not able to detect changes done to encrypted data (updated leaves/leaf-lists) - it is only able to detect, if data was removed or created, not updated. Because of this reason, UniConfig assumes that read encrypted data from device has been encrypted using the same public key as it was used by UniConfig.","Following picture depicts data transformations done on UniConfig interfaces:","Global-device encryption model"]},{"l":"Global-only encryption architecture","p":["In comparison to Global-device encryption architecture this model uses only global key-pair for encryption of data. Devices contain only plaintext data.","Public key is used for encryption of received data via RESTCONF, UniConfig shell API, and when syncing configuration from device to UniConfig transaction ('sync-from-network' operation).","Private key is used for decryption of encrypted data before forwarding this configuration to device('commit'/'checked-commit' operations).","Next picture depicts data transformations done on UniConfig interfaces:","Global-only encryption model","Reading of operational data from device directly (GET under 'yang-ext:mount') shows data in unencrypted format. 
Application gateways should restrict access to mountpoints in this use-case."]},{"l":"YANG support","p":["Leaves and leaf-lists, which value user would like to store encrypted, must be marked using YANG extension without any parameters. Currently, only leaves with 'string' type (direct/indirect with custom type definitions) are supported, since encrypted values are base64 encoded. Also, be aware that type constraints must accept encrypted values.","Example YANG module that defines one 'encrypt' extension:","Usage of the extension in the 'config' module:","Many times, it is not possible to modify existing YANG files because they are already deployed on device, for example device running with NETCONF server. In this case, user can still mark what leaves should be encrypted using additional YANG module that contains deviations.","Example:","Afterwards, user has 2 options how this module can be coupled with modules from device (NETCONF):","Explicit specification of this side-loaded module in the 'install-node' request - using'netconf-node-topology:yang-module-capabilities' settings (see 'Device installation' section).","Automatic detection of side-loaded module - UniConfig looks for specific capability from NETCONF server, inherits its revision, and then looks for side-loaded module with specific name and inherited revision(see 'Configuration' section). This option is preferred, if deployment contains multiple versions of devices and list of encrypted paths are different on each version."]},{"l":"Configuration","p":["Global RSA key-pair is stored inside PEM-encoded files in the 'rsa' directory under UniConfig root. Name of the private key must be 'encrypt_key' and name of the public key must be 'encrypt_key.pub'. If user doesn't provide these files, UniConfig will automatically generate its own key-pair with length of 2048 bits. All UniConfig instances in the cluster must use the same key-pair.","Encryption settings are stored in the 'config/lighty-uniconfig-config.json' file under 'crypto' root object.","Example:","encrypt-enabled - If this setting is false, then encryption is disabled despite other settings or install-node parameters. If this setting is true, then encryption is enabled. The default value is true.","encrypt-extension-id - If this setting is not defined, then encryption is disabled despite other settings or install-node parameters. The value must have the format [module-name]:[extension-name] and specifies extension used for marking of encrypted leaves/leaf-lists in YANG modules. Corresponding YANG module, that contain this extension, can be part of device/unistore YANG schemas, or it can be side-loaded during installation of NETCONF device as imported module from 'default' repository.","netconf-reference-module - Name of the module for which NETCONF client looks for during mounting process. If UniConfig finds module with this name in the list of received capabilities, then it uses its revision in the lookup process for correct YANG module with encrypted paths (using deviations).","netconf-encrypted-paths-module-name - Name of the module which contains deviations with paths to encrypted leaves/leaf-lists. There could be multiple revisions of this file prepared in the 'default' NETCONF repository. NETCONF client in the UniConfig chooses the correct revision based on 'netconf-reference-module-name' setting. 
Together, netconf-reference-module-name' and 'netconf-encrypted-paths-module-name' can be used for autoload of encrypted paths for different versions of devices.","If 'default' YANG repository contains module with encrypted-paths without defined YANG revision and device does not already provide encryption capability, then encrypted-paths module is used as the last resort during installation of device ('netconfReferenceModuleName' and matching of revisions are ignored)."]},{"l":"Change encryption status","p":["For proper working of this RPC it is necessary to enable notifications with this parameter:","Encryption can be enabled or disabled with the parameter:","The value of this parameter can be changed with the 'change-encryption-status' RPC request.","Following request is used to enable encryption:","After calling this command, all UniConfig instances will set this parameter using the notification service to the value which is sent via RPC, in this case it will be set to value true.","Following request is used to disable encryption:","Following request is used to check actual encryption status:","To check the functionality of this RPC, after calling install-device RPC we can request the password of the node, which when encryption is enabled will be returned encrypted, and when it is disabled password will be plain text."]},{"i":"change-encryption-keys-private-and-public","l":"Change encryption keys (private and public)","p":["In case it is necessary to change the encryption keys, there is RPC change-encryption-keys. The process of changing encryption keys requires rebooting one of the instances of UniConfig or enabling a new instance of UniConfig after calling change-encryption-keys RPC(rotation of encrypted data in the database for new encryption keys occurs when UniConfig is started if RPC change-encryption-keys is executed). During key rotation, if some data in the database cannot be decrypted with the old key, those data will remain unchanged.","The default value of 'new-encryption-cipher-type' parameter is value 'RSA', so there is no need to add this parameter to the request body.","To check if UniConfig needs to be restarted or if a new UniConfig instance needs to be added, the following query can be run:","After key rotation when UniConfig is started, the data encrypted using the old key will be overwritten with the new encryption keys and all other UniConfig instances in the cluster will use the new keys for encryption.","During key rotation, UniConfig reads and updates encrypted configurations in batch groups. The size of these groups can be set with the parameter:"]},{"l":"Device installation","p":["There are 2 settings related to encryption in the 'install-node' RPC request:","uniconfig-config:crypto - It allows specifying path to public key on device - 'public-key-path' (leaf with RFC-8040 path) and cipher type (by default, RSA is used) - 'public-key-cipher-type'. If path to public key is specified, and it exists on device, then Global-device encryption model is used. Otherwise, Global-only encryption model is selected.","netconf-node-topology:yang-module-capabilities - If autoload of YANG module with encrypted paths is not used and device itself doesn't specify encrypted leaves, then it is necessary to side-load YANG module with encrypted paths. This parameter is relevant only on NETCONF nodes. 
Side-loaded modules must be expressed in the format of NETCONF capabilities.","The following install-node request specifies both the path to the public key and the side-loaded YANG module 'encrypted-paths' with revision '2021-12-15' and namespace 'urn:ietf:params:xml:ns:yang:encrypted-paths'.","During installation, UniConfig tries to download the public key from the device. The public key can be verified using a GET request:"]},{"l":"Format of encrypted data","p":["Encrypted values are stored and displayed via RESTCONF or UniConfig shell with the 'rsa_' prefix. The prefix is used by UniConfig to determine whether posted data is already encrypted or still needs to be encrypted.","The encrypted string is encoded using Base64 encoding."]},{"i":"example-global-device-model","l":"Example: global-device model","p":["The next use-case shows encryption of values marked by the 'frinx-encrypt:encrypt' extension on both the UniConfig server side and the device side. The NETCONF device directly exposes the 'frinx-encrypt' YANG module and leaves with the applied extension (side-loading of encrypted paths is not necessary).","YANG model used for simulation of the YANG device:"]},{"i":"example-global-only-model","l":"Example: global-only model","p":["The next use-case shows encryption of values marked by the 'frinx-encrypt:encrypt' extension only on the UniConfig server side. The NETCONF device directly exposes the 'frinx-encrypt' YANG module and leaves with the applied extension (side-loading of encrypted paths is not necessary).","The YANG model used for simulation of the YANG device is the same as in the previous use-case."]},{"l":"Data hashing","p":["UniConfig supports the 'iana-crypt-hash' YANG model for specification of hashed values in the data-tree using the type definition 'crypt-hash'. Hashing works in the 'uniconfig' and 'unistore' topologies. Only NETCONF devices are currently supported because CLI cannot be natively used for reporting of device capabilities that would contain the supported hashing function."]},{"l":"Architecture","p":["Hashing is done only in the RESTCONF layer after writing data that contains leaves/leaf-lists with the 'crypt-hash' type. Afterwards, UniConfig stores, uses, and writes to the device only the hashed representation of these values.","Hashing model"]},{"i":"yang-support-1","l":"YANG support","p":["YANG module 'iana-crypt-hash':","http://www.iana.org/assignments/yang-parameters/iana-crypt-hash@2014-08-06.yang","All 3 hash functions are implemented - 'MD5', 'SHA-256', 'SHA-512'. In case of the 'uniconfig' topology, the hashing function is selected based on the feature reported in the NETCONF capability; in case of the 'unistore' topology, UniConfig enforces the 'SHA-512' hashing function."]},{"i":"device-installation-1","l":"Device installation","p":["Hashing is enabled by default on NETCONF devices that report the corresponding 'iana-crypt-hash' model-based capability. The user doesn't have to add any additional setting in the 'install-node' request.","After successful installation of the device, it is possible to check the loaded hashing function that will be used for storing hashed values. Use the following GET request:"]},{"i":"example-hashing-input-values","l":"Example: hashing input values","p":["This example demonstrates hashing of input values with the 'crypt-hash' type on the RESTCONF API."]}],[{"l":"Logging Framework"},{"l":"Logback Configuration","p":["The UniConfig distribution uses Logback as the implementation of the logging framework. Logback is the successor to the log4j framework with many improvements, such as more options for configuration, better performance, and context-based separation of logs. 
Context-based separation of logs is used widely in UniConfig to achieve per-device logging based on the set marker in the logs.","Logback configuration is placed in 'config/logback.xml' file under UniConfig distribution. For more information about formatting of logback configuration, look at the http://logback.qos.ch/manual/configuration.html site. This section describes parts of the configuration in the context of UniConfig application."]},{"l":"Appenders","p":["The following appenders are used:","'STDOUT': Prints logs into the console.","'logs': Used for writing all logs to the output file on path'log/logs.log'. The rolling file appender is applied.","'netconf-notifications', 'netconf-messages', 'netconf-events', and'cli-messages', 'gnmi-messages': Sifting appenders that split logs per node ID that is set in the marker of the logs. Logs are written to different subdirectories under 'log' directory and they are identified by their node ID. The rolling file appender is applied.","'restconf': Appender used for writing of RESTCONF messages into'log/restconf.log' file. The rolling file appender is applied.","'gnmi': Appender used for writing of Logs related to gNMI topology."]},{"l":"Loggers","p":["There are 2 groups of loggers:","Package-level logging brokers: Loggers that are used for writing general messages into the console and a single output file. Logging level is set by default to 'INFO'. For debugging purposes it is handy to change logging threshold to 'TRACE' or 'DEBUG' level. Covered layers: UniConfig, Unified, Controller, RESTCONF, CLI, NETCONF, gNMI. Used appenders: 'STDOUT' and 'logs'.","Loggers used for logging brokers: These loggers should not be changed since the state of logging can be changed using RPC calls. Classpaths point to specific classes that represent implementations of logging brokers, the logging level is set to 'TRACE'. Used appenders: 'netconf-notifications', 'netconf-messages','netconf-events', 'cli-messages', 'gnmi-messages' and 'restconf'."]},{"l":"Updating Configuration","p":["Logback is configured to scan for changes in its configuration file and automatically reconfigure itself when the configuration file changes.","Scanning period is set by default to 5 seconds."]},{"l":"Example configuration","p":["In the logback.xml file you can edit level of logging for each component of UniConfig:"]},{"l":"INFO","p":["This is recommended level for production environments. INFO messages display behavior of applications. They state what happened. For example, if a particular service stopped or started or you added something to the database. These entries are nothing to worry about during usual operations. The information logged using the INFO log is usually informative, and it does not necessarily require you to follow up on it."]},{"l":"DEBUG","p":["With DEBUG, you are giving diagnostic information in a detailed manner. It is verbose and has more information than you would need when using the application. DEBUG logging level is used to fetch information needed to diagnose, troubleshoot, or test an application. This ensures a smooth running application."]},{"l":"TRACE","p":["The TRACE log level captures all the details about the behavior of the application. It is mostly diagnostic and is more granular and finer than DEBUG log level. 
This log level is used in situations where you need to see what happened in your application."]},{"l":"Logging Brokers","p":["The logging broker represents a configurable controller that logs one logical group of messages from a single classpath. Logging of multiple messages from the same classpath simplifies configuration of loggers in Logback since only one logger per broker must be specified. The logging broker can be controlled using RESTCONF RPCs; there are multiple operations where it is possible to trigger logging for the whole broker, or just for specified node IDs. Configuration of the logger in the logback file that is assigned to the logging broker should not be changed at all."]},{"l":"Implemented Logging Brokers","p":["The following subsections describe currently implemented logging brokers."]},{"l":"RESTCONF","p":["It is used for logging authenticated HTTP requests and responses; information about URI, source, HTTP method, query parameters, HTTP headers, and body.","Per-device logging cannot be enabled for this broker; all logs are saved to 'log/restconf.log' file.","It is possible to configure HTTP headers in which the content must be masked in logs (using asterisk characters). This is useful especially if there are some headers which contain private data(such as Authorization or a Cookie header). Hidden HTTP headers are marked using header identifiers.","It is also possible to configure HTTP methods for which the communication (requests and responses) should not be logged to a file.","Requests and responses are paired using a unique message-id. This message-id is not part of the HTTP request, it is generated on the RESTCONF server.","Requests and responses contain Uniconfig transactions for easier matching with the log-transactions.","Example: - Request and corresponding response with the same message-id"]},{"l":"CLI messages","p":["Broker used for logging of all CLI requests and responses.","These CLI requests and responses are paired with unique message-id attribute, which is generated.","Per-device logging is supported - logs for CLI messages are stored under 'log/cli-messages' directory and named by '[node-id].log' pattern.","Example - sending POST RPC for installing CLI device, and getting requests with corresponding responses paired with same Message-ID:"]},{"l":"NETCONF Messages","p":["A broker is used for logging of all NETCONF messages incoming or outgoing, except the NETCONF notifications (a distinct broker has been introduced for notifications).","NETCONF RPC's and responses can be matched using the 'message-id' attribute that is placed in the RPC header.","Per-device logging is supported, logs for NETCONF messages are stored under the directory 'log/netconf-messages' and named by the'[node-id].log' pattern.","Example: - Sending NETCONF GET RPC and receiving response","Number 641 represents the session ID. It is read from the NETCONF hello message. If multiple sessions are created between the NETCONF server and NETCONF client and are logically grouped by the same node ID, then logs from multiple sessions are stored to the same logging file (this is needed to distinguish between the sessions). 
Multiple NETCONF sessions between the UniConfig and NETCONF server are created for each subscription to the NETCONF stream."]},{"l":"NETCONF Notifications","p":["A broker is used for logging of incoming NETCONF notifications.","Per-device logging is supported, logs for NETCONF notifications are stored under the directory 'log/netconf-notifications' and named by the '[node-id].log' pattern.","Example: - Received two notifications"]},{"l":"NETCONF Events","p":["Logs generated by this broker contain session-related information about the establishment or closing of a NETCONF session from the view of the NETCONF client placed in UniConfig.","These logs don't contain full printouts of sent or received NETCONF messages.","Per-device logging is supported, logs for NETCONF events are stored under the directory 'log/netconf-events' and named by the'[node-id].log' pattern.","Example:"]},{"l":"gNMI Messages","p":["A broker is used for logging of all gNMI SET/GET messages incoming or outgoing, except the gNMI notifications.","Per-device logging is supported, logs for gNMI messages are stored under the directory 'log/gnmi-messages' and named by the'[node-id].log' pattern.","Example: - Sending gNMI SET request and receiving response"]},{"l":"Supported Logging Settings","p":["Current logging broker settings are stored in the Operational datastore under the 'logging-status' root container. The following example shows a GET query that displays the logging broker settings:","Response:","Logging settings are encapsulated inside multiple list entries ('broker' list) where each list entry contains settings for one logging broker. Description of the settings that are placed under a single logging entry:","broker-identifier: Unique identifier of the logging broker. Currently, 5 brokers are supported: 'netconf_messages', 'restconf','netconf_notifications', 'netconf_events', and cli_messages.","is-logging-broker-enabled: Flag that specifies whether the logging broker is enabled. If the logging broker is disabled, then no logging messages are generated.","is-logging-enabled-on-all-devices: If this flag is set to'true', then logs are separated to distinct files in the scope of all devices. If it is set to 'false', then logging is enabled only for devices that are listed in the 'enabled-devices' leaf-list / array. This setting is unsupported in the 'restconf' logging broker since RESTCONF currently doesn't differentiate the node ID in the requests or responses.","enabled-devices: If 'is-logging-enabled-on-all-devices' is set to 'false', then logs are generated only for devices that are specified in this list, it acts as a simple filtering mechanism based on the whitelist. Blacklist approach is not supported, it is not possible to set 'is-logging-enabled-on-all-devices' to 'true' and specify devices for which logging feature is disabled. This field is not supported in the 'restconf' logging broker.","RESTCONF-specific settings:","restconf-logging:hidden-http-methods- HTTP requests (and associated HTTP responses) are not logged if request's HTTP method is set to one of the methods in this list. Names of the HTTP methods must be specified using upper-case format.","restconf-logging:hidden-http-headers- List of HTTP headers(names of the headers) which content is hidden in the logs. Names of the HTTP headers are not case-sensitive.","GNMI-specific settings:","gnmi-logging:message-types- gNMI message types that are enabled to be logged. 
Names of the message types must be specified using upper-case format.","Global settings that are common in all logging brokers:","hidden-types- Value of leaf or leaf-list that uses one of these types is hidden in the logs using asterisk characters. It can be used for masking of passwords or other confidential data from logs."]},{"l":"Initial Configuration","p":["By default, all logging brokers are disabled and logging is disabled on all devices, the user must explicitly specify a list of devices for which per-device logging is enabled. Also, RESTCONF-specific filtering is not configured, all HTTP requests and responses are fully logged, no content is dismissed. By default, only SET gNMI message type is set to be logged.","Initial logging configuration can be adjusted by adding the'loggingController' configuration into the'config/lighty-uniconfig-config.json' file. The structure of this configuration section conforms YANG structure that is described by the'logging' and 'restconf-logging' modules, it is possible to copy the state of the Operational datastore under 'logging-status' into the'loggingController' root JSON node.","The next JSON snippet shows the sample configuration'loggingController', logging brokers 'netconf_messages' and'netconf_notifications' are enabled; the 'netconf_messages' broker is enabled for all devices while 'netconf_notifications' is enabled only for 'xr6' and 'xr7' devices.","If unknown parameters are specified in a configuration file, they will be ignored and a warning, that the corresponding parameter was ignored, will be logged."]},{"l":"Controlling of Logging Using RPC Calls","p":["Since logging settings are stored in the Operational datastore, it is possible to adjust these settings on runtime only using RPC calls. The following subsections describe available RPCs."]},{"l":"Enable Logging Broker","p":["An RPC is used for enabling the logging broker. The enabled logging broker is available to write logs.","The input contains only the name of the the logging broker,'broker-identifier'.","Example: - Enable logging broker with the identifier 'restconf'","The output shows a positive response given the broker was previously in a disabled state:"]},{"l":"Disable Logging Broker","p":["An RPC is used for turning off the logging broker. A disabled logging broker doesn't write any logs despite other settings.","The input contains only the name of the the logging broker,'broker-identifier'.","Example: - Disabling the logging broker with the identifier 'restconf'","The output shows a positive response given the broker was previously in an enabled state:"]},{"l":"Enable Default Device Logging","p":["An RPC is used for setting the default device logging to 'true', logs will be written for all devices without filtering any logs based on their node ID.","The input contains only the name of the the logging broker,'broker-identifier'.","Invocation of this RPC causes clearing of the leaf-lest'enabled-devices'.","Example: - Enable default device logging in the 'netconf_messages' logging broker","The output shows a positive response given the broker was previously in a disabled state:"]},{"l":"Disable Default Device Logging","p":["An RPC is used for setting default device logging to 'false', logs will be written only for devices that are named in the leaf-list'enabled-devices'. 
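For orientation, a sketch of how these logging RPCs can be scripted from a client is shown below. The RPC paths used here are placeholders derived from the RPC names in this section (the exact module prefixes and operation names must be taken from the OpenAPI specification), and the host and credentials are assumptions.

```python
import requests

# Host, credentials and RPC paths below are illustrative placeholders only;
# take the exact operation names from the published OpenAPI file.
OPS = "http://127.0.0.1:8181/rests/operations"
AUTH = ("admin", "admin")

def call_rpc(rpc: str, body: dict) -> int:
    """Invoke a UniConfig RPC with the given input body; returns the HTTP status."""
    resp = requests.post(f"{OPS}/{rpc}", json={"input": body}, auth=AUTH,
                         headers={"Accept": "application/json"})
    resp.raise_for_status()
    return resp.status_code

# Enable the 'netconf_messages' broker, then restrict it to two devices.
call_rpc("logging:enable-logging-broker", {"broker-identifier": "netconf_messages"})
call_rpc("logging:disable-default-device-logging", {"broker-identifier": "netconf_messages"})
call_rpc("logging:enable-device-logging",
         {"broker-identifier": "netconf_messages", "device-list": ["xr6", "xr7"]})
```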
If the leaf-list 'enabled-devices' doesn't contain a node ID, then logging in the corresponding logging broker is effectively turned off.","The input contains only the name of the the logging broker,'broker-identifier'.","Example: - Disable default device logging in 'netconf_messages' logging broker","The output shows a positive response given the broker was previously in an enabled state:"]},{"l":"Enable Device Logging","p":["An RPC is used for enabling logging of specified devices that are identified by node IDs.","The input contains the name of the the logging broker'broker-identifier' and a list of node IDs called 'device-list'.","Example: - Enable logging for devices with node IDs: 'node1', 'node2', and 'node3' in the 'netconf_events' logging broker","The output shows a positive response:"]},{"l":"Disable Device Logging","p":["An RPC is used for turning off logging of specified devices that are identified by node IDs.","The input contains the name of the the logging broker'broker-identifier' and a list of node IDs called 'device-list'.","Example: - Disable logging for device with node ID 'node1' in the 'netconf_events' logging broker","The output shows a positive response:"]},{"l":"Setting Global Hidden Types","p":["An RPC is used for setting identifiers of hidden YANG type definitions. Values of leaves and leaf-lists that are described by these types are masked in the output logs.","This RPC overwrites all already configured hidden types. An empty list of hidden types disables filtering of data values.","Filtering of values applies to all logs, including RESTCONF logs.","Example: - Setting 3 hidden types","The output shows a positive response:"]},{"l":"Setting Hidden HTTP Headers","p":["An RPC is used for overwriting the list of HTTP headers which content is masked in the output of the RESTCONF logs.","This RPC modifies behavior of only the 'restconf' logging broker.","HTTP headers in both requests and responses are masked.","The list of hidden HTTP headers denotes header identifiers.","The identifier of 'hidden' HTTP header still shows in the output logs, however, the content of such header is replaced by asterisk characters.","Example: - Hiding content of 'Authorization' and 'Cookie' HTTP headers","A positive response is shown in the output:"]},{"l":"Setting Hidden HTTP Methods","p":["An RPC is used for overwriting the list of HTTP methods. RESTCONF communication, that may include invocation of hidden HTTP methods, is not displayed in the output logs.","Both requests and responses with hidden HTTP methods are not written to the log files.","This RPC modifies behavior of only 'restconf' logging behaviour.","Example: - Hiding GET and PATCH communication in the RESTCONF logs","A positive response is shown in the output:"]},{"l":"Setting gNMI message types","p":["An RPC is used for overwriting the list of supported gNMI message types.","This RPC modifies behavior of only 'gnmi messages' logging behaviour.","Example: - Setting SET and GET message types","A positive response is shown in the output:"]}],[{"l":"OpenAPI","p":["The OpenAPI file located in the openapi folder contains all the RPCs and data manipulating requests (CRUD operations), and their respective examples. A shell script (named start_uniconfig_swagger.sh) was created that automatically checks if the file is present and runs it in a docker container where the Swagger API runs, and opens the file containing all the RPCs and data manipulating requests. 
After running the shell script, open any browser and type localhost in the URL bar.","An overview of our OpenAPI, along with all parameters and expected returns, can be found here","The website should look like the screenshot below:","openapi website","Alternatively, you can look at our live instance of the site that always displays the latest version of the API."]}],[{"l":"Thread pools","p":["There are several thread pools that can be configured in UniConfig:","Jetty server,","Task executor,","Notifications,","SSH Client,","NetConf topology,","CLI topology."]},{"l":"Jetty server","p":["The Jetty server is used to aggregate connectors (HTTP request receivers) and request handlers. Connectors use the thread pool methods to run jobs that will eventually call the handle method.","Available parameters to configure:","jetty.max-threads=200","The maximum number of threads available in the Jetty server. The default value is 200.","jetty.min-threads=8","The minimum number of threads available in the Jetty server. The default value is 8.","jetty.idle-timeout=60","Threads that are idle for longer than this period (in seconds) can be stopped. The default value is 60.","If any of these parameters are left empty (e.g. jetty.max-threads=), the default value is used."]},{"l":"Task Executor","p":["The task executor is used to execute operations (internal operations or RPCs), either synchronously or asynchronously, on given nodes or devices.","task-executor.max-queue-capacity=10000","The maximum queue capacity for postponed tasks. The default value is 10000.","task-executor.max-cpu-load=0.9","The maximum CPU load for executing tasks. Load is expressed as a ratio so that 1.0 corresponds to 100% load, 0.9 to 90%, etc. The default value is 0.9.","task-executor.default-thread-count=","The default thread count used for executing tasks. The default value is the number of available processors * 2.","task-executor.max-thread-count=","The maximum thread count used for executing tasks. The default value is default-thread-count * 20.","task-executor.keepalive-time=60","The time in seconds before the execution of a specified task is timed out. The default value is 60.","If any of these parameters are left empty (e.g. task-executor.default-thread-count=), the default value is used."]},{"l":"Notifications","p":["A NetConf-related thread pool that handles notification subscriptions (acquiring of subscriptions, release of subscriptions, etc.).","notifications.thread-parameters.monitoring-executor-initial-pool-size=","The initial thread count used by the monitoring executor. The default value is the number of available processors.","notifications.thread-parameters.monitoring-executor-maximum-pool-size=","The maximum thread count used by the monitoring executor. The default value is initial-pool-size * 4.","notifications.thread-parameters.monitoring-executor-keepalive-time=60","The time in seconds before the execution of a specified task is timed out in the monitoring executor. The default value is 60.","If any of these parameters are left empty (e.g. notifications.thread-parameters.monitoring-executor-initial-pool-size=), the default value is used."]},{"l":"SSH Client","p":["The SSH client uses a thread pool that handles communication with devices. This thread pool is shared between NetConf and CLI topologies.","ssh-client.default-timeout=-1","Timeout for SSH connections (in seconds). If set to a negative value, timeouts are disabled. 
The default value is -1.","ssh-client.heartbeat-interval=30","The interval (in seconds) at which the client pings the server to check if the connection is still alive. The default value is 30.","ssh-client.heartbeat-reply-wait=60","Indicates if the heartbeat request expects a reply. Time (in seconds) to wait for a reply, a non-positive value means that no reply is expected. The default value is 60.","ssh-client.heartbeat-request=keepalive@sshd.apache.org","The heartbeat request that is sent to the server. The default value is keepalive@sshd.apache.org.","ssh-client.ssh-default-nio-workers=8","The amount of non-blocking workers that handle communication messages. The default value is 8.","If any of these parameters are left empty (e.g. ssh-client.ssh-default-nio-workers=), the default value is used."]},{"l":"NetConf Topology","p":["NetConf topology thread pools are used to connect to NetConf devices and keep the connection alive.","netconf-topology-parameters.fixed-thread-pool-thread-count=2","The fixed thread pool thread count in the NetConf topology. Used to read device capabilities and schema set up. The default value is 2.","netconf-topology-parameters.scheduled-thread-pool-thread-count=2","The scheduled thread pool thread count in the NetConf topology. Used to schedule keepalive messages. The default value is 2.","If any of these parameters are left empty (e.g. netconf-topology-parameters.fixed-thread-pool-thread-count=), the default value is used."]},{"l":"CLI Topology","p":["CLI topology thread pools are used to connect to CLI devices and keep the connection alive.","cli-topology-parameters.keepalive-thread-count=","The thread pool count dedicated ONLY to keepalive and reconnect scheduling. The default is either 2 or the number of available processors, whichever is higher.","cli-topology-parameters.init-executor-thread-timeout=120","If any thread is unused for this period (in seconds), it is stopped and recreated in the future if necessary.","cli-topology-parameters.init-executor-thread-count=","The maximum, number of threads for the flexible thread pool executor. This thread pool is used to process events and asynchronous locking of the CLI layer. The default is the number of available processors * 8.","If any of these parameters are left empty (e.g. cli-topology-parameters.keepalive-thread-count=), the default value is used."]}],[{"l":"TLS encryption for Postgres database","p":["By default all the communication to the database is not encrypted. In deployments where UniConfig is running separately from database, the traffic might be visible to unwanted eyes. Here are the steps to enabling TLS encryption for communication with the database."]},{"l":"Generating self-signed certificate using OpenSSL","p":["If you already have SSL keys generated, you need to convert them to proper format, see Converting SSL keys to proper format, otherwise you need to generate them."]},{"l":"Converting SSL keys to proper format","p":["The proper format for the SSL keys is the following:","The command which needs to be used to convert the keys properly may differ based on the format of the keys in which they are available. They can be converted using OpenSSL version 1.1.1, from command line openssl command. 
OpenSSL documentation provides examples for the most common cases.","To convert to PKCS-8 DER binary format, consult the documentation here: PKCS-8","To convert to PKCS-12 format, consult the documentation here: PKCS-12"]},{"l":"Enabling TLS for the database connection","p":["The configuration file that must be modified can be found on the following path relative to the UniConfig root directory:","Then edit the configuration section in dbPersistence section.","Example:","The TLS related fields are the following:","enabledTls- setting to true enables TLS encryption, default is false","tlsClientCert- specify the relative path to the Client certificate from the root UniConfig directory","tlsClientKey- specify the relative path to the Client key from the root UniConfig directory, this can be PKCS-12 or PKCS-8 format","tlsCaCert- specify the relative path to the root CA certificate from the root UniConfig directory","sslPassword- if the tlsClientKey file is encrypted with password, specify it here. It is needed for PKCS-12 keys and for encrypted PKCS-8 keys, this will be ignored for the unencrypted keys.","Do not forget to adjust other database connection parameters accordingly."]}],[{"l":"TLS-based Authentication","p":["In the default version of UniConfig TLS authentication is disabled. To enable TLS for RESTCONF you must setup two things:","Key-store and trust-store that hold all keys and certificates. If authentication of individual clients is not required, trust-store doesn't have to be created at all. Key-store must always be initialized.","Enabling of TLS in UniConfig by editing the lighty configuration file."]},{"l":"Setting of Key-store and Trust-store","p":["Steps required for preparation of key-store and trust-store:","Create a directory under the UniConfig root directory that will contain key-store and optionally trust-store files, for example:","Create a new key-store. There are two options depending on whether you already own the certificate that you would like to use for the identification of UniConfig on the RESTCONF layer.","Create a new key-store with the generated RSA key-pair (in the example the length of 2048 and validity of 365 days is used). After execution of the following command, the prompt will ask you for information about currently generated certificate that will be pushed into the newly generated key-store secured by a password(this secret will be used later in the configuration file - remember it).","Create a new key-store with already generated RSA key-pair (your certificate that you would like to use for authentication in ODL).","(Optional step) Create a new trust-store using an existing certificate (an empty truststore cannot be created). If you have multiple client certificates, they can be pushed to truststore with the same command executed multiple times (but alias must be unique for each of the imported certificates). Example:","You can easily convert OPENSSL PEM certificates to DER format that is supported by keytool:","If your application needs to own distribution's certificate, you can export certificate from generated key-pair that we have pushed into the keystore (PKCS12 or OPENSSL format):"]},{"l":"Enabling of TLS in UniConfig","p":["Preparation of the TLS key-store and trust-store is not enough for enabling TLS within the RESTCONF API. It is also required to point UniConfig to these created storages and explicitly enable TLS by setting a corresponding flag. 
The configuration file that must be modified can be found on the following path relative to the UniConfig root directory:","Then, you must append the TLS configuration snippet (it must be placed under the root JSON node) to the configuration file. The following example snippet enables TLS authentication, disables user-based authentication (hence trust-store is not required at all), and points UniConfig to the key-store file that we have created in the previous section.","If your deployment requires authentication of individual RESTCONF users as well, you should also specify the trust-store fields by setting the'enabledClientAuthentication' field to 'true'.","You can also specify included or excluded cipher suites and TLS versions that can or cannot be used for establishing a secured tunnel between the Jetty server and clients. The following default configuration is based on actual recommendations (you should adjust it as needed):","It is enough to specify only the included protocols and included cipher suites (all other entries are denied), or excluded protocols and excluded cipher suites (all other entries are permitted). If you specify the same entries under both the included and excluded cipher suites or protocols, the excluded entry has higher priority. For example, the final set of usable cipher suites is: setOf(includedCipherSuites), setOf(excludedCipherSuites)."]}],[{"l":"UniConfig Clustering"},{"l":"Introduction","p":["UniConfig can be easily deployed in the cluster thanks to its stateless architecture and transaction isolation:","stateless architecture - UniConfig nodes in the cluster don't keep any state that would have to be communicated directly to other Uniconfig nodes in a cluster. All network-topology configuration and state information are stored inside a PostgreSQL database that must be reachable from all UniConfig nodes in the same zone. All Uniconfig nodes share the same database, making the database single source of truth for Uniconfig cluster.","transaction isolation - Load-balancing is based on mapping UniConfig transactions to Uniconfig nodes in a cluster (transactions are sticky). One UniConfig transaction cannot span multiple UniConfig nodes in a cluster. Southbound sessions used for device management are ephemeral - they are created when UniConfig needs to access device on the network (like pushing cnfiguration updates) and they are closed as soon as a UniConfig transactions is committed or closed.","There are several advantages of clustered deployment of UniConfig nodes:","horizontal scalability - Increasing number of units that can process UniConfig transactions in parallel. Single UniConfig node tends to have limited processing and networking resources - by increasing number of nodes in the cluster, this constraint can be mitigated. The more Uniconfig nodes in a cluster, the more transactions can be executed in parallel. Number of connected UniConfig nodes in the cluster can also be adjusted at the runtime.","high-availability - Single UniConfig node doesn't represent single point of failure. If UniConfig node crashes, only UniConfig transactions that are processed by corresponding node, are cancelled. Application can retry failed transaction, and it will be processed by next node in the cluster.","There also are a couple limitations to be considered:","Parallel execution of transactions is subject to a locking mechanism, where 2 transactions cannot manipulate the same device at the same time.","Single transaction is always executed by a single Uniconfig node. 
This means that the scope of a single transaction is limited by the number of devices, and the amount of their configuration, that a single Uniconfig node can handle."]},{"l":"Deployments"},{"l":"Single-zone deployment","p":["In the single-zone deployment, all managed network devices are reachable by all UniConfig nodes in the cluster (zone). Components of the single-zone deployment and connections between them are depicted in the next diagram.","Deployment with single zone","Description of components:","UniConfig controllers - Network controllers that use a common PostgreSQL system for persistence of data, communicate with network devices using NETCONF/GNMi/CLI management protocols and propagate notifications into Kafka topics (UniConfig nodes act only as Kafka producers). UniConfig nodes do not communicate with each other directly; their operation can only be coordinated using data stored in the database.","Database storage - PostgreSQL is used for persistence of network-topology configuration, mountpoint settings, and selected operational data. The PostgreSQL database can also be deployed in a cluster (outside the scope of this document).","Message and notification channels - A Kafka cluster is used for propagation of notifications that are generated by UniConfig itself (e.g., audit and transaction notifications) or by network devices and only propagated by the UniConfig controller.","Load-balancers - A load-balancer is used for distributing transactions (HTTP traffic) and SSH sessions from applications to UniConfig nodes. From the load-balancer's point of view, all UniConfig nodes in a cluster are equal. Currently, only the round-robin load-balancing strategy is supported.","Managed network devices - Devices that are managed using NETCONF/GNMi/CLI protocols by UniConfig nodes or generate notifications to UniConfig nodes. Sessions between UniConfig nodes and devices are either on-demand/ephemeral (configuration of devices) or long-term (distribution of notifications over streams).","HTTP / SSH clients & Kafka consumers - The application layer, such as workflow management systems or end-user systems. The RESTCONF API is exposed using the HTTP protocol, the SSH server exposes the UniConfig shell, and Kafka brokers allow Kafka consumers to listen to the events on subscribed topics."]},{"l":"Multi-zone deployment","p":["In this type of deployment, there are multiple zones that manage separate sets of devices because of:","network reachability issues - groups of devices are only reachable and thus manageable from some part of the network (zone) but not from others","logical separation - there are different scaling strategies or requirements for different zones","legal issues - some devices must be managed separately with split storage, for example, because of regional restrictions","The following diagram represents a sample deployment with 2 zones. The first zone contains 3 UniConfig nodes while the second zone contains only 2 UniConfig nodes. Multiple zones might share a single Kafka cluster, but database instances need to be split (they could be running in a single PostgreSQL server).","Deployment with multiple zones","Description of multi-zone areas:","Applications - The application layer is responsible for managing the mapping between network segments and Uniconfig zones. Typically, this is achieved by deploying/using an additional inventory database that contains device <-> zone mappings - based on this information, the application decides which zone to use.","Isolated zones - A zone contains one or more UniConfig nodes, load-balancers and managed network devices. 
The clusters in isolated zones share no information.","PostgreSQL databases - It is necessary to use a dedicated database per zone.","Kafka cluster - A Kafka cluster can be shared by multiple clusters in different zones, or there can be a single Kafka cluster per zone. Notifications from different zones can be safely pushed to common topics, since there can be no conflicts between Kafka publishers. However, it is also possible to achieve isolation of published messages in a shared Kafka deployment by setting different topic names in different zones."]},{"l":"Load-balancer operation","p":["The responsibility of a load-balancer is to allocate a UniConfig transaction to one of the UniConfig nodes in the cluster. This is done by forwarding requests without a UniConfig transaction header to one of the UniConfig nodes (using a round-robin strategy) and afterwards appending a backend identifier to the create-transaction RPC response in the form of an additional Cookie header (the 'sticky session' concept). Afterwards, it is the responsibility of the application to ensure that all requests that belong to the same transaction contain the same backend identifier.","The application is responsible for preserving the transaction and backend identifier cookies throughout the transaction lifetime (a client-side sketch is shown further below).","The next sequence diagram captures the process of creating and using 2 UniConfig transactions with a focus on load-balancer operation.","Load-balancing UniConfig transactions","The first create-transaction RPC is forwarded to the first UniConfig node (applying the round-robin strategy), because it does not contain the uniconfig_server_id key in the Cookie header. The response contains both the UniConfig transaction ID (UNICONFIGTXID) and uniconfig_server_id, which represents the 'sticky cookie'. The uniconfig_server_id Cookie header is appended to the response by the load-balancer.","The next request that belongs to the created transaction contains the same UNICONFIGTXID and uniconfig_server_id. The load-balancer uses the uniconfig_server_id to forward this request to the correct UniConfig node.","The last application request again represents a create-transaction RPC. This time, the request is forwarded to the next registered UniConfig node in the cluster according to the round-robin strategy."]},{"l":"Configuration"},{"l":"UniConfig configuration","p":["All UniConfig nodes in the cluster should be configured with the same parameters. There are several important sections of the config/lighty-uniconfig-config.json file related to the clustered environment."]},{"l":"Database connection settings","p":["This section contains information on how to connect to the PostgreSQL database and connection pool settings. It is placed under the 'dbPersistence.connection' JSON object.","Example with essential settings:","Be sure that [number of UniConfig nodes in cluster] * [maxDbPoolSize] does not exceed the maximum allowed number of open transactions and open connections on the PostgreSQL side. Be aware that 'maxDbPoolSize' also caps the maximum number of open UniConfig transactions (1 UniConfig transaction == 1 database transaction == 1 connection to database)."]},{"l":"UniConfig node identification and heartbeat","p":["By default, the UniConfig node name is generated randomly. This behaviour can be changed by setting 'dbPersistence.uniconfigInstance.instanceName'. The instance name is leveraged, for example, in the clustering of stream subscriptions.","UniConfig nodes report themselves in the cluster by updating a heartbeat timestamp in the database. 
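To illustrate the 'sticky session' handling described above, here is a minimal client-side sketch in Java. Only the UNICONFIGTXID and uniconfig_server_id cookie names come from this page; the base URL and the create-transaction RPC path are assumptions for illustration and should be checked against your deployment.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.stream.Collectors;

public class StickyTransactionExample {

    // Base URL of the load-balancer and the RPC path are illustrative assumptions.
    private static final String BASE = "http://uniconfig-lb:8181";

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // 1. Create a transaction; the request carries no cookies, so the
        //    load-balancer picks a UniConfig node using round-robin.
        HttpRequest create = HttpRequest.newBuilder()
                .uri(URI.create(BASE + "/rests/operations/uniconfig-manager:create-transaction"))
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        HttpResponse<String> created = client.send(create, HttpResponse.BodyHandlers.ofString());

        // 2. Collect both cookies from the response: UNICONFIGTXID (set by UniConfig)
        //    and uniconfig_server_id (the sticky cookie appended by the load-balancer).
        String cookies = created.headers().allValues("set-cookie").stream()
                .map(value -> value.split(";", 2)[0])
                .collect(Collectors.joining("; "));

        // 3. Reuse both cookies on every request that belongs to this transaction,
        //    so the load-balancer forwards it to the same UniConfig node.
        HttpRequest read = HttpRequest.newBuilder()
                .uri(URI.create(BASE + "/rests/data/network-topology:network-topology"))
                .header("Cookie", cookies)
                .GET()
                .build();
        HttpResponse<String> data = client.send(read, HttpResponse.BodyHandlers.ofString());
        System.out.println(data.statusCode());
    }
}
```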
Currently, this feature is not used by any other component in the UniConfig cluster. The reporting interval can be adjusted via the 'dbPersistence.heartBeat.heartbeatInterval' field.","Example:"]},{"l":"Kafka and notification settings","p":["This section contains settings related to connections to Kafka brokers, Kafka publisher timeouts, authentication, subscription allocation, and rebalancing settings.","Example with essential settings:"]},{"l":"Load-balancer configuration","p":["The following YAML code represents a sample Traefik configuration that can be used in a clustered UniConfig deployment (deployment with 1 Traefik node). There is one registered entry-point with the identifier 'uniconfig' and port '8181'.","Next, the UniConfig docker containers need to be configured with Traefik labels - UniConfig nodes are automatically detected by the Traefik container as 'uniconfig' service providers. There is also the URI prefix '/rests', the name of the 'sticky cookie' 'uniconfig_server_id' and the server port number '8181' (the UniConfig web server listens for incoming HTTP requests on this port).","Values of all Traefik labels should be the same on all nodes in the cluster - scaling of the UniConfig service in the cluster (for example, using Docker Swarm tools) is simple since container settings do not change.","A similar configuration to the one presented with Traefik can be achieved using other load-balancer tools, such as HAProxy."]},{"l":"Clustering of NETCONF subscriptions and notifications","p":["When a device is installed with the stream property set, subscriptions for all provided streams are created in the database. These subscriptions are always created with the UniConfig instance id set to null, so they can be acquired by any UniConfig node in the cluster. Each UniConfig instance in the cluster uses its own monitoring system to acquire free subscriptions. The monitoring system uses a specialized transaction to lock subscriptions, which prevents multiple UniConfig instances from locking the same subscriptions. While locking a subscription, the UniConfig instance writes its id to the currently locked subscription in the subscription table, which means that this subscription is already acquired by this UniConfig instance. Other instances of UniConfig will no longer find this subscription as free."]},{"l":"Optimal subscription count and rebalancing","p":["With multiple UniConfig instances working in a cluster, each instance calculates an optimal range of subscriptions to manage.","Based on the optimal range and the number of currently opened subscriptions, each UniConfig node (while performing a monitoring system iteration) decides whether it should:","Acquire additional subscriptions until the optimal range is reached","Stay put and not acquire additional subscriptions if the optimal range has been reached","Release some of its subscriptions to trigger rebalancing until the optimal range is reached","When an instance goes down, all of its subscriptions are immediately released and the optimal range for the other living nodes changes, so the subscriptions of the downed node will be reopened by the rest of the cluster.","There is a grace period before the other nodes take over the subscriptions. 
So if a node goes down and comes back up quickly, it will restart the subscriptions on its own.","The following example illustrates the timeline of a 3-node cluster and how many subscriptions each node handles:","notifications-in-cluster-rebalancing","The hard limit still applies in a clustered environment and will never be crossed, regardless of the optimal range."]}],[{"l":"Uniconfig properties","p":["UniConfig can be extensively configured using application properties located in the application.properties file.","Application properties can be separated into three groups:","Runtime mutable properties can be modified at runtime (using the update-properties RPC); their changes take effect at runtime and the properties are persisted in the database.","Database persisted properties include all runtime mutable properties and some additional properties. These properties are stored in the database, which is always their primary source. With UniConfig Cloud Config, they remain constant across UniConfig instances in the same cluster and cannot be overridden via the application properties file.","Regular UniConfig properties comprise all the remaining properties. These properties can always be changed using application.properties and can differ between UniConfig instances.","Database persisted properties include the following property prefixes:","crypto","schema-settings","callbacks","notifications.kafka","netconf-default-parameters","gnmi-default-parameters","cli-default-parameters","These properties are stored in the properties table and are also known as default properties. They can be read and updated using the read-properties RPC and update-properties RPC (a request sketch is shown further below).","After UniConfig is started, if default properties are found in the database, UniConfig will use the values in the database. For properties not found in the database, values from the first UniConfig instance after startup are used (from the application.properties file or env variables) and saved in the database for the next UniConfig instances."]},{"l":"UniConfig Cloud Config","p":["UniConfig Cloud Config is used to retain the same property values between distributed UniConfig instances connected via a message broker. It is largely the same technology as Spring Cloud Config with a JDBC backend and Spring Cloud Bus. The main difference is that the UniConfig Cloud Config Server and Cloud Config Client are in the same project, while Spring requires a separate Cloud Config Server application.","By calling a special signal (the Refresh Bus Endpoint call) during runtime, the system sets the same value for persisted properties in all UniConfig instances. The signal is called immediately after mutable properties are modified using the update-properties RPC. 
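As a rough illustration of changing a default property at runtime, the sketch below posts an update-properties request over RESTCONF. The URL path, the payload shape and the property name are assumptions for illustration only (the property merely reuses the notifications.kafka prefix listed above); consult the RPC documentation for the exact contract.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class UpdatePropertiesExample {

    public static void main(String[] args) throws Exception {
        // Illustrative payload; the real input structure of update-properties may differ.
        String body = """
                {
                  "input": {
                    "properties": {
                      "notifications.kafka.example-property": "true"
                    }
                  }
                }
                """;

        // The RPC path below is an assumption for illustration, not the documented contract.
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://uniconfig:8181/rests/operations/uniconfig-manager:update-properties"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        // With UniConfig Cloud Config enabled, the instance handling this call persists the
        // change and triggers the Refresh Bus Endpoint so that other instances reload the
        // property from the database.
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```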
The specific UniConfig instance calling the signal sends Kafka events containing the changed properties, while other instances read those properties from the database and use the refresh endpoint to update them in runtime."]},{"l":"UniConfig startup with UniConfig Cloud Config","p":["UniConfig startup with UniConfig Cloud Config: startup-with-ucc","Before starting UniConfig, enable Cloud Config by using the following properties:","On startup, UniConfig checks the database for any default properties to configure:","If default properties are found in the database, UniConfig manually refreshes its property values to use those in the database.","If no default properties are found, UniConfig uses its existing properties and, once loaded, saves them in the database for the next UniConfig instances.","At the end of Spring initialisation, the Refresh Bus Endpoint is called. This refreshes default properties with the database values for all UniConfig instances connected via the Kafka refresh topic. A second refresh during the UniConfig startup cycle is required if several instances were started simultaneously and the database contains no property values to synchronize for properties (especially encryption keys).","At application runtime, if the update-properties RPC is used with default properties on input, UniConfig updates the properties in the database. It also calls the Refresh Bus Endpoint, which reloads properties for all UniConfig instances connected via Kafka."]},{"l":"UniConfig startup without UniConfig Cloud Config","p":["UniConfig startup without UniConfig Cloud Config: startup-without-ucc","Before starting UniConfig, disable Cloud Config by using the following properties:","On startup, UniConfig checks the database for any default properties to configure:","If default properties are found in the database, UniConfig manually refreshes its property values to those in the database.","If no default properties are found, UniConfig uses its existing properties and, once loaded, saves them in the database for the next UniConfig instances.","At the end of Spring initialisation, the Refresh Bus Endpoint is not called.","At application runtime, if the update-properties RPC is used with default properties on input, Uniconfig updates the properties in the database but not inside the application. This will therefore only affect the next UniConfig instance started after the properties are updated."]}],[{"l":"Performance characteristics","p":["This page contains reference performance characteristics for Uniconfig.","We try to answer the question how fast can a certain number of devices with a certain amount of configuration be installed and fully synced by","a single Uniconfig instance","3-node Uniconfig deployment with load balancer","The unit of measurement is: Number of configuration lines / per single CPU core / per minute. 
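To make this unit of measurement concrete, the following minimal Java calculation reproduces the Test A figures reported below from the raw inputs (devices, duration, cores and average configuration size):

```java
public class InstallRateExample {

    public static void main(String[] args) {
        // Numbers from Test A below: 750 devices registered in 7.5 hours on 4 cores.
        int devices = 750;
        double hours = 7.5;
        int cores = 4;

        // Average configuration size per device (half SAOS 6, half SAOS 8).
        double avgJsonLines = (8834 + 277375) / 2.0; // 143104.5 json lines
        double avgCliLines = (1510 + 30705) / 2.0;   // 16107.5 cli lines

        double minutesPerDevice = hours * 60 / devices;            // 0.6 minutes
        double jsonRate = avgJsonLines / cores / minutesPerDevice; // ~59627 (the page reports 59626, truncated)
        double cliRate = avgCliLines / cores / minutesPerDevice;   // ~6711

        System.out.println("json lines / core / minute: " + jsonRate);
        System.out.println("cli lines / core / minute: " + cliRate);
    }
}
```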
This number can then be roughly applied to any other similar device being installed by Uniconfig."]},{"l":"CLI devices","p":["There are 2 main families of CLI devices: those using Cisco-style configuration (configuration in sections) and devices that use a one-line style of configuration (without sections), such as Ciena SAOS 8.","It is important to distinguish the performance characteristics of these 2 families."]},{"l":"Netconf devices","p":["// TBD"]},{"l":"Tree-like style of configuration","p":["Cisco style of configuration (IOS, IOS-XR etc.)","// TBD"]},{"i":"one-line-style-of-configuration-devices-saos-performance-tests","l":"One-line style of configuration devices (SAOS) performance tests","p":["Important caveats:","Measurements were performed on simulated devices = no device overhead","Measurements were performed on a local network = no network overhead","Measured on Uniconfig version 5.0.12","Simulated devices were of two flavors: half with a small configuration and half with a big configuration","Tests:","Single-node deployment of Uniconfig - resources: CPU 4 cores and RAM 4 GB","3-node deployment of Uniconfig - resources per node: CPU 4 cores and RAM 4 GB","3-node deployment of Uniconfig - resources per node: CPU 6 cores and RAM 4 GB"]},{"i":"device-installation--synchronization","l":"Device installation & synchronization"},{"i":"test-a---one-node-uniconfig","l":"Test A - one node Uniconfig","p":["Devices running the SAOS operating system (Ciena) and similar","Inputs: 375 x SAOS 6 devices, configuration: 8834 json lines = 1510 cli config lines (brief config)","375 x SAOS 8 devices, configuration: 277375 json lines = 30705 cli config lines (brief config)","Evaluation: 750 devices were registered in 7.5 hours on a single-node Uniconfig using 4 cores. Average installation duration for one device = (7.5 * 60 minutes) / 750 devices = 0.6 minutes. Average number of json lines per device = (8834 + 277375) / 2 = 143104 lines. Lines of json / per core / per minute = 143104 lines / 4 cores / 0.6 minutes = 59626. Average number of cli lines per device = (1510 + 30705) / 2 = 16107.5 lines. Lines of cli / per core / per minute = 16107.5 lines / 4 cores / 0.6 minutes = 6711.","Installation & sync rate:","59,626 lines of json / per core / per minute","or","6,711 lines of raw cli configuration / per core / per minute","A single Uniconfig node is capable of installing (and fully syncing) 100 Ciena (SAOS 8) devices with 15k lines of configuration (~123k lines of formatted json in Uniconfig) in 55 minutes using 4 CPU cores","The recommended batch size for parallel installation in this case would be about 50 devices per batch, as the parallelism is limited by the number of available cores."]},{"i":"test-b---3-nodes-of-uniconfig-with-load-balancer","l":"Test B - 3 nodes of Uniconfig with load balancer","p":["Devices running the SAOS operating system (Ciena) and similar","Inputs: 750 x SAOS 6 devices, configuration: 8834 json lines = 1510 cli config lines (brief config)","750 x SAOS 8 devices, configuration: 277375 json lines = 30705 cli config lines (brief config)","Evaluation for the 4-core deployment: 1500 devices were registered in 5.5 hours on a 3-node Uniconfig deployment, each node using 4 cores. Average installation duration for one device = (5.5 * 60 minutes) / 1500 devices = 0.22 minutes. Average number of json lines per device = (8834 + 277375) / 2 = 143104 lines. Lines of json / per core / per minute = 143104 lines / (3 * 4) cores / 0.22 minutes = 54206. Average number of cli lines per device = (1510 + 30705) / 2 = 16107.5 lines. Lines of cli / per core / per minute = 16107.5 lines / (3 * 4) cores / 0.22 minutes = 6101.","Installation & sync rate:","54,206 lines of json / per core / per minute","or","6,101 lines of raw cli configuration / per core / per minute","3 nodes of Uniconfig with a load balancer are capable of installing (and fully syncing) 100 Ciena (SAOS 8) devices with 15k lines of configuration (~123k lines of formatted json in Uniconfig) in 19 minutes using 12 CPU cores","The recommended batch size for parallel installation in this case would be about 150 devices per batch, as the parallelism is limited by the number of available cores."]},{"i":"test-c---3-nodes-of-uniconfig-with-load-balancer","l":"Test C - 3 nodes of Uniconfig with load balancer","p":["Devices running the SAOS operating system (Ciena) and similar","Inputs: 750 x SAOS 6 devices, configuration: 8834 json lines = 1510 cli config lines (brief config)","750 x SAOS 8 devices, configuration: 277375 json lines = 30705 cli config lines (brief config)","Evaluation for the 6-core deployment: 1500 devices were registered in 3.7 hours on a 3-node Uniconfig deployment, each node using 6 cores. Average installation duration for one device = (3.7 * 60 minutes) / 1500 devices = 0.148 minutes. Average number of json lines per device = (8834 + 277375) / 2 = 143104 lines. Lines of json / per core / per minute = 143104 lines / (3 * 6) cores / 0.148 minutes = 53717. Average number of cli lines per device = (1510 + 30705) / 2 = 16107.5 lines. Lines of cli / per core / per minute = 16107.5 lines / (3 * 6) cores / 0.148 minutes = 6046.","Installation & sync rate:","53,717 lines of json / per core / per minute","or","6,046 lines of raw cli configuration / per core / per minute","3 nodes of Uniconfig with a load balancer are capable of installing (and fully syncing) 100 Ciena (SAOS 8) devices with 15k lines of configuration (~123k lines of formatted json in Uniconfig) in 13 minutes using 18 CPU cores","The recommended batch size for parallel installation in this case would be about 150 devices per batch, as the parallelism is limited by the number of available cores."]}],[{"l":"Monitoring"},{"l":"Monitoring using Metrics","p":["UniConfig exposes multiple metrics to monitor traffic and other useful values that reflect its performance. Output can take the form of plaintext log messages in the metrics.log file in the log directory in the root of the distribution, or of raw data in CSV format that can be further processed by 3rd-party visualization tools. 
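As a rough illustration of consuming this CSV output, the sketch below reads the transaction_invoke metric file referenced in the example output section and prints the most recent sample. The exact column layout is not documented here, so the code simply pairs header names with the last row.

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class TransactionMetricReader {

    public static void main(String[] args) throws Exception {
        // File name taken from the example output section; the column layout is assumed.
        Path csv = Path.of("metrics",
                "org.opendaylight.controller.uniconfig.transaction.manager.impl."
                        + "UniconfigTransactionManagerImpl.transaction_invoke.csv");

        List<String> lines = Files.readAllLines(csv);
        if (lines.size() < 2) {
            System.out.println("No samples recorded yet");
            return;
        }

        // Print the newest sample as "column = value" pairs.
        String[] header = lines.get(0).split(",");
        String[] latest = lines.get(lines.size() - 1).split(",");
        for (int i = 0; i < Math.min(header.length, latest.length); i++) {
            System.out.println(header[i] + " = " + latest[i]);
        }
    }
}
```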
CSV files are located in the metrics directory in the root of the distribution."]},{"l":"Types of metrics","p":["Gauge - reports an instantaneous value at a point in time (for example, queue size)","Meter - measures the total count of event occurrences, the total mean rate, and mean rates for the past 1, 5 and 15 minute time windows"]},{"l":"List of notable metrics exposed by UniConfig","p":["Gauges","io.frinx.uniconfig.manager.impl.task.TaskExecutorImpl.queue_size - The number of tasks in the queue waiting for execution","org.apache.sshd.server.SshServer.active_sessions - The number of active CLI sessions","org.opendaylight.controller.uniconfig.transaction.manager.api.UniconfigTransactionManager.open_transaction_count - The number of open transactions","Meters","org.opendaylight.yangtools.yang.common.RpcResult.rpc_invoke - All the RPCs invoked by Uniconfig","org.opendaylight.controller.uniconfig.transaction.manager.impl.UniconfigTransactionManagerImpl.transaction_invoke - All the invoked transactions in Uniconfig","io.frinx.uniconfig.shell.cli.SshTerminal.cli_message - All the invoked commands in the Uniconfig CLI shell"]},{"l":"Configuration","p":["Configuration is done via a section in the \"uniconfig-lighty-config.json\" file:"]},{"l":"Example output","p":["metrics/org.opendaylight.controller.uniconfig.transaction.manager.impl.UniconfigTransactionManagerImpl.transaction_invoke.csv","log/metrics.log"]}],[{"i":"uniconfig-client-sdk","l":"UniConfig Client (SDK)","p":["The Uniconfig client SDK is implemented in Java 17 and uses Uniconfig's RESTCONF API.","The SDK provides the following advantages over raw RESTCONF:","The SDK is versioned and tied to a specific Uniconfig release","Every version is tested","Type safety","Additional features on top of the basic RESTCONF facade, such as:","Integration with Kafka (notification listener)","Client-side diff (to calculate the diff between 2 versions of config)","etc."]},{"l":"Basic device configuration management","p":["An example of a simple read & write use case on top of a single device called vnf."]},{"l":"Integration with Kafka","p":["The Uniconfig SDK implements a Kafka listener that taps into Uniconfig notification streams and allows the client to consume the notifications easily. Notifications available through Uniconfig are: device notifications, alerts and telemetry, but also Uniconfig-generated notifications in the audit log topic. For further information, see Uniconfig notifications (kafka)."]},{"l":"Client side diff","p":["The Uniconfig SDK offers a diff calculation feature that can calculate a delta between a 'before' device configuration state and an 'after' configuration state. This diff is then transformed into a patch operation and sent to Uniconfig's RESTCONF API.","Client-side diff calculation is useful in specific cases where a substantial amount of configuration data is available and modified outside of Uniconfig. Typically, the entire 'after' state would have to be pushed into Uniconfig and Uniconfig would calculate its own diff internally (a conceptual sketch of the client-side approach follows below). 
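To illustrate the idea behind client-side diff, here is a minimal, conceptual sketch that compares two flattened 'before' and 'after' states and collects the edits that would end up in a single patch. It is not the SDK's actual API; all names and paths below are hypothetical, and the real feature works on YANG-modelled JSON rather than plain maps.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class ClientSideDiffSketch {

    public static void main(String[] args) {
        // "Before" and "after" states of a small, flattened piece of device config.
        Map<String, String> before = Map.of(
                "interfaces/interface=ge-0/0/1/config/description", "uplink",
                "interfaces/interface=ge-0/0/1/config/mtu", "1500");
        Map<String, String> after = Map.of(
                "interfaces/interface=ge-0/0/1/config/description", "uplink-to-core",
                "interfaces/interface=ge-0/0/2/config/description", "new access port");

        Map<String, String> edits = new LinkedHashMap<>();
        after.forEach((path, value) -> {
            if (!before.containsKey(path)) {
                edits.put(path, "create -> " + value);
            } else if (!before.get(path).equals(value)) {
                edits.put(path, "replace -> " + value);
            }
        });
        before.keySet().stream()
                .filter(path -> !after.containsKey(path))
                .forEach(path -> edits.put(path, "delete"));

        // In the real feature these edits would form one PATCH request to UniConfig;
        // only the delta travels over the network instead of the whole "after" state.
        edits.forEach((path, op) -> System.out.println(op + " @ " + path));
    }
}
```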
By calculating on the client side, it is possible to reduce the network communication between the client and Uniconfig (as less data has to be sent) and to reduce how much diff calculation Uniconfig has to do itself (shifting it to the client side).","This feature leverages the same implementation of diff calculation that is used within Uniconfig itself and requires YANG schemas to be present for the computation.","Example usage:","Notable features of client side diff:","Requires YANG schemas in order to work","JSON data have to conform to those YANG models","Allows for templated values to be present in the JSON","Produces a single PATCH operation containing all changes detected. See YANG patch in Uniconfig","Changes detected: Create, Update, Delete, Reorder (lists and leaf-lists)","Uses the same implementation as Uniconfig's calculate-diff"]}],[{"l":"Developer Guide","p":["This guide provides instructions on how to extend UniConfig to support more devices, commands and operations.","Guides on how to extend UniConfig to support a new device or new commands:","Architecture","Translation Units in general","Translation Units Documentation for FRINX Uniconfig","OpenConfig to device config mapping","Developing a new translation unit","Implementing CLI Translation Unit","NETCONF Unified Translation Unit","Native-CLI translation units","Metrics"]}],[{"l":"Architecture"},{"l":"Pre-requisite reading","p":["Honeycomb design documentation:","https://wiki.fd.io/view/Honeycomb https://docs.fd.io/honeycomb/1.18.04/release-notes-aggregator/release_notes.html","CLI plugin available presentations:","https://www.dropbox.com/sh/ry2ru5vizv7st8u/AAAntbCRHb1yS_NmEpbXG1WBa?dl=0"]},{"l":"Building on honeycomb","p":["The essential idea behind the southbound plugins comes from Honeycomb. Honeycomb defines, implements and uses the same pipeline and the same framework to handle data. The APIs, some implementations and also SPIs used in the southbound plugin's translation layer come from Honeycomb. However, the southbound plugin creates multiple instances of Honeycomb components and encapsulates them behind a mount point.","The following series of diagrams shows the evolution from Opendaylight to Honeycomb and back into Opendaylight as a mountpoint:","High-level Opendaylight overview with its concept of a Mountpoint:","ODL","High-level Honeycomb overview:","HC","Honeycomb core (custom MD-SAL implementation) overview:","Honeycomb's core","How Honeycomb is encapsulated as a mount point in Opendaylight:","Honeycomb's core as mountpoint"]},{"l":"Major components","p":["The following diagram shows the major components of the southbound plugin and their relationships:","CLI plugin components"]},{"l":"Modules","p":["The following diagram shows project modules and their dependencies:","CLI plugin modules"]}],[{"l":"Translation Units in general"},{"l":"Module structure","p":["A translation unit is a self-contained project which implements a mapping between OpenConfig-based YANG models and device-specific configuration. It is used by the FRINX ODL to perform translation between the device-specific configuration model and standard (OpenConfig) models. A unit usually consists of:","Handlers","Readers","Writers","TranslateUnit implementation","RPCs"]},{"l":"Handlers","p":["Each complex node in YANG (container, list, augment...) 
should have a dedicated handler (Reader, Writer)","This enables extensibility and readability, and the framework can easily filter and process the data this way","Unless there is a need to also handle child nodes, in which case register the handler using the subtreeAdd method from the registries","There are 2 types of handlers: Readers (Read operation) and Writers (Create, Update, Delete operations)","One can implement just the readers, or both readers and writers, for YANG models. Writers must have counterpart readers because of reconciliation.","Readers and Writers should use the InstanceIdentifier parameter they receive in the readCurrentAttributes or writeCurrentAttributes methods to find information about keys for their parent nodes. E.g. a Reader registered under ID: /interfaces/interface/config will always receive a keyed version of that ID: /interfaces/interface[Loopback0]/config. So it can use the firstKeyOf method on InstanceIdentifier to get the keys.","The RWUtils class contains methods for InstanceIdentifier manipulation.","Readers and writers can be easily tested and it is necessary to provide unit tests for all of them. It's important to cover readCurrentAttributes and writeCurrentAttributes with all possible scenarios (all data there, no data there, partial data there...)","Writers may use Preconditions.checkArgument() before accessing the device. Failure of the precondition check does not invoke the default rollback (opposite operation) on the writer where the precondition is located."]},{"l":"Base Handlers","p":["When a handler for the same YANG node is implemented for various devices, it tends to lead to a lot of boilerplate and duplicate code. Therefore, we should implement a base handler for such handlers. How does it work:","create a base-project (if there isn't any) to group base handlers (e.g. for an interface handler, choose interface-baseproject)","each base handler needs to be abstract and implement the same interfaces as the original handler","extract common functionality in the base handler. Common functionality means functionality shared by the majority of the original handlers. If a handler does not share the extracted functionality, it needs to override the original interface methods to hide the extracted functionality.","let the original handlers extend the base abstract handler"]}],[{"l":"Translation Units Documentation for FRINX Uniconfig"},{"l":"Auto-generated documentation","p":["Documentation for translation units that is generated automatically from the source code and javadocs can be found here. This documentation is useful for checking actual implementations, whether a functionality is implemented for a particular device, and by which protocol (netconf or cli)."]},{"l":"Manual documentation","p":["This repository contains documentation for all available translation units. A translation unit is a piece of code that includes handlers to read from or write to a specific device (e.g. a Cisco IOS classic router) and facilitates the translation to OpenConfig models. The purpose of this documentation is to see which commands can be read and set and how they map to the respective YANG models. Every section has a README file that provides an overview of all show and configuration commands that are supported."]},{"l":"OPERATIONAL datasets","p":["Go to operational datasets","Show commands are commands that on Cisco devices usually start with 'show'. 
The aim is to obtain data from the router."]},{"l":"URL","p":["GET operation issued on operational datastore"]},{"l":"OPENCONFIG YANG","p":["In case of show commands this section is a sample output of a particular show command."]},{"l":"OS COMMANDS","p":["In this section we list the actual router commands with sample outputs, where the data obtained and transformed into OpenConfig YANG is marked as bold. We list show commands and outputs for each supported device OS.","IOS XR | IOS Classic/XE | Junos"]},{"l":"DEVICE YANG","p":["In case of CLI units, the unit parses the output of the CLI command directly into OC YANG. In case of Netconf units, the output is mapped to OC YANG through Device YANG (YANG model supported by the device). In case of Netconf units, the YANG is also written in documentation. This section is a link to XML unit test input testing this operation."]},{"l":"UNIT","p":["Link to github code where this show command is implemented along with unit version range."]},{"l":"CONFIGURATION datasets","p":["Go to config datasets"]},{"i":"url-1","l":"URL","p":["PUT operation with given URL will result in creating of data in config datastore DELETE operation with given URL will result in removing data in config datastore"]},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG","p":["In case of configuration commands, this section represents the HTTP body in PUT operation"]},{"i":"os-commands-1","l":"OS COMMANDS","p":["In this section we list the actual router commands that are mapped to the OpenConfig YANG model. Data transformed into OpenConfig YANG is marked as bold. We list commands for each supported device OS.","IOS XR | IOS Classic/XE | Junos"]},{"i":"device-yang-1","l":"DEVICE YANG","p":["In case of Netconf units, the device yang represents command sent to the device in device YANG model. This section is a link to XML unit test input testing this configuration."]},{"i":"unit-1","l":"UNIT","p":["Link to github code where this config command is implemented along with unit version range."]}],[{"l":"OpenConfig to device config mapping"},{"l":"Finding mapping between device and the model","p":["Preferred YANG models for device config and operational data are OpenConfig models.","These models usually represent configuration part in container config and operational part in container state. Operational data is config data + operational data.","YANG models used in UniConfig framework need to be located in https://github.com/FRINXio/openconfig. In case the desired functionality is not modeled yet, you can create new YANG with its own structure or it can augment existing OpenConfig models. Guideline, how to write OpenConfig models can be found at http://www.openconfig.net/docs/style-guide/."]},{"l":"Choosing the right YANG models","p":["Before writing a custom YANG model for a unit, it is important to check whether such a model doesn't already exist. There are plenty of YANG models available, modeling many aspects of network device management. The biggest groups of models are:","OpenConfig https://github.com/openconfig/public/tree/master/release/models","IETF https://github.com/YangModels/yang/tree/master/standard/ietf","It is usually wiser to choose an existing YANG model instead of developing a custom one. Also, it is very important to check for existing units already implemented for a device. 
If there are any, the best approach will most likely be to use YANG models from the same family as existing units use."]},{"l":"Existing documentation","p":["There is translation-units-docs page as a single point of truth for mapping. Use`` notation for variables in the templates. This notation is postman compatible."]}],[{"l":"Developing a new translation unit","p":[".pom file of the unit","add your unit as a dependency to artifacts/pom","dependencies","handlers (readers/writers)","https://github.com/FRINXio/cli-units","https://github.com/FRINXio/unitopo-units","name of the unit should be in format device-domain-unit(e.g. ios-interface-unit, xr-acl-unit)","package name should be in format io.frinxcli|netconf., device name and domain (e.g. io.frinx.cli.unit.ios.interface)","point to correct unit parent","The easiest way how to develop a new transaction unit is to copy existing one and change what you need to make it work. E.g. if you are creating an interface translation unit, the best way is to copy existing interface translation unit for some other device, that is already implemented. You can find existing units on github:","This section provides a tutorial for developing a new translation unit.","Unit class","unit tests","What you need to add:","What you need to change:","What your unit needs to contain:"]},{"i":"best-practices-for-handlers-readerswriters","l":"Best practices for handlers (readers/writers)","p":["All comments are in English","All defined exceptions can be thrown from the code","All new dependencies and imports are actually used","All variables/methods are actually used","Before pushing the code make sure:","Chunk","Code has correct spacing","Commented out code","Comments are appropriate to the code behavior","Constants","Do not push code that contains following:","Double blank lines","java regexes","New classes/interfaces have the correct license header","New classes/interfaces/yang model have correct date","Reflection","Show commands","Static imports","Trailing whitespaces or tabs"]}],[{"l":"Implementing CLI Translation Unit","p":["CLI Translation units are located in https://github.com/FRINXio/cli-units repository. Java is used in CLI translation units."]},{"l":"Init Unit","p":["Init translation unit does not contain readers and writers but it only contains implementation of TranslateUnit. There should be only one init translation unit per device type. Purpose of the init TU is to setup CLI prompt and define rollback strategy.","The implementation of TranslateUnit needs to override methods:","SessionInitializationStrategy getInitializer(@Nonnull final RemoteDeviceId id, @Nonnull final CliNode cliNodeConfiguration)","Implement and return device specific SessionInitializationStrategy where:","Setup device CLI terminal with attributes like width and length allowing to display infinite output.","Enter desired CLI mode which will be used as default - every reader and writer gets CLI prompt in this state (e.g. EXEC mode for IOS, config mode for IOS-XR, cli mode for Junos)","These methods may be overridden if necessary:","getPreCommitHook()- method that is invoked before actual commit is written into device. For example this method can enter configuration mode.","getCommitHook()- method that invokes actual commit and should catch any error on commit. Also it should handle any post-commit actions when the commit was successful.","getPostFailedHook()- method that is invoked when commit fails. 
Should implement aborts or revert strategies.","getErrorPatterns()- method returning Java Patterns with regular expressions that match device specific error patterns.","getCommitErrorPattern()- method returning Java Patterns with regular expressions that match device specific error patterns that can be returned by the device after issuing commit."]},{"l":"Translate Unit","p":["Handlers(readers/writers) need to be registered in this method. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - should be passed to readers/writers.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must specify device type and device version during registration. Snippet below shows registration of IosXRInterfaceUnit for device type \"ios xr\" all versions.","Implementation of TranslateUnit must implement these methods:","Instance-identifier in generic reader/writer must be without keys pointing to the target composite node used in implemented reader/writer.","Instance-identifiers for YANG container and list (not for augmentations and nodes behind augmentations) are automatically generated to IIDs class (used in examples bellow) during build of openconfig project.","Return RPC services implemented in the translation unit. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - may need to be passed to RPC implementations. Default implementation returns empty Set.","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (Openconfig) translate unit\")","Return YANG models containing composite nodes handled by handlers(readers/writers). Default implementation returns empty Set if no handlers are implemented.","Set getRpcs(@Nonnull Context context)","Set getYangSchemas()","Set getSupportedVersions()","String getUnitName()","This method should also registers for general Openconfig checks:","This method should return specific device version that work with this handler.","Translate unit class must implement interface io.frinx.cli.unit.utils.AbstractUnit. Naming convention for translate unit class is device-type+openconfig-domain+Unit (e.g. IosXrInterfaceUnit). Translate unit class is usually instantiated, initialized and closed from Blueprint.","void provideHandlers()"]},{"l":"Ordering of handlers","p":["As the example shows, the ip address command must be executed after the interface command.","Registration of Ipv4ConfigWriter by using the addAfter method ensures that the OpenConfig ip address data is translated after OpenConfig interface data. That means CLI commands are executed in the desired order.","rRegistry.add","rRegistry.addNoop","rRegistry.subtreeAdd","The following sample shows a CLI translation unit with dependency between 2 writers. The unit is dedicated for interface configuration on a Cisco IOS device.","This example uses method subtreeAddAfter instead of subtreeAdd. Last parameter in this method shows dependency on writer registered under IIDs.IN_IN_CONFIG.","Use for writers handling data of whole composite node subtrees. This ensures that if only a child node is updated, the writer gets triggered. Method subtreeAdd requires a set of IIDs for all handled children, the IIDs must start from the reader itself, not from root.","Use to register noop writers","Use when a reader implementation also fills composite child nodes of target composite node. 
Method subtreeAdd requires a set of IIDs for all handled children.","Use when common GenericConfigListReader, GenericConfigReader,* GenericOperListReader or GenericOperReader need to be registered.","Use when common GenericListWriter or GenericWriter are registered.","VRF writer should be between them. If the order is not expressed during registration, commands might be executed on device in an unpredictable/invalid order.","wRegistry.add","wRegistry.subtreeAdd","Writers are stored in a linear structure and are invoked in order of registration. When registering a writer a relationship with another writer or set of writers can be expressed using addBefore, addAfter, subtreeAddBefore, subtreeAddAfter methods. E.g. InterfaceWriter and VRFInterfaceWriter should have a relationship: InterfaceWriter -> VRFInterfaceWriter so that first an interface is created and only then assigned to VRF."]},{"l":"Device registration","p":["In TranslateUnit we had just created, e.g. MplsUnitXR4.java, we have to register device as a constant located../iosxr/init/IosXrDevices.java containing device type and version as described in TranslateUnit documentation.","This unit can reuse all writers/readers from existing ones, except the writer (or other handler) we want to alter or create (in our example writer for tunnel configuration). We have to create a new writer with desired behaviour and add it into provideWriters method."]},{"l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or augmentation nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds","Readers should always use overloaded blockingRead method which takes in the ReadContext since that method performs caching internally","Use full version of commands e.g. show running-config interface instead of sh run int"]},{"l":"Reading of CLI and device configuration","p":["CLI readers maintain translation between device and yang models. We're sending read commands to the device and outputs are cached. This process is shown below.","Reading CLI conf from device"]},{"i":"reading-of-configuration-from-cli-network-device---different-scenarios","l":"Reading of configuration from CLI network device - different scenarios","p":["The diagram below shows four specific scenarios:","Configuration is read using show running-config pattern for the first time","Another configuration is read using running-config pattern","cache can be used","BGP configuration/state is read using \"show route bgp 100\"","the running-config pattern is not used","BGP configuration/state is read using \"show route bgp 100\" again","cached can be used","Different scenarios"]},{"l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG. These interfaces also contain util methods which may be used for better manipulation with data. 
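The following self-contained sketch mirrors the reader pattern described above: getAllIds derives the list keys from one 'show' output, and a readCurrentAttributes-style method fills a single entry's attributes. The class, regular expressions and sample output are hypothetical; a real unit would implement one of the interfaces listed below and obtain the output via blockingRead.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class InterfaceReaderSketch {

    private static final Pattern INTERFACE_LINE =
            Pattern.compile("^interface (\\S+)$", Pattern.MULTILINE);
    private static final Pattern DESCRIPTION_LINE =
            Pattern.compile("^\\s*description (.+)$", Pattern.MULTILINE);

    // Mirrors getAllIds(): extract one key per list entry from the device output.
    static List<String> getAllIds(String output) {
        List<String> ids = new ArrayList<>();
        Matcher matcher = INTERFACE_LINE.matcher(output);
        while (matcher.find()) {
            ids.add(matcher.group(1));
        }
        return ids;
    }

    // Mirrors readCurrentAttributes(): parse the attributes of a single list entry.
    static String readDescription(String perInterfaceOutput) {
        Matcher matcher = DESCRIPTION_LINE.matcher(perInterfaceOutput);
        return matcher.find() ? matcher.group(1) : "";
    }

    public static void main(String[] args) {
        // Hard-coded stand-in for what blockingRead would return from the device.
        String output = "interface Loopback0\n description mgmt loopback\n!\n"
                + "interface GigabitEthernet0/1\n description uplink\n!";
        System.out.println(getAllIds(output));                       // [Loopback0, GigabitEthernet0/1]
        System.out.println(readDescription(" description uplink")); // uplink
    }
}
```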
For more information about methods please read javadocs.","CliConfigListReader- implement this interface if target composite node in YANG is list and represents config data.","CliConfigReader- implement this interface if target composite node in YANG is container or augmentation and represents config data.","CliOperListReader- implement this interface if target composite node in YANG is list and represents operational data.","CliOperReader- implement this interface if target composite node in YANG is container or augmentation and represents operational data.","In cases where you want to invoke multiple readers on reading one YANG node, extend following abstract classes:","CompositeListReader- extend this abstract class if multiple list readers need to be invoked when reading specific list in YANG.","CompositeReader- extend this abstract class if multiple readers need to be invoked when reading specific node in YANG.","A practical example of their usage is reading network instance based on it's type. All child readers need to implement a check when the particular reader should be invoked or the parent reader should move on to the next reader.","For example child reader for bgp (located under protocol) needs to check if identifier in protocol has value BGP. Otherwise reader for bgp will be invoked even if protocol identifier is OSPF."]},{"l":"Util classes","p":["ParsingUtils- use methods of this util class if you want to parse plaintext to java object builder"]},{"l":"Plaintext parsing hints","p":["Use as specific regular expressions when parsing CLI output as possible.","For Cisco CLI devices avoid using section and other advanced formatting parameters. Only include, exclude and begin are allowed.","Use CONFIG data as the source of truth when parsing information from device. Except when parsing state containers (or containers explicitly marked as config false).","I.e. use show running-config | include router ospf instead of sh ospf when retrieving ospf routers list.","In some cases, it is not possible to just use config data e.g. sh run interface does not show any data for interfaces that have no configuration. In this case it is necessary to use operational information from e.g. show ip interface brief","Use following pattern when parsing multiline output from CLI, where it is difficult to extract lines and their relationships: i.e. when parsing configured BGP neighbors per address family following command can be used:","which results in:","This output can then be parsed by:","Remove newlines to get a single line of string","Replace \"router\" with \"\" to separate bgp routers per line","Find the line that matches required router bgp","Take that line and replace \"address-family\" with \"-family\" to get address-family neighbors per line"]},{"l":"Base Readers","p":["Each base reader should contain abstract methods:","String getReadCommand()- each child reader should fill in the read command used to get information needed for this reader. Arguments may vary and they are used to be more specific in the read command (eg. when creating a command to gather information about a specific interface, you may want to pass interface name as argument).","Pattern getLine(>)- there may be more such methods and they are used to get the regular expression needed to parse output of the command (eg. 
in case of interface reader, you will create methods getDescriptionLine, getShutdownLine etc.)","Naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node.","A writer should check whether the command it executed was handled by the device properly (by checking the output) and if not throw one of the Write/Update/Delete FailedException","Chunk templating framework is preferred to use in writers. It gives us:","Null safety","if/loop etc. inside templates","Default values and many more","Use full version of commands e.g. configure terminal instead of conf t"]},{"i":"mandatory-interfaces-to-implement-1","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). These interfaces also contain util methods which may be used for better manipulation with data. For more information about methods please read javadocs.","All writers override updateCurrentAttributes method and avoid delete/write combination, unless specified in a comment.","CliListWriter- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","CliWriter- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter.","CompositeWriter- extend this abstract class when multiple writers need to be invoked on one YANG node. The writers need to implement a check whether or not should they be invoked."]},{"l":"Base Writers","p":["Each base writer should contain abstract methods:","String updateTemplate(Config before, Config after) this method returns Chunk template used for writing and updating data on the device.","String deleteTemplate(Config data) this method returns Chunk template used for deleting data from device.","If updating data is done differently than writing new data, method String writeTemplate(Config data) might be used as well."]},{"l":"Chunk Templates","p":["Each original writer transformed to use a base writer should have all it's templates written in Chunk. We extended Chunk to achieve easier manipulation with data. There is now a new filter called update. 
It's usage is following:","\"{$data|update(mtu,mtu $data.mtu, no mtu)}\"","$data represents the data structure on which we check if it was updated from the previous state.","mtu first argument represents the name of the field that should be checked within the $data","$data.mtu second argument represents the actual string that will be sent to the device if the value of the field named in first argument was changed or didn't exist before","no mtu third argument represents the actual string that will be sent to the device if the value of the field named in first argument was deleted","optional true fourth argument, if present, lets the filter know it should send both outputs to the device, first the delete string(third argument) then the update string (second argument)","Update filter does not send any of the strings to the device, if the value did not change.","When using this filter in updateTemplate method, you must use fT() method (format template) with one pair of the arguments being \"before\", before to let the template know what data represents the previous state.","Unfortunately, Opendaylight generates boolean fields instead of Boolean and Chunk does not work with boolean fields in the same way as any other object fields. Therefore for boolean values (eg. shutdown), you cannot use update filter and checking for changes needs to be done in a traditional way."]}],[{"l":"NETCONF Unified Translation Unit","p":["Unified translation units are located in https://github.com/FRINXio/unitopo-units repository.","Kotlin is used as preferred programming language in NETCONF translation units because it provides type aliases and better null-safety."]},{"l":"TranslateUnit","p":["Translate unit class must implement interface io.frinx.unitopo.registry.spi.TranslateUnit. Naming convention for translate unit class is just name Unit. Translate unit class is usually instantiated, initialized and closed from Blueprint.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must provide set of supported underlay YANG models. Snippet below shows registration of Unit for junos device version 17.3.","Implementation of TranslateUnit must implement these methods:","toString(): String","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (OpenConfig) translate unit\")","getYangSchemas(): Set","Return YANG models containing composite nodes handled by handlers(readers/writers). It must return empty Set if no handlers are implemented.","getUnderlayYangSchemas(): Set","Return YANG module informations about underlay models used in the translation unit. These YANG modules describes configuration of NETCONF capable device.","getRpcs(underlayAccess: UnderlayAccess): Set>","Return RPC services implemented in the translation unit. Default implementation returns an emptySet. Parameter underlayAccess represents object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","provideHandlers(rRegistry: ModifiableReaderRegistryBuilder, wRegistry: ModifiableWriterRegistryBuilder, underlayAccess: UnderlayAccess): Unit","Handlers(readers/writers) need to be registered in this method. 
underlayAccess represents object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","How to register readers/writers is described in CLI Translation Unit "]},{"l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device.","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or argument nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds"]},{"l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG.For more information about methods please read javadocs.","ConfigListReaderCustomizer- implement this interface if target composite node in YANG is list and represents config data.","ConfigReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents config data.","OperListReaderCustomizer- implement this interface if target composite node in YANG is list and represents operational data.","OperReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents operational data."]},{"l":"Base Readers","p":["Each base reader for netconf readers should be generic. The generic marks the data element within device YANG that is being parsed into. The base reader should contain abstract methods:","fun readIid(): InstanceIdentifier- each child reader should fill in the device specific InstanceIdentifier that points to the information needed for this reader. Arguments may vary and they are used to be more specific IID (e.g. when creating an IID to gather information about a specific interface, you may want to pass interface name as argument).","fun readData(data: T?, configBuilder: ConfigBuilder, )","this method is used to transform OpenConfig data (contained in ConfigBuilder) into device data (T) using .","Naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node.","The framework provides safe methods to use when handling data on device:","safePut deletes or adds managed data. Does not touch data that was previously on the device and is not handled by the writer.","safeMerge stores just the changed data into device. 
Does not touch data that was previously on the device and is not handled by the writer.","safeDelete removes data from the device only if the managed node does not contain any other information (even one not handled by the writer)"]},{"i":"mandatory-interfaces-to-implement-1","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). For more information about methods please read javadocs.","ListWriterCustomizer- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","WriterCustomizer- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter."]},{"l":"Base Writers","p":["Each base writer should be generic and contain abstract methods:","fun getIid(id: InstanceIdentifier): InstanceIdentifier-this method returns InstanceIdentifier that points to a node where data should be written","fun getData(data: Config): T- this method transforms OpenConfig data into device specific data (T)"]}],[{"l":"Native-CLI translation units"},{"l":"Modules structure","p":["The following text block displays a structure of native-cli units that are placed under root 'cli-native-units' module with 2 device types - ios-xr-5 and junos-17. There are also init units under 'ios-xr' and'junos' directories - they are still required to be implemented, however they are already part of classic translation units. The first identifier corresponds to directory name, the second identifier placed in brackets corresponds to module name. All modules are represented by 'pom.xml' files.","Description of the modules:","cli-native-units: Root module that groups all native-CLI-only modules. Submodules are specified per device-type.","unit-parent: Parent unit common for all unit modules (for example 'ios-xr-5-native-unit' and 'junos-17-native-unit'): it specifies common imports. It doesn't need any modification when a new device-type is added.","ios-xr-5-native and junos-17-native: These modules just group unit and models submodules for specific device-types. Each supported device type should have its separated module.","ios-xr-5-native-models and junos-17-native-models: They contain all YANG schemas under \"src/main/yang\" directory - device-template YANG schemas and native-CLI YANG schemas. They are described in next sections in detail.","ios-xr-5-native-unit and junos-17-native-unit: Implementations of native-CLI translation units - these modules contain only single Java file under 'io.frinx.cli.cnative.iosxr5' or'io.frinx.cli.cnative.junos17' package that is responsible for registration of YANGs and providing of device-specific information. More information can be found in the next section.","ios-xr-cli-init-unit and junos-cli-unit-unit: Reused initialization units that are required to be registered as native-cli translation units too. These units can be shared by both classic units which require implementations of handlers and native-CLI units. It is achieved by extending of'AbstractUnitWithNativeSupport' abstract class."]},{"l":"Implementation of units"},{"l":"Device-specific units","p":["All device-specific native-CLI units must extend 'GenericCliNativeUnit' abstract class. 
Description of the implemented methods:","getYangSchemas(): Returned set must contain all device-specific native-CLI schemas that are placed under models module except device-template YANG module that doesn't have to be placed to this set.","getRootInterfaces(): Returned list must contain all classes of root lists and containers (classes generated by MD-SAL generators from YANG schemas) - it simplifies transition between binding-aware and binding-independent worlds.","getSupportedVersions(): Set of supported device versions - it is used for identification of translation units.","getUnitName(): Name of the translation unit - it has only descriptive purposes.","getCliFlavour()(optional): By default, Cisco IOS CLI flavour is used. CLI flavour describes formatting of device running / candidate configuration that is used during parsing of configuration into the tree. Non-Cisco devices should override this method and provide custom CLI flavour in order to make native-CLI readers work (see next example with comments that describe CLI flavour parameters).","Example: Implementation of JUNOS 17 native-CLI unit:"]},{"l":"Init units","p":["Rules for implementation of init units are same for native-CLI and classic units - see documentation: \"Implementing CLI Translation Unit\", subsection \"Init Unit\". The only difference is in the extended class - if an init unit must be registered as both native-CLI and classic translation unit (the most usual scenario), then init unit must extend'AbstractUnitWithNativeSupport' and not just 'AbstractUnit' abstract class."]},{"l":"Device-template YANG model","p":["These YANG schemas are used for describing of device-specific patterns that are required for successful communication with remote CLI. Device-template YANG schema doesn't contain any data schema nodes, it consists only from YANG extensions that are declared in the'cli-native-extensions' model. Multiple native-CLI YANG models can import the same device-template model.","Sample device-template model for IOS XR 5.* devices:","Description of the supported extensions that can be used in a device-template:","show-command: Command used for displaying of the whole running / candidate configuration. It is used for initial population of the device configuration tree that is transformed into DOM format in native-CLI readers. The default string is \"show running-config\".","config-pattern: Template used for 'set' commands that apply a new configuration or update an existing configuration. It must contain '#' placeholder that is replaced by actual command that is going to be sent to remote CLI in native-CLI writers. Default string is \"#\" (without any prefix).","delete-pattern: Template used for 'delete' commands that remove some configuration from a device. It must contain '#' placeholder that is replaced by actual delete command that is going to be sent to remote CLI in native-CLI writers. Default string is\"no #\"."]},{"l":"Native-CLI YANG model","p":["These YANG models are used for modelling of device configuration. Currently supported schema nodes include containers, lists, choice nodes, and leaves with different types. Groupings can also be freely used for organization of YANG structure. 
The following subsections explain general structure of the native-CLI YANG model and application of different schema nodes for modelling of device configuration with examples.","Augmentations are currently not supported in native-CLI models (except the augmentations into UniConfig configuration container which is required)."]},{"l":"Structure","p":["The following YANG snippet shows structure that should all native-CLI YANG schemas follow (variable parts are marked by square brackets):","Description of variables:","[device-type]: Type of the device for which this YANG models some part of the configuration. Examples: junos17, xr5 (if it is necessary, more specific versions can be typed).","[entity]: Part of the configuration that is modelled by this YANG. Examples: interfaces, firewall, acl.","[prefix]: Prefix that is usually an abbreviation derived from the name of the model.","[template-model]: Name of the imported device-template module. Only a single device-template module can be imported, otherwise the whole module is marked as invalid and it is skipped. Afterwards, revision-date and prefix must be selected too.","[revision] with [description]: Date of the YANG modification with description, what was changed in the specific revision. Multiple revisions can be added incrementally as YANG schema is modified.","[root-grouping]: Identifier of the root grouping that contains single root container or list. Multiple root groupings are allowed when multiple root containers are lists are required. For each root grouping there must be a separate augmentation into 'configuration' container.","Importing of the device-template model is not necessary at all - in that case, the default device template is applied - Cisco IOS XR template."]},{"l":"Containers","p":["Containers are used for representations of nodes in configuration which can have at least one child node but there is only one instance of configuration that is placed under this node. For example, let assume the following two lines in the XR 5.3.4 configuration of access-lists:","In this snippet, ipv6 is modelled by container schema node with the same identifier, because 'ipv6' command word is a root node and it can contain only single instance of list with identifier access-list:","It is also possible to wrap multiple containers in a chain. For example, the following command line:","can be modelled by following containers:"]},{"l":"Lists","p":["Similarly to containers, lists also represent command words that may have multiple children nodes. However, nodes represented by lists can have multiple instances in the configuration where individual instances are represented by one or multiple keys. Values of keys are represented by command nodes that follow list command word. For example, let consider following configuration of XR 5.3.4 interfaces:","In this case, 'interface' can be modelled as list schema node with'interface' identifier. It has a one key - interface name (possible values, based on the example, are 'MgmtEth0/0/CPU0/0', GigabitEthernet0/0/0/0, GigabitEthernet0/0/0/1, and GigabitEthernet0/0/0/2).","Name of the leaves that represent list keys are not important. 
Only an order of keys, in case of multiple keys, has a significance from the view of association between configuration and YANG model.","The second example presents a scenario in which a list with multiple keys must be used (IOS XR 5.3.4 access-lists):","There are two keys - name of the access-list and sequence number of the access-list entry that must be unique in scope of the single access-list. Because of this, access-list can be modelled by following list schema node:"]},{"l":"Choices","p":["Identifiers of list, container, or leaf schema nodes are always derived from words identifying parts of the command lines. Choice schema nodes are modelled differently - they are only used for modelling of multiple non-overlapping sets of children commands. Both identifier of choice schema node and case nodes are not important (names of the case schema nodes are usually chosen based on logical option they represent). Choices are handy, if it is required to add YANG-based constraint on combinations of entered commands - wrong combinations of command would fail on device anyway.","For example, the JUNOS 17 allows configuration of different interface types:","In this example, 'hold-time' is a configuration that can be applied only on physical interfaces. On the other side, LACP can be configured only on the bundle interfaces. Because of this logical separation, it has a sense to differ between physical and bundle interfaces in YANG (common settings can be placed directly under 'interfaces' list):"]},{"l":"Leaves","p":["Leaves are used for representation of command parts that don't have next children subcommands. Command node can be represented by one or more words depending on the type of the leaf. The following types of leaves are currently supported:","1. Empty: Empty leaf can be used for commands without any value(there is only one command word that identifies leaf). For example, JUNOS 17 interface 'disable' command:","can be modelled as leaf with empty type:","2. Types with primitive value: Supported primitive types include boolean, string, decimal, int8, int16, int32, int64, uint8, uint16, uint32, and uint64. All of these types can be used for commands that has a single string, boolean, or numeric value (types constrained by a range). The following commands can be modelled as one of these types(different JUNOS 17 damping settings):","YANG representation of leaves 'half-life' and 'suppress':","3. Enumeration- If there are multiple but finite set of possible strings assignable to the command, then the enumeration type should be used. Let consider the following variations of the 'mode' command (IOS XR 5.3.4 LACP configuration):","In this example, 'mode' is modelled as leaf with type enumeration with three possible values:","4. Bits: This type of leaf can be used in scenarios in which there are multiple possible values assignable to the command (similarly to enumeration), but they are not mutually exclusive - different values can be combined in a chain of strings. Consider the following options how to configure Unicast Reverse Path Forwarding on IOS XR 5.3.4:","The part of the command line starting by word 'any' can continue with random combination of options 'allow-self-ping' and 'allow-default' with random ordering too. Because of this reason, leaf with identifier 'any' has bits type:","5. Blob-data- It is a special type of leaf defined in'cli-native-extensions' that can be used for the whole command section with a random structure. 
It is handy for the parts of the configuration that are too complicated to be represented by different YANG structures. Internally, 'blob-data' is a type definition derived from string type. For example, JUNOS 17 firewall rules fulfils high complexity:","Commands 'from' and 'then' can be represented by leaves with'blob-type':"]}],[{"l":"Metrics"},{"l":"Monitoring Uniconfig performance","p":["Dropwizard Metrics is the framework of choice to monitor performance."]},{"l":"Registry naming","p":["All the metrics are currently stored in the uniconfig registry. It can be accessed like so:"]},{"l":"Metric types","p":["All the available metric types can be seen in the documentation."]},{"l":"Naming convention","p":["There are various best practice articles on how to name metrics but one thing is common: It should be clear what is measured."]},{"l":"Adding new metrics"},{"l":"Adding a Meter","p":["Obtain a Meter and then mark all the method calls you want to measure."]},{"l":"Adding a Gauge","p":["For Gauge method getValue() needs to be implemented. It can be done less verbously with lambda expressions so that we avoid writing boilerplate code for an anonymous class:","Here we create a Gauge that returns Integer value, access is synchronized in this case to avoid race conditions."]},{"l":"Tags","p":["Tags are currently not available in the version 4.2.x, although support for them is planned for future major release."]},{"l":"Reporters","p":["Current available reporters are reports to CSV files and reporting via Slf4j to log file."]}],[{"l":"Release notes","p":["Release notes for UniConfig 4.2.10","Release notes for UniConfig 4.2.3","Release notes for UniConfig 4.2.4","Release notes for UniConfig 4.2.5","Release notes for UniConfig 4.2.6","Release notes for UniConfig 4.2.7","Release notes for UniConfig 4.2.8","Release notes for UniConfig 4.2.9","Release notes for UniConfig 5.0.1","Release notes for UniConfig 5.0.10","Release notes for UniConfig 5.0.11","Release notes for UniConfig 5.0.12","Release notes for UniConfig 5.0.13","Release notes for UniConfig 5.0.14","Release notes for UniConfig 5.0.15","Release notes for UniConfig 5.0.16","Release notes for UniConfig 5.0.17","Release notes for UniConfig 5.0.18","Release notes for UniConfig 5.0.19","Release notes for UniConfig 5.0.2","Release notes for UniConfig 5.0.20","Release notes for UniConfig 5.0.21","Release notes for UniConfig 5.0.22","Release notes for UniConfig 5.0.23","Release notes for UniConfig 5.0.24","Release notes for UniConfig 5.0.25","Release notes for UniConfig 5.0.3","Release notes for UniConfig 5.0.4","Release notes for UniConfig 5.0.5","Release notes for UniConfig 5.0.6","Release notes for UniConfig 5.0.7","Release notes for UniConfig 5.0.8","Release notes for UniConfig 5.0.9","Release notes for UniConfig 5.1.0","Release notes for UniConfig 5.1.1","Release notes for UniConfig 5.1.10","Release notes for UniConfig 5.1.11","Release notes for UniConfig 5.1.12","Release notes for UniConfig 5.1.13","Release notes for UniConfig 5.1.14","Release notes for UniConfig 5.1.2","Release notes for UniConfig 5.1.3","Release notes for UniConfig 5.1.4","Release notes for UniConfig 5.1.5","Release notes for UniConfig 5.1.6","Release notes for UniConfig 5.1.7","Release notes for UniConfig 5.1.8","Release notes for UniConfig 5.1.9","Release notes for UniConfig 5.2.0","Release notes for UniConfig 5.2.1","Release notes for UniConfig 5.2.2"]}],[{"i":"uniconfig-507-release-notes","l":"Uniconfig 5.0.7 Release Notes"},{"i":"new-features","l":"✅ New 
Features","p":["Implementation of context-match shell operation"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed establishing of NETCONF stream sessions","Fixed NetconfDeviceCommunicatorTest","Fixed Uniconfig client json parser tests","Fix building history output","Fixed execution of YANG action under some list entry from shell","Fixed ordering transaction log by date","Fixed types of the network-instance/interfaces","Making subscription monitoring loop more robust","Cli session closed/disconnected","Fixed removing of data-change-event subscription","Fixed merging template attribute to replaced node","CLI shell: harmonised composite key delimiter input"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Updated netconf-node-topology:concurrent-rpc-limit parameter","Refactored global DOMSchemaService","Optimization of calculate-diff RPC","Swagger and YangPackager improvements","Fix all owasp sec issues level 8","Upgrade sshd libs to version 2.8.0","Upgrade and cleanup usage of jaxb","Upgrade jetty/jersey/jax-rs dependencies","Reorganisation of NETCONF connection parameters"]}],[{"i":"uniconfig-506","l":"UniConfig 5.0.6"},{"i":"new-features","l":"✅ New Features"},{"l":"Expose operational data about transactions","p":["It would improve visibility what transactions are open on uniconfig instance - when these transactions have been open and what nodes have been changed in the transaction.","Transaction data:","identifier (uuid)","trace id / different parameter (once we support tracing)","creation time","last access time","idle timeout, hard timeout","list of changed nodes (incl. topologies)","additional context (random string, text column)"]},{"l":"Implement metric collection and reporting in Uniconfig","p":["Collect and report metrics such as:","TX pre minute","RPC calls per minute","Task execution queue size","Netconf msg sent count","CLI command sent count","…","Reporting part could be just logging the state of metrics for the time being"]},{"i":"collect-open-transactions-data-in-collect_diag_infosh","l":"Collect open transactions data in collect_diag_info.sh","p":["Please enhance debug collection script to collect details of following","Open Transaction , Read or Read-Write and if possible module which has opened the transaction","For example, this is how NCS displays.","This could help in debugging slowness issues caused if there is any transaction leak."]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Set OWASP dependency check plugin to level 9","p":["decrease owasp level to 9 (in distribution/packaging/zip pom)","fix all dependency issues so that uniconfig will successfully build"]},{"i":"cli-uc-shell---show-transaction-log-in-ordered-list--add-brief-option","l":"CLI UC shell - show transaction log in ordered list & add \"brief option\"","p":["Currently we display the transaction log as a json without ordering. We should assume that the transaction log can become very large and should still be manageable to display. Hence we are proposing the following improvements:","always show the transaction log as an ordered list. Order by transaction timestamp. The most recent transaction should be at the bottom of the list.","add a \"brief\" option to that command and display only one line per transaction log. 
Similar like this"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"OpenAPI .yaml file generating incorrectly","p":["Build, Collaborate & Integrate APIs | SwaggerHub","cli-unit-general API yaml seems incorrectly generated, URIs are wrong"]},{"i":"syst_-data-change-subscriptionscontentnonconfig-not-working","l":"SYST_ data-change-subscriptions?content=nonconfig not working","p":["Based on documentation Kafka notifications","test:","test here https://gerrit.frinx.io/c/system-tests/+/13155"]}],[{"i":"uniconfig-505","l":"UniConfig 5.0.5"},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Reconfigure swagger generator for versa to produce desired depths for all APIs","p":["Since we need to create APIs with depth 4, I have noticed APIs are created to the last container when the depth of yang is less than 4. Can you make API is not generated for the last container, for example 2nd API in the below not required as “global” is the leaf container. This change will reduce size of yaml file and number of APIs"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Uniconfig transaction is not thread-safe","p":["It is not safe to use same uniconfig transaction simultaneously by multiple user-side threads because underlying database connection/transaction is not thread-safe in case of PostgreSQL driver and UniConfig is not doing any additional synchronisation.","Read: Chapter 10. Using the Driver in a Multithreaded or a Servlet Environment","Behaviour that was also observed in UniConfig (it is Oracle DB, but symptoms are similar): Working with multiple threads sharing a single connection"]},{"i":"failed-to-find-node--in-the-topology-uniconfig","l":"Failed to find node '' in the topology 'uniconfig'","p":["It happened already a couple of times when a workflow task failed during the execution of getting data from the device. In VFZ we have a specific task for this operation called uniconfig_read_structured_device_data which gets a specific config from the device","During this execution, there were other devices running in the parallel executing the same task and 2 read_and_execute_rpc_cli tasks too.","The device had the records in the node and mounpoint tables in the UC DB.","DONE, MOVE TO 5.0.4"]}],[{"i":"uniconfig-504","l":"UniConfig 5.0.4"},{"i":"new-features","l":"✅ New Features"},{"l":"Adding option to use json-path also for selection of some subtrees","p":["Currently, jsonpath language can be used in UniConfig only for filtering of data. However, the language itself allows also to select some data using provided json-path.","We need to expose this functionality in UniConfig API using some query parameter (the similar way as it is done for filtering) and also expose this functionality in the uniconfig-client."]},{"i":"shell-scrolling-output---more--","l":"Shell: scrolling output (--more--)","p":["Long UniConfig shell output should be displayed using some scrolling mechanism (equivalent to ‘more' or 'less’ linux tools)."]},{"i":"add-show-history-command-to-uniconfig-shell","l":"Add 'show history' command to uniconfig-shell","p":["It should display last N commands that were executed in the shell. Syntax:","Parameter 'max-number-of-output-commands' should be optional.","This command should be available from both operational and configuration modes."]},{"l":"Add support for aliases inside uniconfig shell","p":["There should be some configuration file in the config directory that will contain defined aliases. 
It should support also place-holders/variables for both values and arrays.","Supporting autocomplete on aliases."]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Improve maven plugin for generation Java classes","p":["yang packager: generate sources only for latest repository","settings: package name - option to change it","setting: disable prefix + javadoc on data-elements"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"NETCONF sessions used for receiving NETCONF notifications stop working","p":["UniConfig does not use keepalive messages for checking, if NETCONF session used for receiving of NETCONF notifications is still alive and triggering reconnection procedure. As a consequence, if connection is dropped without explicit TCP interruption, then UC will not find it out and doesn’t try to re-create connection. However, device is using some form of TCP keepalive messages - device will drop connection after some time.","FIX: Uniconfig is enables and tracks keepalive messages also for NETCONF sessions that are used for NETCONF streams"]},{"l":"Optimisation of jsonb-filtering","p":["It seems that jsonb-filtering causes reading of whole configuration from PG into UC even if it should not be necessary:","checking existence of the node - currently it works by using DOM ‘exists’ operation that reads whole configuration from DB","deriving uniconfig-native prefix / YANG repository - it is derived from configuration, not from DB metadata","FIX: stopping verification of node, if jsonb-filer is used. It required reading of whole config from DB to Uniconfig what making the call much slower."]},{"i":"uc-shell-after-configrequest-commit-the-prompt-was-changed-unexpectedly-from-request-to-config","l":"Uc shell: after config/request commit the prompt was changed unexpectedly from request> to config>","p":["Previous behaviour:","After fix:"]},{"i":"uc-shell-config-mode--show-or-delete---create-object-option-should-be-removed","l":"Uc shell: config mode / show or delete - CREATE OBJECT option should be removed","p":["Previous behaviour:","e.g. here (create new template) should not be present here. Similarly others places in program. Behaviour like this is not expected by user.","After fix:"]},{"i":"uniconfig-503-prints-out-error-on-start","l":"Uniconfig 5.0.3 prints out error on start","p":["Fix: error message in the log was displayed when notifications were set to false in the lighty-uniconfig-config.json file. This error message is no longer displayed."]},{"l":"Flyway migration failed","p":["Migration of data in the database when switching to another version throws error causing that uniconfig is unable to start."]},{"l":"Yang patch operation does not work correctly with leaf list","p":["There are several issues with yang patch when operating on leaf-lists. In particular I have found issues with the insert operation and the merge operation.","Merge operation case:","If the leaf list does not exist, then the merge operation pass without problems. 
However, if the leaf list does already exist, then the merge fails with following error message:","Insert operation case:"]},{"l":"PATCH operation does not work with some paths and target combination","p":["Overview RestConf PATCH operation does not work with certain combination of URL and target","Details","Request URL ( is the UniConfig host, is the id under which the Sonic device is installed):","http:///rests/data/network-topology:network-topology/topology=uniconfig/node=/frinx-uniconfig-topology:configuration/openconfig-interfaces:interfaces/interface=Ethernet52","Method: PATCH Request body:","Produce this response:","Request body:","Produce empty response body with error code 500. UniConfig logs have this record:"]},{"l":"Portchannel trunk-vlans replace","p":["When one trunk vlan is already set on porchannel, then put request to change trunk vlans list returns:","Unsupported type of node …","Current workaround is deleting whole list of trunk-vlans, after that single put request to add removed and new trunk-vlans.","Postman collection is attached. Replace can be also done using this gnmic command:"]}],[{"i":"uniconfig-503","l":"UniConfig 5.0.3"},{"i":"new-features","l":"✅ New Features"},{"l":"Adding failed transactions into transaction log","p":["Description","Previously, only successfully committed transactions have been written into transaction log.","Added state to transaction log entry that determines, if transaction has been successfully or not committed - both successful and failed transactions are part of transaction log.","Documentation","Transaction Log | Frinx Docs","API","Added ‘status' leaf and split ‘commit-time’ into ‘last-commit-time’ and 'failed-commit-time’ (YANG module transaction-log):"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Integrated OWASP dependency check tool into UniConfig","p":["3-rd party libraries are check against security issues during building of UniConfig distribution. If there are some issues with security level higher than configured threshold, built will fail.","Set security threshold level to 10 and fixed corresponding critical errors."]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"i":"fixed-updating-of-leaf-list-content-orderedunordered-on-netconf-device","l":"Fixed updating of leaf-list content (ordered/unordered) on NETCONF device","p":["Unordered leaf-list must be updated using following steps:","removing all items that are not in the updated list","inserting all items that are only in the updated list","There was 1 bug: step 1. 
never happened.","Ordered leaf-list must be updated using following steps:","inserting/moving new/existing items in the correct order (using ordering parameters) [1]","removal of all items that are not in the updated list [2]","There was 1 bug - all items were removed and afterwards re-inserted without usage of special positional attributes - it created conflict on NETCONF layer between edits and thus splitting of NETCONF traffic into 2 edit-config messages."]},{"i":"device-discovery-32-prefix-changed-to-inclusive","l":"Device discovery /32 prefix changed to inclusive","p":["User must be able to ping network with prefix /32 - it must be rendered as single host (special case)."]},{"i":"uniconfig-shell---exit---it-is-expected-to-hit-enter-twice","l":"UniConfig shell - exit - it is expected to hit enter twice","p":["Previous behaviour:","After fix, UniConfig will print user some message, that one more is expected to leave SSH session."]}],[{"i":"uniconfig-502","l":"UniConfig 5.0.2"},{"i":"new-features","l":"✅ New Features"},{"l":"Upgrading templates","p":["Added ‘upgrade-template' RPC into 'template-manager’ YANG module:","API","Both settings are related to auto-upgrading process - they don’t influence execution of RPC which can be still done manually.","Configuration","Description","Documentation","if it fails, we will return standard RESTCONF RFC-8040 error container","Implemented automation of the upgrading process - calling of this RPC automatically at initialisation of UniConfig for all templates present in the DB that don’t use the latest repository.","Implemented RPC for upgrading template to specific YANG repository.","output template name (optional, default value = input template name)","RPC input:","RPC output:","Supplemented template configuration by 2 new settings (lighty-uniconfig-config.json) - enabledTemplatesUpgrading and maxBackupTemplateAge.","template name (mandatory)","Templates Manager| Frinx Docs","without body, just status message","YANG repository name (optional, default value = latest YANG repository)"]},{"l":"Connection notifications","p":["Description","Connection notifications are generated after state of southbound CLI/NETCONF/GNMI node is updated - either status message or connection status.","Notifications are published into dedicated Kafka topic.","They are useful especially for debugging connection issues between UniConfig and network devices.","Documentation","Kafka Notifications | Frinx Docs","API","Structure of notifications are described following YANG module 'connection-notifications':","Added settings used for configuration of Kafka topic and enabled/disabled state (YANG module kafka-brokers):","Configuration","Supplemented corresponding settings into in the lighty-uniconfig-config.json file (by default, these notifications are enabled if globally notification system is enabled):"]},{"l":"Configurable transaction idle-timeout","p":["Description","Introduced new transaction parameter that can be used at creation of new transaction and overrides global idle-timeout.","After inactivity of the transaction, it is automatically closed and an exception will be thrown if user tries to invoke some operation on the transaction.","Documentation","Example request with timeout parameter | Frinx Docs","API","Format of the query parameter 'timeout':","Uniconfig-client","Introduced TransactionParameters class - object of this class can be provided at creation of new transaction. 
By default, transaction-specific idle-timeout is disabled - global idle-timeout is used."]},{"l":"Added option to disable validation phase at commit","p":["Description","UniConfig uses 3-phase commit procedure - validation, confirmed-commit, confirming-commit. Validation is currently always executed on nodes that support validation and have been installed with enabled validation.","This feature introduces flag in the commit RPC using which user can control execution of validation phase.","Documentation","RPC commit | Frinx Docs","API","Added 'do-validate' field into commit RPC input (checked-commit does not supported this feature for now):","Uniconfig-client","In the uniconfig-client validation is 'disabled' by default (opposite behaviour in comparison to RESTCONF API).","Exposed new method in DOMReadWriteTx interface:"]},{"l":"Modification of connection parameters after the first installation without uninstallation","p":["Description","After some CLI/NETCONF device has already been installed, it is possible to update some connection / mount parameters (for example, ‘host' or 'password’).","User can read and update connection parameters under ‘cli' or 'topology-netconf’ topology, under specific network-topology nodes.","Afterwards, UniConfig will use updated connection parameters at the next creation of connection to device.","NETCONF sessions used for receiving of NETCONF notifications are also updated at the next monitoring iteration.","Documentation","Updating installation parameters | Frinx Docs","Uniconfig-client","Example:"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Improved aggregation of NETCONF messages","p":["Non-overlapping edit-config messages are already aggregated into one edit-config message that is sent to NETCONF server. However, this aggregation was primitive - it just serialised all modified subtrees and stacked them under root element without considering option, that paths to these subtrees may overlap.","After this improvement, edit-config message will contain compressed subtree structures without duplicated ‘wrapper’ elements."]},{"i":"added-session-id-to-netconf-logs-netconf-messages","l":"Added session-id to NETCONF logs (netconf-messages)","p":["Description","Previously, only internal Netty’s channel-id was displayed in the logs.","After this improvement, NETCONF-specific session-id, returned from device during exchanging of capabilities, will be used.","Documentation","Logging Framework | Frinx Docs","Making connection-manager unit tests more robust","Preventing random failures because of multi-threaded environment.","Improve error message if device/template doesn't exist","If device/template or another node doesn’t exist, UniConfig should return user-friendly error message that corresponding node doesn’t exist and not some YANG-related error.","Creation of new node with specified YANG repository is still allowed.","Error message before the fix:","Error message after the fix:"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Fixed YANG packager that does not catch broken submodules","p":["Description","Fixed reporting of two kinds of issues related to YANG submodules:","Submodules contain statement “belongs-to” some parent. That parent should contain statement “include”. 
When parent does not contain this statement, uniconfig marks submodule as broken.","When submodule contains “belongs-to” statement, but parent does not exists.","Improved error message output from YANG packager utility.","Documentation","Device Discovery | Frinx Docs"]},{"i":"fixed-device-discovery-behaviour-for-network-with-31-prefix","l":"Fixed device-discovery behaviour for network with /31 prefix","p":["Description","Use cases:","192.168.1.0/32 - returns empty output, there aren’t any usable hosts that can be reached","192.168.1.0/31 - special case, device-discovery component should verify two hosts - .1 and .2","192.168.1.0/30 - returns 192.168.1.1, 192.168.1.2","Documentation","Device Discovery | Frinx Docs"]},{"i":"fixed-calculate-diff-operation-augmentation-nodes","l":"Fixed calculate-diff operation (augmentation nodes)","p":["Augmentation nodes have been skipped and unwrapped during reading of data from device. It resulted in the failed / incorrect calculation of diff on UniConfig layer.","After this fix, UniConfig skips only those augmentation nodes that contain only non-config data nodes (YANG 'config false' statement)."]}],[{"i":"uniconfig-501","l":"UniConfig 5.0.1"},{"i":"new-features","l":"✅ New Features"},{"i":"propagation-of-data-change-events-from-uniconfig--unistore-configuration","l":"Propagation of data-change-events from ‘uniconfig' / 'unistore’ configuration","p":["Description","Implemented propagation of data-change-events into distinct Kafka topic. Data-change-events are currently supported per-node in ‘uniconfig' and 'unistore' network-topologies.","Using subscription, user specifies observed subtrees against data-changes. Afterwards, data-change-events are generated by UniConfig instances after some transaction is committed and committed changes contain subscribed subtrees.","API","Created new YANG module that defines data-change-events structure in form of YANG notifications and RPC calls for manipulation / reading of subscriptions:","Documentation","Kafka Notifications | Frinx Docs","Configuration","Added settings into lighty-uniconfig-config.json file:","dataChangeEventsEnabled- turning on/off generation and distribution of data-change-events (by default, they are enabled)","dataChangeEventsTopicName- name of the Kafka topic (default identifier is 'data-change-events')","Java client","Example, how to use data-change-events as triggers for callback inside UniConfig Java client:"]},{"l":"Added config option to disable immediate-commit model","p":["Description","Immediate-commit model is in some cases dangerous, because changes are automatically committed to managed network devices.","Added option to disable immediate-commit model globally.","Configuration","New setting 'isImmediateCommitEnabled' in the lighty-uniconfig-config.json:","Default value is 'true'."]},{"l":"Calling replace-config-with-oper after sync-from-network in the immediate-commit-model","p":["In the immediate-commit-model, if user called sync-from-network operation, it behaved as 'sync-to-network' operation:","reading configuration from device","resolving diff between actual (device) and intended state (last saved configuration in database)","sending resolved diff to operation - reverting changes, that have been done on device side","This dangerous if network device is configured manually by user or another tool.","Fixed by calling replace-config-with-oper operation after called-sync-from-network operation and before committing temporary transaction created in the immediate-commit model session. 
It will result in storing of loaded configuration to database without performing any action on managed devices.","This change alters only immediate-commit model. Build-and-commit model stays unaltered."]},{"l":"Making default CLI connection parameters configurable","p":["Description","There are couple of CLI connection parameters with some default values defined in cli-topology YANG module that can be specified at installation of device.","This feature allows user to adjust these default parameters without repetitive adjustment in the install-node RPC request.","Priority of using install parameters:","Parameter set in install RPC request","Default parameter set in database","Default parameter from YANG model","Documentation","Device installation | Frinx Docs","API","Exposed default CLI settings into distinct container that is accessible using RESTCONF API (module cli-topology):","Exposed settings in the UniConfig shell - configuration mode / settings container."]},{"l":"Making default NETCONF connection parameters configurable","p":["Description","There are couple of NETCONF connection parameters with some default values defined in netconf-topology YANG module that can be specified at installation of device.","This feature allows user to adjust these default parameters without repetitive adjustment in the install-node RPC request.","Priority of using install parameters:","Parameter set in install RPC request","Default parameter set in database","Default parameter from YANG model","Documentation","Device installation | Frinx Docs","API","Exposed default NETCONF settings into distinct container that is accessible using RESTCONF API (module netconf-node-topology):","Exposed settings in the UniConfig shell - configuration mode / settings container."]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Improved displaying of children nodes of DataNode in Java client","p":["Children nodes were organised under multiple levels of Map objects - it was not readable especially when user was debugging code.","Now, children nodes are displayed directly under simple List collection:"]},{"l":"Added YANG-based documentation to Java client","p":["Added JavaDoc description to DataNode and DataNodePath sub-classes, how they are used in comparison to YANG schema tree.","Example:"]},{"l":"Removed redundant module-name prefixes from built paths in Java client","p":["RFC-8040 specifies that module-name prefix must be added only to the first augmented elements (transition to different namespace).","Previously it worked non-optimally - module name was added to all elements of the path:","After improvement:"]},{"l":"Added option to enable PostgreSQL driver logs in UniConfig","p":["Description","Logging connections and communication between UniConfig and PostgreSQL can be handy in case of debugging of some errors.","Configuration","To log detailed information about executed queries and PG connections, user should set org.postgresql logger level to DEBUG or TRACE."]},{"l":"Added transaction-id also to both RESTCONF requests and responses","p":["Description","UniConfig transaction-id simplifies debugging of executed RESTCONF operations.","Example (added 'Uniconfig transaction' property):","Documentation","Logging Framework | Frinx Docs"]},{"l":"Hiding sensitive data in logs","p":["In the UniConfig logs are shown sensitive data like PostgreSQL DB credentials, etc. 
This is a potential security hole.","Example:","Fixed by hiding JSON configuration parsing details from logs."]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Fixed invocation of device-discovery RPC multiple times","p":["RPC response contained also results from previous RPC invocation.","Fixed by isolation of RPC results."]},{"i":"fixed-setting-of-max-connection-attempts-during-device-installation-clinetconf","l":"Fixed setting of max-connection attempts during device installation (CLI/NETCONF)","p":["Description","Removed max-connection-attempts parameter from install-node RPC. It was clashing with parameters from southbound layers and introducing confusion.","Fixed switched loading of max-connection-attempts and max-reconnection-attempts on NETCONF layer. It resulted in the infinite initial maximum connections attempts (by default, there should be 1 attempt).","Setting default max-connection-attempts to 1 in YANG model (both CLI and NETCONF layers).","Documentation","Updated document: Device installation| Frinx Docs","API","connection-manager - removed leaf max-connection-attempts:","cli-topology - setting max-connection-attempts default value to 1:","netconf-node-topology - setting max-connection-attempts default value to 1:"]},{"l":"Fixed stuck UniConfig API because of interrupted SQL operation","p":["Description","Default socket-read-timeout for the PostgreSQL driver is 0 - UniConfig is waiting forever for result of some query. This causes blocking of other UniConfig operations on specific node, if connection between UniConfig and PG is dropped during execution of some query.","Fixed by exposing socketReadTimeout parameter and setting its default value to 20 seconds.","Configuration","Added socketReadTimeout to database connection settings (lighty-uniconfig-config.json):"]},{"l":"Fixed propagation of error on disabled templates","p":["If templates are disabled, then user will get direct error message with 400 status code. 
Previously it failed on some parsing error or it didn’t fail at all and UniConfig just ignored unknown data.","Example:"]},{"i":"fixed-pki-authentication-to-netconf-device-negative-cases","l":"Fixed PKI authentication to NETCONF device (negative cases)","p":["PKI authentication on device - attempt to install device with reference to not existing private key","Previously it failed with error:","After fix it will fail with error message that private key with specified identifier doesn’t exist.","PKI authentication on device - registering the password protected key with RPC netconf-keystore:add-keystore-entry - but providing bad password","Fixed by validation of input password against key-store.","If it is invalid, UniConfig will return error immediately and will not try to register such private key and afterwards use it during mounting process."]},{"i":"netconf-edit-config-operation-with-insert-attribute-failed-because-of-aggregated--messages","l":"NETCONF edit-config operation with insert attribute failed because of aggregated messages","p":["When insert attribute was used with value before/after, there was problem with NETCONF messages ordering in the aggregated message.","Fixed by assuring that list entry specified by insert attribute is placed before actual list entry in the edit-config message sent to NETCONF server."]}],[{"i":"uniconfig-4210","l":"UniConfig 4.2.10"},{"i":"new-features","l":"✅ New Features"},{"l":"Aggregation of all edit-config NETCONF messages into one edit-config message","p":["Each modification in the transaction was expressed using one edit-config message on southbound layer.","This approach was not optimal:","it generated more network traffic than needed","it could introduce errors, if device checks some references before committing configuration","After this patch, all NETCONF edit-config RPCs in the transaction are aggregated into single edit-config RPC with common parent element."]},{"i":"capturing-changes-in-ordered-listleaf-list-using-calculate-diff-rpc","l":"Capturing changes in ordered list/leaf-list using calculate-diff RPC","p":["Currently, changed order of list entries inside ordered list/leaf-list is displayed as updated whole list with all list entries - not optimal solution.","Added new list to calculate-diff RPC output that captures changes in the ordering of list or leaf-list elements. Such changes are not displayed under created/removed/updated containers."]},{"l":"Validation of leaf-refs","p":["Validation of leaf-ref YANG constraints that are affected by some create/delete/update operation:","Supported following leaf-ref paths:","absolute paths","relative paths","paths with 'current()' XPATH function","Added new RESTCONF query parameter into put/patch/delete operations - checkForReferences.","Implementation conforms RFC 7950 - The YANG 1.1 Data Modeling Language"]},{"l":"Encryption of leaves selected by paths","p":["UniConfig uses asymmetric encryption for ensuring confidentiality of selected leaf and leaf-list values. Currently, only RSA ciphers are supported (both global UniConfig and device-level key-pairs). Encryption is supported in ‘uniconfig’, ‘unistore’, and ‘templates’ topologies.","Global-device encryption architecture - both UniConfig and device uses PKI for encryption of data:","Global-device encryption architecture","In comparison to Global-device encryption architecture this model uses only global key-pair for encryption of data. 
Devices contain only plaintext data."]},{"i":"implementation-of-crypt-hash-type-from-iana-crypt-hash-yang-module","l":"Implementation of ‘crypt-hash' type from 'iana-crypt-hash’ YANG module","p":["UniConfig supports 'iana-crypt-hash' YANG model for specification of hashed values in data-tree using type definition 'crypt-hash'. Hashing works in the 'uniconfig' and 'unistore' topologies. Only NETCONF devices are currently supported because CLI cannot be natively used for reporting of device capabilities that would contain supported hashing function.","Hashing is done only in the RESTCONF layer after writing some data that contains leaves/leaf-lists with 'crypt-hash' type. Afterwards, UniConfig stores, uses, and writes to device only hashed representation of these values.","All 3 hash functions are implemented - 'MD5', 'SHA-256', 'SHA-512'. In case of 'uniconfig' topology, hashing function is selected based on reported feature in the NETCONF capability, in case of 'unistore' topology, UniConfig enforces 'SHA-512' hashing function.","Hashing model"]},{"l":"Using the latest schema at creation of template","p":["Adding configuration into UniConfig that tracks identifier of the UniConfig repository that must be used at creation of new template, if user doesn’t explicitly specify identifier of this repository using ‘schema-cache-directory’ query parameter."]},{"l":"Rebalancing of notifications cluster at runtime","p":["Random distribution of subscriptions to NETCONF notifications streams and turning on/off UniConfig instances may lead to scenario when one of the UniConfig instances in the cluster contain most of the subscriptions while others unequally smaller number.","Fixed by automatic redistribution of already created subscriptions on UniConfig instances and introduction of limits, how many subscriptions can be allocated on the one UniConfig instance in the cluster.","Cluster rebalancing"]},{"l":"Configuration","p":["Added new parameters under “notifications“ element in the lighty-uniconfig-config.json file:"]},{"l":"Implementation of RFC-8072 PATCH operation","p":["Invocation of PATCH that may contain multiple edits.","All edits are invoked sequentially and atomically as single operation.","Supported sub-operations per edit: create, delete, insert, merge, move, replace, remove.","More detailed description: RFC 8072 - YANG Patch Media Type"]},{"i":"added-missing-protocols-to-l2-for-ios-xe-cli-units","l":"Added missing protocols to L2 for IOS XE (cli-units)","p":["Parsing of following protocols:","elmi","pagp","udld","ptppd"]},{"l":"UniConfig whitelist","p":["specification of top-level containers/lists which configuration is synced from device (no other configuration is read from device)","opposite of existing blacklist functionality","either blacklist or whitelist can be specified, not both","API","updated YANG model that defines whitelist/blacklist:","Install-node RPC example (input body):"]},{"l":"UniConfig client thread model","p":["make uniconfig-client thread safe (using client from multiple threads)","making HTTP connection pools configurable (max connections, …)","API:","Introduced connection pool settings:","Introduced UniConfig server settings:","Example:"]},{"l":"Distribution of NETCONF notifications to Kafka","p":["NETCONF devices are capable of generating NETCONF notifications. UniConfig is able to collect these notifications and creates its own UniConfig notifications about specific events. Kafka is used for publishing of these notifications from NETCONF devices and UniConfig. 
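For illustration, a consumer of such a Kafka topic can be as simple as the following Kotlin sketch using the standard Kafka consumer API; the broker address, consumer group and the 'netconf-notifications' topic name are assumptions and must be replaced with the Kafka settings actually configured in UniConfig.

```kotlin
import java.time.Duration
import java.util.Properties
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.serialization.StringDeserializer

fun main() {
    // Broker address and group id are illustrative; use the values configured for UniConfig.
    val props = Properties().apply {
        put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
        put(ConsumerConfig.GROUP_ID_CONFIG, "uniconfig-notification-reader")
        put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer::class.java.name)
        put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer::class.java.name)
        put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    }
    KafkaConsumer<String, String>(props).use { consumer ->
        // Topic name is an assumption - subscribe to whichever topic UniConfig publishes to.
        consumer.subscribe(listOf("netconf-notifications"))
        while (true) {
            consumer.poll(Duration.ofSeconds(1)).forEach { record ->
                // Each record value is a JSON-encoded notification published by UniConfig.
                println("offset=${record.offset()} value=${record.value()}")
            }
        }
    }
}
```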
Currently there are these types of notifications: - NETCONF notifications - notifications about transactions - audit logs (RESTCONF notifications).","NETCONF notifications - Kafka","API","Added subscription API to install-node request - 'stream' container. Example (subscription to 2 NETCONF streams - ‘NETCONF' and 'system’):","Added root list 'netconf-subscription' which contains all active subscriptions.","Corresponding YANG model:","Configuration","Provided initial configuration that can put into lighty-uniconfig-config.json:","Uniconfig-client","Example:"]},{"l":"Dynamic configuration of Kafka brokers","p":["Location of Kafka brokers and other Kafka settings must be configurable using RESTCONF API.","Persistence of this configuration in the database. All UniConfig instances must use same settings.","Option to change/read these settings using CRUD RESTCONF operations.","Configuration that is placed in the configuration file must be used only as initial configuration.","API","RESTCONF API used for reading and modification of all Kafka settings is described by following YANG model:"]},{"i":"installationuninstallation-of-multiple-devices-in-one-rpc","l":"Installation/Uninstallation of multiple devices in one RPC","p":["Added RPCs for installation or uninstallation of multiple devices in the single RPC call. The advantage of this approach in comparison to install-node/uninstall-node RPC is that UniConfig can schedule installation tasks in parallel.","Up to 20 devices can be installed at once.","API","Added RPCs into connection-manager YANG module:"]},{"l":"Added list of node-ids into snapshot-metadata","p":["Added list of node-ids, that are inside particular snapshot, into snapshot-metadata.","API","Added ‘nodes' leaf-list (’snapshot-manager.yang'):"]},{"i":"api","l":"\uD83D\uDCBB API","p":["Added following element into calculate-diff RPC output:","Added checkForReferences query parameter.","Default value is false - if it is set to 'true', then validation is done before application of modification into data-tree."]},{"l":"Introduction of transaction idle-timeout","p":["Idle timeout is more useful/practical than existing ‘absolute’ timeout, especially for long-running workflows - it will minimise the chance that transaction will be dropped after some operation started.","Transaction idle timer is refreshed after transaction is retrieved from registry (-> at invocation of some operation from RESTCONF).","Timed-out transaction is cleaned using existing cleaner.","Idle timeout is configurable only globally (config file).","Absolute timeout is not removed - it coexist with added idle-timeout."]},{"i":"configuration-1","l":"Configuration","p":["Updated configuration section in lighty-uniconfig-config.json- added 'transactionIdleTimeout’ property:"]},{"l":"Install-node RPC","p":["Added new parameters (uniconfig-config:crypto) into install-node RPC:","'uniconfig-config:crypto' - It allows to specify path to public key on device - ‘public-key-path’ (leaf with RFC-8040 path) and cipher type (by default, RSA is used) - ‘public-key-cipher-type’. If path to public key is specified and it exists on device, then Global-device encryption model is used. Otherwise, Global-only encryption model is selected.","'netconf-node-topology:yang-module-capabilities' - If auto-loading of YANG module with encrypted paths is not used and device itself doesn’t specify encrypted leaves, then it is necessary to side-load YANG module with encrypted paths. This parameter is relevant only on NETCONF nodes. 
Side-loaded modules must be expressed in the format of NETCONF capabilities."]},{"i":"configuration-2","l":"Configuration","p":["Global RSA key-pair is stored inside PEM-encoded files in the ‘rsa’ directory under UniConfig root. Name of the private key must be ‘encrypt_key’ and name of the public key must be ‘encrypt_key.pub’. If user doesn’t provide these files, UniConfig will automatically generate its own key-pair with length of 2048 bits. All UniConfig instances in the cluster must use the same key-pair.","Encryption settings are stored in the ‘config/lighty-uniconfig-config.json’ file under ‘crypto’ root object.","'encryptExtensionId' - If this setting is not defined, then encryption is disabled despite of other settings or install-node parameters. The value must have the format [module-name]:[extension-name] and specifies extension used for marking of encrypted leaves/leaf-lists in YANG modules. Corresponding YANG module, that contain this extension, can be part of device/unistore YANG schemas or it can be side-loaded during installation of NETCONF device as imported module from ‘default’ repository.","'netconfReferenceModuleName' - Name of the module for which NETCONF client looks for during mounting process. If UniConfig finds module with this name in the list of received capabilities, then it uses its revision in the lookup process for correct YANG module with encrypted paths (using deviations).","'netconfEncryptedPathsModuleName' - Name of the module which contains deviations with paths to encrypted leaves/leaf-lists. There could be multiple revisions of this file prepared in the ‘default’ NETCONF repository. NETCONF client in the UniConfig chooses the correct revision based on ‘netconfReferenceModuleName’ setting. Together, ‘netconfReferenceModuleName’ and ‘netconfEncryptedPathsModuleName’ can be used for auto-loading of encrypted paths for different versions of devices."]},{"l":"Uniconfig-client API","p":["Added InstallDeviceWithEnabledEncryption example:"]},{"i":"supported-ordered-listleaf-list-operations-restconf--netconf","l":"Supported ordered list/leaf-list operations (RESTCONF & NETCONF)","p":["RESTCONF RFC-8040 supports 2 additional query parameters for PUT and POST methods - ‘insert' and 'point’, see:","RFC 8040 - section 4.8.5","RFC 8040 - section 4.8.6","Using these parameters, it is possible to place list entry to specific position in the list. The 'insert' query parameter can be used to specify how an item should be inserted within an list or leaf-list. The 'point' query parameter is used to specify the insertion point for an item that is being created or moved within an 'ordered-by user' list or leaf-list. Like the 'insert' query parameter.","In the NETCONF client, UniConfig uses edit-config 'insert' attribute to put list entry to the specific position, see:","RFC 6020 - YANG"]},{"l":"API","p":["Introduction of schema for keeping information about the latest YANG repository identifier.","It is configurable using RESTCONF."]},{"i":"introduction-of-rename-patch-operation","l":"Introduction of 'rename' patch operation","p":["This PATCH operation can be used for changing values of one/multiple keys that identify some list entry. 
In the RESTCONF API it was not possible to directly update values of keys.","New PATCH operation with identifier 'rename'.","‘target’: identifier of original list entry","'point': new identifier of list entry"]},{"l":"Separate UniConfig errors to more type","p":["Updated 'frinx-type' YANG module (previously there were processing-error and no-connection error types)."]},{"i":"implementation-of-rfc-8072-patch-operation-1","l":"Implementation of RFC-8072 PATCH operation","p":["Example:"]},{"i":"added-missing-protocols-to-l2-for-ios-xe-cli-units-1","l":"Added missing protocols to L2 for IOS XE (cli-units)","p":["Added enumerations into 'frinx-cisco-if-extension' YANG module (openconfig):"]},{"l":"YANG packager","p":["implemented tool for validation and loading of YANG repository","API:","User can find corresponding script it in the utils/ directory (part of distribution).","Script './convertYangsToUniconfigSchema' contains four arguments. Each one has its own identifier so user can use any order of arguments.","Two arguments are required, namely the path to resources that contain YANG files and the path to the output directory where user wants to copy all valid YANG files. Other three arguments are optional. First one is the path to the \"default\" directory which contains some default YANG files, second one is the path to the \"skip-list\" and last one is a \"-to-file\" flag, which user can use when he wants to write a debug output to file.","-i /path/to/sources - required argument. User has two options for where the path can be directed:","to the directory that contains YANG files and other sub-directories with YANG files","to the text-file that contains defined names of directories. These defined directories have to be stored on the same path as text-file.","-o /path/to/output-directory - required argument. User can define path where he wants to save valid YANG files. Output directory must not exist.","-d /path/to/default - optional argument. Sometimes some YANG files need additional dependencies that are not provided in source directories. In this case it is possible to use path to the 'default' directory which contains additional YANG files. If there is this missing YANG file, YANG packager will use it.","-s /path/to/skip-list - optional argument. User can define YANG file names in text file that he does not want to include in conversion process. This file must only contain module names without revision and .yang suffix.","-to-file - optional argument. When user uses this flag, then YANG packager also saves the debug output to a file. This file can be found on a same path as 'output-directory'. It will contain suffix '-info' in its name. 
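A possible invocation of the packager script with the arguments listed above (directory and file paths are placeholders):

```bash
# -i  directory (or text file listing directories) with source YANG files
# -o  output directory for valid YANG files; it must not exist yet
# -d  optional directory with additional YANG files used to satisfy missing imports
# -s  optional text file with module names (no revision, no .yang suffix) to skip
# -to-file  also write the debug output to a file next to the output directory
cd utils/
./convertYangsToUniconfigSchema -i /opt/yang-sources -o /opt/uniconfig-repository \
  -d /opt/default-yangs -s /opt/skip-list.txt -to-file
```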
If the output directory is called 'output-directory', then the file will be called 'output-directory-info'."]},{"l":"UniConfig notifications about RESTCONF requests","p":["Publishing all RESTCONF traffic into PostgreSQL ‘notification' relation and Kafka 'restconf-notifications’ topic.","API","Created YANG model for RESTCONF notifications:","Configuration:"]},{"i":"bug-fixes","l":"❌ Bug Fixes"},{"l":"Fixed UniConfig rollback for CLI devices","p":["Rollback operation after failed commit, that included some CLI devices, was not working at all.","Fixed by re-implementation of the rollback process."]},{"l":"Filtering operational data from read NETCONF device configuration","p":["There are some devices that report both configuration and operational data via gRPC even if UniConfig reads only configuration data.","Fixed by explicit removal of operational data elements from read configuration before writing this configuration into database."]},{"l":"Fixed capturing of command response from Telnet session","p":["The size of internal buffer was hard-coded - now it is flexible based on number of received bytes from Telnet session. It caused trimming of command output in the execute-and-read RPC response."]},{"l":"Fixed deadlocks caused by superfluous synchronisation in transaction manager","p":["Synchronisation of component that is responsible for loading/creation/closing of transactions was unnecessary constrained - it resulted in dead-locks, especially when one UniConfig transaction was accessed asynchronously from different threads."]},{"l":"Fixed lost ordering of list elements after reading of some data","p":["If user read both ‘configuration' and ‘operational’ list elements using RESTCONF API (’content=all' query parameter), order of elements was lost during merging of these two sets.","After fix, configuration elements are displayed first, then operational-only elements are displayed."]},{"l":"Fixed interrupted ping command executed by Device Discovery service","p":["If user executed device discovery RPC with more IP addresses than the capacity of internal thread pool, some scheduled ping tasks were cancelled by timeout process.","Removed timeout from thread pool - tasks wait in the queue without time limit."]},{"l":"Fixed deadlock between transaction closing and UniConfig operation","p":["Procedure for closing transaction is called either explicitly using close-transaction RPC or automatically from transaction cleaner.","If at the same time some transaction is used in the invoked UniConfig operation, then it may lead to the deadlock - using transaction that was expired and is being closed.","Fixed by synchronisation of there events in the transaction manager."]},{"i":"get-template-info-operation-must-be-part-of-read-only-transaction-uniconfig-client","l":"Get-template-info operation must be part of read-only transaction (uniconfig-client)","p":["This operation was only part of read-write transaction."]},{"i":"when-notifications-are-enabled-uniconfig-log-is-getting-filled-with-psqlexception-continuously","l":"When notifications are enabled, uniconfig log is getting filled with PSQLException continuously","p":["Subscription table was not locked in the loop used for acquiring free subscription to NETCONF streams. Instead, pg_locks system view was locked. 
This led to various issues with permissions.","Fixed by locking only rows in the subscription table instead of rows in the pg_locks view."]},{"l":"Installation of device with bad password resulted in wrong behavior","p":["The error message was not correctly propagated into the install-node RPC output.","Fixed - it will contain the error message “mountpoint was not succesfully created“."]},{"l":"Fixed ignoring of unknown elements received from NETCONF device","p":["Even if ‘strict-parsing' was set to ‘false’, the NETCONF client sometimes didn’t ignore unknown elements that were placed under a parent node of type 'list'."]},{"l":"Fixed downloading of schemas from NETCONF server running on netconf-testtool","p":["Downloading of schemas from a simulated device (netconf-testtool) didn't work at all. The user had to provide YANG schemas of the simulated device manually in the UniConfig ‘cache’ directory."]},{"l":"Fixed JSONB filtering for UniStore topology","p":["The JSONB filtering feature didn’t work on configuration under unistore nodes."]},{"l":"Fixed calculate-diff RPC with updated root leaves","p":["The calculate-diff RPC failed if there were some updated/created/removed root leaves."]},{"l":"Fixed disconnecting CLI because of invalid characters in the prompt","p":["If the executed commands are too long, an incorrect character appears which prevents the CLI from processing the prompt and causes the application to hang.","Fixed by ignoring such characters when parsing the command prompts returned from the device."]},{"l":"Fixed closing of UniConfig transaction after failed commit operation","p":["If the commit RPC failed unexpectedly (500 status code), then the UniConfig transaction was not closed and stayed hanging, blocking other transactions that would modify the same nodes.","Fixed by always closing the UniConfig transaction at the end of the commit RPC if it was not closed by the operation itself."]},{"l":"Fixed handling of incorrect input pagination parameters","p":["Returning a 400 error message if the input is not correctly formatted.","Example:"]},{"l":"Fixed providing of multiple slf4j bindings on classpath","p":["Keeping only one slf4j implementation on the classpath, so there aren’t any conflicts."]},{"l":"Stop closing of configuration mode in the UniConfig shell after each commit operation","p":["State before:","State after:"]},{"l":"Fixed writing of augmentation data at commit operation to southbound layer","p":["This is a regression introduced during implementation of the “validation” and “confirmed commit” features. Fixed by wrapping augmentation nodes in non-mixin parent containers."]},{"l":"Fixed validate RPC output with empty input","p":["After modification of multiple nodes in the transaction, the validate RPC with empty input:","Returns back only:","But it must contain all modified nodes."]},{"l":"Fixed ordering of entries in the transaction-log","p":["Committed transactions must be sorted by the time when the transaction was committed. Previously, the order was random (a read sketch follows below)."]},
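A hedged sketch of reading the transaction log over RESTCONF to check committed transactions and their ordering; the data path and query parameter are assumptions based on the 'transaction-log' module mentioned in these notes.

```bash
# Sketch only: list committed transactions; entries should now be ordered by commit time.
curl -X GET \
  'http://127.0.0.1:8181/rests/data/transaction-log:transaction-log?content=nonconfig' \
  -H 'Accept: application/json'
```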
{"i":"improvements","l":"\uD83D\uDCA1 Improvements"},{"l":"Removed old draft-02 RESTCONF implementation","p":["We stopped using the old RESTCONF implementation.","Only the new RFC-8040 RESTCONF is supported."]},{"i":"configuration-3","l":"Configuration","p":["Removed the “jsonRestconfServiceType“ setting from “lighty-uniconfig-config.json”:"]},{"l":"Removed option to turn off transactions","p":["This setting was confusing, because enabled transactions still support both the immediate-commit and build-and-commit models."]},{"i":"configuration-4","l":"Configuration","p":["Removed “uniconfigTransactionEnabled“ from the configuration file:"]},{"i":"improved-invalid-nesting-of-data-error-message","l":"Improved 'Invalid nesting of data' error message","p":["This error occurred without a descriptive message if the user specified a list without the correct brackets in the input JSON body.","Improved error message - it points to the place/element at which the error occurred (parent element)."]},{"l":"Removed AutoSyncService","p":["This component was responsible for automatically reading some configuration after pushing configuration to the device.","However, this process was not very visible to the user and could cause issues - we decided to remove it, so similar functionality must be implemented on the application layer."]},{"l":"Specification of default directory in the YANG packager utility","p":["The packager script expected ‘default’ to be the name of the default directory. It must be able to accept any directory name after the -d parameter."]},{"i":"separate-uniconfig-errors-to-more-type-1","l":"Separate UniConfig errors into more types","p":["Introduction of more granular error types that are returned in the response messages of UniConfig RPC operations.","The user should be able to identify in which UniConfig component/layer the error occurred."]},{"i":"enabledisable-notifications-per-topic","l":"Enable/disable notifications per topic","p":["Previously it was only possible to enable/disable notifications globally (all topics).","Added an option to enable/disable notifications per topic.","Added 3 new leaves that are placed under the “kafka-settings“ container.","API","Configuration","Initial configuration can be specified in the lighty-uniconfig-config.json file:"]},{"l":"Renamed elements in notification system","p":["Goal - improved readability.","subscription list → netconf-subscription","topic name restconf-notifications → audit-logs","API","Updated subscription list and YANG module name:","Renamed restconf-notifications module:","Updated topic name for RESTCONF notifications:","Configuration","Updated topic name and corresponding field name:"]},{"l":"Removed AAA","p":["Removed AAA code from UniConfig.","AAA was used for:","RESTCONF authentication (basic) - not needed, it can be provided by an application gateway","encryption in NETCONF - moved the corresponding functionality to the NETCONF module","user identification - not needed, this functionality will be covered by tracing logs","API","Removed “user-id“ from the “audit-logs“ module:","Removed “username” from the “transaction-log” module:"]},{"i":"uniconfig-shell-ability-to-configure-multiple-leafs-with-single-set-operation","l":"UniConfig shell: Ability to configure multiple leafs with single SET operation","p":["If there are multiple leaves under the same container/list, the user should be able to configure them in a single command line.","API:","Sample YANG model:","Commands for setting client-alive-interval and client-alive-count-max:","New 
approach:"]},{"l":"Removing unused UniConfig monitoring system","p":["Removing of following field from UniConfig instance DB relation - backup-instance.","Removing periodical monitoring of UniConfig instances (component in the UniConfig layer) and taking leadership over nodes in the cluster.","Removing unused DB business API services that were used in the [1] and [2].","Configuration","Before changes:","After changes (removed multiple settings):"]},{"l":"Removed old UniStore implementation","p":["UniStore was previously implemented separately from UniConfig. Now it is integrated into UniConfig with distinct topology identifier 'unistore'."]},{"l":"Using cached thread-pool in the device-discovery service","p":["There was a fixed thread-pool that kept all the threads open all the time.","Using cached thread-pool with a small initial thread amount and higher max thread amount e.g. CPU_COUNT * 8."]},{"i":"configuration-5","l":"Configuration","p":["Added “maxPoolSize“ setting to configuration file:"]},{"i":"display-only-sub-structure-with-show-command-in-uniconfig-shell","l":"Display only sub-structure with \"show\" command in UniConfig shell","p":["Before patch:","After patch (just displaying what's there inside settings/system accordingly):"]},{"l":"Providing default UniStore node id in the UniConfig shell","p":["When we create a new UniStore node we manually had to give it a node-id. Say, we are configuring ssh now, it needs to be a generic command which doesn't expect the node-id to be given by the user.","Before patch ('new' is the node identifier):","After patch:","Configuration:","Default UniStore node identifier can be configured in the lighty-uniconfig-config.json (default value is 'system'):"]},{"l":"Removed unused Maven plugins","p":["Removed unused Maven plugins that are executed during build process and thus making building longer."]},{"l":"Removed AspectJ from UniConfig","p":["AspectJ makes code more error-prone and complex for debugging - removed usage of this library in the RESTCONF and dependencies."]},{"i":"documentation-additions","l":"\uD83D\uDCDC Documentation additions"},{"i":"validation-of-leaf-refs-1","l":"Validation of leaf-refs","p":["Validation of leaf-ref YANG constraints that are affected by some create/delete/update operation:","leafref-validation"]},{"l":"idle-timeout","p":["Introduced transaction idle-timeout","Updated configuration section in ‘“lighty-uniconfig-config.json” - added 'transactionIdleTimeout’ property:"]},{"l":"Encryption","p":["UniConfig uses asymmetric encryption for ensuring confidentiality of selected leaf and leaf-list values."]},{"i":"insert--point","l":"Insert & Point","p":["RESTCONF RFC-8040 supports 2 additional query parameters for PUT and POST methods - ‘insert' and 'point’"]},{"l":"Hashing","p":["UniConfig supports 'iana-crypt-hash' YANG model for specification of hashed values in data-tree using type definition 'crypt-hash'."]},{"l":"Templates","p":["Added information about usage of the templates"]},{"i":"rename-patch-oper","l":"Rename patch oper.","p":["This PATCH operation can be used for changing values of one/multiple keys that identify some list entry.","Rename"]},{"l":"Kafka clustering","p":["Random distribution of subscriptions to NETCONF notifications streams and turning on/off UniConfig instances may lead to scenario when one of the UniConfig instances in the cluster contain most of the subscriptions while others unequally smaller number."]},{"l":"YANG Patch","p":["Invocation of PATCH that may contain multiple 
edits."]},{"i":"uniconfig-whitelist-1","l":"UniConfig whitelist","p":["List of root YANG entities that should be read. This parameter has effect only on NETCONF nodes.","Whitelist"]},{"i":"yang-packager-1","l":"YANG Packager","p":["Implemented tool for validation and loading of YANG repository"]},{"l":"Install multiple nodes","p":["Added RPCs for installation or uninstallation of multiple devices in the single RPC call. The advantage of this approach in comparison to install-node/uninstall-node RPC is that UniConfig can schedule installation tasks in parallel.","Uninstall multiple nodes"]},{"l":"Snapshot-metadata","p":["Added list of node-ids, that are inside particular snapshot, into snapshot-metadata."]}],[{"i":"uniconfig-429","l":"UniConfig 4.2.9"},{"l":"UniConfig","p":["[BUG FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","added GNMi southbound protocol","added node list into snapshot-metadata - it contains information about nodes that are captured using snapshot - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/snapshot-manager/obtain_snapshot_metadata/obtain-snapshot-metadata.html","don't fail dry-run commit if there aren't any changed nodes","fixed behaviour of validate RPC","fixed calculate-diff with changed root leaf","fixed calculate-diff: uniconfig-native branch didn't work fine with updated leaf nodes under choice nodes","fixed comparison and updating of configuration fingerprints(synchronization issues between DB and UniConfig cache)","fixed DeviceDiscovery: parsing of NULL hostname","fixed displaying whole list content using UniConfig shell","fixed dry-run commit - it closed transaction if list of target nodes was empty","fixed replace-conf-with-oper - NullPointerException","fixed transaction leak (CLI shell)","fixed using of UniConfig on machines with less than 4 CPU cores","get-template-info RPC: showing information about all variables in specified template","implementation of git-like diff that shows diff output with git-like marks - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/uniconfig-node-manager/rpc_calculate-git-like-diff/calculate-git-like-diff.html","implemented RPC to verify install status for a set of node-ids - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_check_installed_devices/check-installed-devices.html","improved apply-template RPC: added type safety - application of value to variable with specified type","install-multiple-nodes / uninstall-multiple-nodes (RPC) - option to install/uninstall multiple devices using one request - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/uniconfig-node-manager/uniconfig_install_multiple_nodes/install-multiple-nodes.html","introduced unistore topology for storing settings / 'dummy' device configuration - supported commit (persistence of unistore nodes), replace-config-with-oper, and calculate-diff operations - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/unistore-api/unistore.html","logging of transaction ID","UniConfig shell - prompt user for commit if they leave config mode after changes were made"]},{"l":"CLI","p":["[NEW FEATURES]","logging CLI request and responses (logging broker) - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/operational-procedures/logging/logging.html#cli-messages","[BUG FIXES]","fixed closing of CLI 
mountpoint created using lazy CLI strategy","fixed propagation of error message from mount process into install-node RPC output"]},{"l":"RESTCONF","p":["[NEW FEATURES]","immediate commit model - automatic creation of new transaction per user request - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/immediate-commit-model/immediate-commit-model.html","support HTTP2 on server side","[BUG FIXES]","fixed displaying of candidate nodes from non-existing augmentations","fixed unclosed/leaked UniConfig transaction","fixed parsing of multi-level fields query parameter","[IMPROVEMENTS]","making module-name prefix optional in value of fields query parameter"]},{"l":"NETCONF","p":["[NEW FEATURES]","exposed strictParsing parameter into NETCONF mountpoint - ignoring unknown elements received from NETCONF server - documentation: https://gerrit.frinx.io/c/Frinx-docs/+/11724","sorting of list elements by one or multiple fields - documentation: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/uniconfig-operations/restconf/restconf.html#sorting","[IMPROVEMENTS]","reducing logs generated by NETCONF cache loader","updated naming of pagination query parameter"]},{"l":"TRANSLATION-UNITS-FRAMEWORK","p":["[IMPROVEMENTS]","sending list size hint to translation unit writers"]},{"l":"CONTROLLER","p":["[IMPROVEMENTS]","logging creation/closing of UniConfig transaction","removed transaction-log limit from database","[BUG FIXES]","handling of errors that occur in readers/writers","fixed reading snapshot-metadata from database","fixed JSONB filtering: parsing of embedded paths"]},{"l":"SWAGGER","p":["[NEW FEATURES]","added option to ignore config nodes in order to produce oper only documentation","added range constraints to leaves","enable Maven swagger generator for uniconfig native models","[IMPROVEMENTS]","removed swagger path generator for old restconf","[BUG FIXES]","fixed description generator for leaves"]},{"l":"NETCONF TRANSLATION UNITS","p":["[BUG FIXES]","re-enabled XR-6 models","fixed XR-6 interface configuration writer (MTU)","[IMPROVEMENTS]","decreased surefire heap to 2G","optimization: stop recreation of NetconfAccessHelper","set max heap to 4G when running unit-tests to avoid outOfMem exception when running tests"]},{"l":"CLI TRANSLATION UNITS","p":["[Huawei]","created units: login banner, HTTP commands, sysname command, VLAN, telnet and ssh, user-interfaces, RADIUS, QoS, ipv6 and traffic-filter commands","fixed: mounting Huawei device","[SAOS6]","created units: local/remote interfaces, deleting VLAN and physical interface","fixed: reading metadata, ordering of commands for adding network instances","improved: the way to determine if the ring is major or sub ring","[SAOS8]","fixed: reading interface sub_ports, reading metadata","[IOS/IOS-XE]","fixed: deleting all service instances, reading metadata, prefix-lists with 0 entries not reconciled ipvpn, handling invalid MTU value, parsing ACL set"]}],[{"i":"uniconfig-428","l":"UniConfig 4.2.8"},{"l":"UniConfig","p":["[NEW FEATURES]","UniConfig shell: basic CRUD operations (configuration/operations mode), RPC calls, YANG actions.","Validate RPC: validation of NETCONF configuration by target device.","Device discovery RPC: searching for open TCP/UDP ports on target hosts ICMP reachability.","[IMPROVEMENTS]","Simplification of UniConfig RPCs in the transaction: RPCs(is-in-sync, commit, checked-commit, replace-config-with-operational, calculate-diff, sync-from-network, dryrun-commit) should work now with 
empty input. If the input is empty, operation will be invoked on all touched nodes.","[FIXES]","Unified representation of empty snapshot metadata - it will return 404.","Propagation of southbound error message to Uniconfig layer after failed installation."]},{"l":"CONTROLLER","p":["[NEW FEATURES]","Auto-generation of local UniConfig instance name, if it is not set in the configuration file.","[FIXES]","Fixed persistence of templates: fixed extraction of node-id from path.","Fixed omitting of module-name from URI: skip openconfig/native-CLI augmentations from created UniConfig-native schema.","Fixed parent module lookup when resolving leafrefs- parent module was mapped not to parent, but the submodule itself.","Fixed parsing of source-ids from YANG files- don't inherit revision from parent module.","[IMPROVEMENTS]","Improved error message on failed building of schema context.","Optimized YANG schema cache: Removed in-memory schema cache listener that was caching bulky AST form of all sources. Caching of them is not valuable anymore because there is only 1 schema context per device-type."]},{"l":"SWAGGER","p":["[FIXES]","Removed trailing slash from generated URIs (conforming RFC-8040 format).","Fixed importing of 4.0.0-alpha-1-SNAPSHOT (maven-core).","[IMPROVEMENTS]","Stop emitting operational nodes in swagger.","Adding snapshots-metadata and tx-log to generated swagger-api."]},{"l":"CLI","p":["[FIXES]","Fixed initialization of SSH session: Enforced following order of messages in SSH client - Protocol (SSH-2.0-APACHE-SSHD-2.4.0), Protocol (SSH-2.0-Cisco-1.25), Key Exchange Init, Key Exchange Init(some devices don't accept switching Protocol and Key Exchange Init messages).","Fixed setting infinite number of reconnection attempts."]},{"l":"NETCONF","p":["[NEW FEATURES]","NETCONF PKI data persistence: persistence of crypto information in the file-system.","[FIXES]","Capturing error message from SSH session initialization process.","Fixed setting infinite number of reconnection attempts.","Fixed self-reconnection of NETCONF session (issue with keepalive timer).","Fixed netconf testtool in mdsal-persistent-mode - do not share Datastore across all devices.","Fixed overwriting IETF schemas by UniConfig shcemas in netconf-testtool.","[IMPROVEMENTS]","Removed unused netconf-ssh classes.","Improving the way of printing NETCONF reconnection attempts.","Testtool: Enable manipulation of operational data over NETCONF."]},{"l":"RESTCONF","p":["[NEW FEATURES]","Pagination: get-count, limit, and start-index query parameters.","[FIXES]","Fixed adding schema-respoitory parameter to PATCH operation.","Fixed serialization of identityref key value."]},{"l":"CLI TRANSLATION UNITS","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Huawei: Add caching for \"display current-configuration\" command.","Huawei: created TU for AAA.","Huawei: created TU for ACL.","Huawei: created TU for physical, VLAN interfaces, sub-interfaces.","Huawei: created TU for trunk and access VLANs.","Huawei: Read interfaces of Huawei devices with \"display interface brief\".","Huawei: Updated parsing of output for L3-VRF.","IOS XE: Fixed missing some information about route maps for IOS.","IOS XE: Fixed sending \"dot1q 1-4094\" to IOS XE devices.","SAOS6: All interfaces cannot be marked as Ethernet.","SAOS6: Changed name for l2vlan interface to \"cpu_subintf_\" l2vlan name.","SAOS6: Fixed creation of sub-port on EthernetCsmacd interfaces.","SAOS6: Reading all interfaces from ciena devices using command\"interface show\"."]},{"l":"NETCONF 
TRANSLATION UNITS","p":["[FIXES]","Fixed importing ietf-inet-types - there are multiple revisions available in the UniConfig.","[IMPROVEMENTS]","Speed up device model build by disabling various maven plugins."]},{"l":"OPENCONFIG","p":["frinx-huawei-network-instance-extension - added network-instance extension.","frinx-saos-if-extension - added ipv4 and ipv6 address extension.","frinx-cisco-if-extension - the dot1q value type is changed from int to string and the range is saved as a string.","frinx-acl-extension - ACL for huawei devices","frinx-openconfig-aaa, frinx-openconfig-aaa-radius, frinx-openconfig-aaa-tacacs, frinx-openconfig-aaa-types, frinx-huawei-aaa-extension - added aaa and radius modules from openconfig.","frinx-huawei-if-extension - added yang for huawei interface and sub-interface extensions.","frinx-openconfig-bgp-types, frinx-openconfig-extensions -fixed bug with community set values."]}],[{"i":"uniconfig-427","l":"UniConfig 4.2.7"},{"l":"Uniconfig","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Added UniConfig transaction-id as fingerprint for devices not supporting it.","Adjusted persistence of mount information - node with the same ID may be present in both CLI/NETCONF topologies - and node only from one topology at the same time can be used for installation on UniConfig layer (configuration is synced and parsed).","Changed native-CLI architecture - UniConfig calls native-CLI readers/writers directly using BI API - BA translation layer provided by Honeycomb is redundant.","Fixed calculate-diff - Removing the whole list node with all list entries.","Fixed commit output: if the configuration of one of the nodes fails at any phase, then the outputs for all nodes will always contain a rollback flag.","Fixed creation/removal of dry-run Unified mountpoint - synchronization problems.","Fixed dry-run commit - Dry-run commit should trash journal of nodes that haven't been 'touched'.","Fixed losing of some tags in DOM nodes (application of template)","Fixed reading of uniconfig-native flag - unboxing of null Boolean to boolean.","Fixed rollback operation after commit/checked-commit.","Fixed sync-from-network for unavailable nodes - Comparison of config fingerprints failed for nodes that are unavailable because reading of fingerprint failed.","Fixed transfering of template tag from template to uniconfig topology at apply-template RPC (it should not happen).","Fixed version-drop in copy RPC.","Fixed writing ordered-map nodes during string substitution process(application of template).","Handling reordering of list entries in the calculate-diff - instead of sending delete+replace operations to the southbound layer.","Implementation of get-installed-nodes RPC: used for listing installed UniConfig nodes.","Implementation of revert-changes RPC: reverting transaction that is stored in transaction-log and identified by unique UUID.","Implementation of transaction-tracker (transaction-log): tracking of successfully committed data.","Improved error messages - using serialized form of YangInstanceIdentifier in logs or error messages, if possible.","Improved error messages during application of template.","Improving the existing algorithm that collapses diff from honeycomb(parallel streams).","Integration of fingerprint validation to templates - writing of fingerprint of modified templates to database and verification of fingerprint before commit.","Introduction of install-node, uninstall-node, mount-node, and unmount-node RPCs - a new way to install nodes into UniConfig with split 
concepts of installation and mounting. Mounting is always done on demand and the mountpoint is alive as long as some transaction is using this mountpoint.","Introduction of UniConfig transactions - dedicated/shared transactions concept: multiple users can use UniConfig safely from isolated transactions. UniConfig RPCs are part of UniConfig transactions - information about transaction-id is passed from the RESTCONF layer into the UniConfig layer.","Making UniConfig instance stateless - data is separated from UniConfig (PostgreSQL database) and UniConfig doesn't keep persistent connection to devices. Data and connection recovery is not done by UniConfig instances anymore (coordination, monitoring, and recovery process is not orchestrated by UniConfig). From the view of data-tree, UniConfig is used only as a cache layer on top of PostgreSQL database and caching is done only in the scope of transaction.","Mark sync operation failed on empty config.","Removed data-tree cache layers on CLI and NETCONF layers - UniConfig directly writes data to CLI/NETCONF mountpoints - it simplifies syncing process too.","Removed snapshot limit - it is not used anymore since snapshots are stored in the database and this database should manage its storage limits.","Removed unused Karaf features.","UniConfig shell prototype: SSH server, RPC operations, simple read operation.","Using commit RPC for committing snapshots and templates.","Using distributed advisory locks provided by PostgreSQL for locking of UniConfig nodes during commit/checked-commit operation. If another transaction perfors commit at the same time, it will fail before execution of the second commit.","Validation of conflicts between different transactions: added data-tree and config fingerprint validation before commit / checked-commit RPC invocation."]},{"l":"Controller","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Added synchronization when generating BA->BI codecs.","Added workaround for 'metadata not available' data-tree bug.","Allow positional information in YangInstanceIdentifier (useful for operations under ordered lists).","Allow users to specify attributes without module-name (template tags).","Breaking PUT modifications to specific modifications in the data-tree: improving 'optimistic lock' granularity.","Ensuring parents by merge: avoiding ridiculous errors when data-tree allows to write data to nodes which parent is missing.","Exposed simple container merge utility.","Extending RPC service by custom parameters that can be passed from RPC caller to RPC implementation.","Fixed creation of DocumentedException from XML (document may include redundant namespaces).","Fixed data-tree modifications: merge->put->delete operation chain.","Fixed disappeared tag from template data-tree.","Fixed leaked DB connection on health-check operation.","Fixed order in which database writers are called (adding priority to DatabaseWriter API).","Fixed race-conditions in 3-phase datastore commit.","Fixed searching for fallback context on nodes that were not mounted(uniconfig-native).","Fixed storing of the default schema repository into PostgreSQL.","Generalisation of NETCONF repository into YANG repository.","Implemented standalone DOM broker - stopping to use clustered/distributed DOM brokers.","Integration of Flyway library to Uniconfig: easier upgrading of database schema and migration of data.","Integration of JSONB filtering of configuration on the level of DAOs.","Integration of UniConfig transaction manager with database and datastore transactions 
- used for management of shared/dedicated transactions.","Introduction of embedded PostgreSQL for testing purposes - it can be enabled from the UniConfig configuration file.","Making the database layer more thread safe (using 'SELECT FOR UPDATE' in some queries).","Optimized creation of uniconfig-native schemas.","Persistence of logging configuration in PostgreSQL.","Persistence of snapshots in PostgreSQL.","Persistence of templates in PostgreSQL.","Persistence of transaction-log in PostgreSQL.","Preserving order of list/leaf-leaf elements in the data-tree.","Removed unnecessary dependencies of xtend maven plugin.","Removed unused Karaf features.","Replaced asynchronous DB API by synchronous DB API - JDBC connections are synchronous.","Separated persistence of UniConfig nodes and representing mountpoints.","Stop submitting datastore transactions - it must be closed - datastore is used only as cache.","Validation and locking of templates and UniConfig nodes on the level of UniConfig transaction."]},{"l":"Swagger","p":["[FIXES]","Fixed bug caused by swagger-uniconfig-go.","[IMPROVEMENTS]","Make openAPI generated for uniconfig more useful.","Added Unified layer models to swagger dependencies."]},{"l":"Translation units framework","p":["[NEW FEATURES]","Added native-CLI binding-independent API.","[IMPROVEMENTS]","Removed unused artifacts.","Optimized chunk cache - do not store entire writer in chunk cache, so GC can take care of writers as soon as possible.","Detection of complex reordering of list entries in diff output.","[FIXES]","Fixed commit rollback failing: the bug was caused by an attempt to execute an inverse command of an unsuccessful command."]},{"l":"CLI","p":["[IMPROVEMENTS]","Removed unused Karaf features.","Exposed binding-independent data support to native-CLI API.","Exposed services for direct device access to MP.","[FIXES]","Replace maxConnectionAttempts with maxReconnectionAttempts when reconnecting to the device after the first connection attempt is successful.","Replaced transactionChain (not working correctly) with direct dataBroker transactions.","Fixed device type checking - when a device was mounted with the wrong type, the generic symbol (\"\") was implicitly used as the type. The device was installed on all layers, but uniconfig/configuration was empty. 
Now the correct device type must be used.","Fixed disabled CLI journaling (default value)."]},{"l":"NETCONF","p":["[NEW FEATURES]","Added maxReconnectionAttempts functionality into NETCONF client.","[IMPROVEMENTS]","Removed unused Karaf features.","Improved error message from parsing of NETCONF RPC response.","Removed akka actor dependency from NetconfCacheLoader.","Enable md-sal persistence across sessions in NETCONF testtool.","[FIXES]","Fixed writing of netconf namespace prefix ('Namespace urn:ietf:params:xml:ns:netconf:base:1.0 was not bound, please fix the caller').","Fixed reading of the whole list/leaf-list from the device - it was reading the whole parent structure, not only the dedicated list.","Moving state to unable-to-connect after failed schema context building from device YANGs; the status is written to the datastore, because the mount-node RPC relies on OPER information only.","Fixed deadlock that may occur on removal of Unified MP."]},{"l":"RESTCONF","p":["[NEW FEATURES]","Added support for RESTCONF PATCH method that includes tags.","Integration of UniConfig dedicated/shared transaction to RESTCONF - cookie with transaction-id property, create-transaction RPC, and close-transaction RPC.","Introduction of jsonb-filter query parameter used for filtering of data committed to the database.","[IMPROVEMENTS]","Removed unused Karaf features.","Using RFC8040 format for errors thrown from the transaction system.","[FIXES]","Fixed RESTCONF response/request logging.","Fixed reading of all available RPC operations.","Fixed NPE that is caused by Subject.getPrincipal() - extraction of authentication data from AAA.","Fixed serialization of ordered leaf list with attributes.","Fixed connection leak - read-only transaction was not always closed.","Fixed parsing of elements without module name: If there are some conflicts between child elements - multiple elements with the same name but in different modules exist - then we should return a proper error message.","Fixed use of fields query parameters with uniconfig-native nodes."]},{"l":"NETCONF translation units","p":["[IMPROVEMENTS]","Removed unused Karaf features.","[FIXES]","Fixed writer dependency in XR623 ISIS translation unit.","Ignored 'ios-xr lacp period 200' command - only 'lacp period short' is supported."]},{"l":"CLI translation units","p":["[FIXES]","[IMPROVEMENTS]","[NEW FEATURES]","Huawei: additions - global config reader and writer for bgp, neighbor config reader and writer, new augmentation fields for global and neighbor configurations.","Huawei: translation units - interfaces.","IOS XE: added ios-xe 15 and 17 to ios-xe module.","IOS XE: additions - media-type command, port-security commands, BDI type recognition, ethernet cfm mip command, cft commands, commands for bgp, prefix-list command, fhrp delay, bfd-template, split-horizon group in bridge-domain, added fallOverMode for vrf neighbor, IPv6 prefix-lists with prefix lengths, routing-policy, ipv6 vrrp, added synchronization and moved default-information in BGP, table-map, ip community-list command, redistribute command, bgp and interface commands, ipv6 commands, rewrite command, snmp trap, support for multiple l2protocols.","IOS XE: created a distinct module for IOS-XE in cli-units.","IOS XE: fixed writing interface config, fixed unwanted lldp/cdp/switchport vlan commands, fixed IPv6 config writer template, fixed mounting of IOS XE (configuration metadata), fixed bridge-domain regex, fixed reading VLANs, fixed storm-control regex, fixed NPE in GlobalAfiSafiConfigWriter,
fixed BgpAfiSafiChecks, fixed CommunityListConfigReader and L3VrfReader, fixed IndexOutOfBoundsException in BgpActionsConfigReader.","IOS XE: make sure all 'GigabitEthernet' interfaces are treated as physical, don't send unnecessary commands in interface unit, only send storm-control commands when needed, moved service instances and encapsulation in service instance in ios-xe/interface, edit readers and writers for bridge-domain, edited LLDP to not parse when default is set, speed up mounting.","IOS XE: translation units - SNMP, LACP, privilege command, interfaces, l2protocol, evc, route-map, bgp and network-instance modules, vrf definition, fhrp version, ip commands, neighbor, ethernet cfm mip, negotiation auto.","IOS-XR: delete methods should always be readBefore, fixed calling get on a null value, fixed delete of mpls-te.","Moved service-policy from IOS/interface to IOS/QoS.","Removed unused Karaf features.","SAOS6: fixed virtual-circuit ethernet delete, fixed reading Virtual Ring data, fixed reading the range of vlans in virtual ring commands, reading default interface.","SAOS6: translation units: Ingress ACL.","SAOS6: use the same template for service as for profile schedulers.","SAOS6/8: added quotes into description.","SAOS6: additions - commands for delete untagged attributes, unset description command, parsing ranges in ring protection.","SONiC: created init and interfaces unit."]},{"l":"Openconfig","p":["created frinx-openconfig-evc module","created frinx-privilege module","fixed Openconfig bug with nested augmentations (fixed resolving of augmentation paths)","frinx-bfd-extension: bfd-template-config","frinx-bgp-extension: added bgp extension for Huawei device, local-as-group, route-maps in redistribute commands, BGP neighbor, table-map in BGP, synchronization and moved default-information in BGP, added bgp fall-over mode, neighbor as-override, default-information originate","frinx-cisco-if-extension: added negotiation auto, added support for multiple l2protocols, added support for rewrite commands, vrf forwarding, ip commands, fixed L2protocol description, split-horizon group in bridge-domain, changed bridge-domain type to string, fhrp delay, fixed bad order of augmentation in frinx-cisco-if-extension.yang, bridge-domain, added grouping for L2protocol for Service instance, move encapsulation in service instance, move service instances, created augmentation for service instances, cft cisco specific commands, added port-security","frinx-cisco-ipvsix-extension: added yang extension for global ipv6 commands.","frinx-cisco-routing-policy-extension: prefix lengths in prefix-list, sequence-id, forwarding-action, route-map","frinx-cisco-vrrp-extension: added ipv6 vrrp augmentation, added vrrp-group augmentation","frinx-oam: added ethernet cfm mip","frinx-openconfig-bgp-policy-extension: added community-list type","frinx-openconfig-bgp-types: extracted typedefs for community union type.","frinx-openconfig-fhrp: fhrp version","frinx-openconfig-lacp: added ON lacp mode","frinx-qos-extension: moved service-policy from IOS/interface to IOS/QoS","frinx-snmp: added snmp-view config","removed unused Karaf features from openconfig"]}],[{"i":"uniconfig-426","l":"UniConfig 4.2.6"},{"i":"uniconfig","l":"UniConfig:","p":["new feature: introduced 3-phase commit - integration of validation and confirmed-commit features - here (a request sketch follows below)",
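A hedged sketch of a commit request exercising the 3-phase commit (validation and confirmed commit) introduced here; the RPC path, the 'target-nodes' wrapper and the flag names are assumptions to be checked against the commit RPC documentation of the release in use.

```bash
# Sketch only: commit touched nodes with validation and confirmed-commit phases enabled.
# Flag names below are assumed, not confirmed by these notes.
curl -X POST 'http://127.0.0.1:8181/rests/operations/uniconfig-manager:commit' \
  -H 'Content-Type: application/json' \
  -d '{
    "input": {
      "target-nodes": { "node": ["R1"] },
      "do-validate": true,
      "do-confirmed-commit": true
    }
  }'
```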
"new feature: templates can be used for reusing some configuration and afterwards easier application of this configuration to target UniConfig nodes - storing of templates in UniConfig, modification of templates including tags using RESTCONF operations, and application of templates to target UniConfig nodes using the apply-template RPC","new feature: added copy-subtrees RPCs - merge or replace whole subtrees: copy-one-to-one, copy-one-to-many, copy-many-to-one","new feature: added calculate-subtree-diff RPC - calculation of diff between two subtrees in the datastore","new feature: implemented uniconfig healthcheck - RPC checks UniConfig and the database connection","fixed auto-sync service","fixed creation of Unified mountpoint for CLI device without available translation units - using only 'generic' units in this case"]},{"i":"controller","l":"CONTROLLER:","p":["improvement: removed 'native_prefix' from 'node' database relation - it is replaced by the NETCONF repository name","fixed MDSAL union codec - it didn't work with the boolean subtype"]},{"i":"cli","l":"CLI:","p":["fixed unmounting of a CLI device whose mounting process hasn't successfully finished yet"]},{"i":"netconf","l":"NETCONF:","p":["new feature: NETCONF validate RPC and confirmed-commit RPC exposed by extension of the DOM transaction","improvement: mounting a NETCONF device with an explicitly set NETCONF repository name that must be used - using this approach, it is not necessary to explicitly override/merge capabilities in the mount request - here","improvement: replacing the uniconfig-native fingerprint by 'schema-cache-directory' in NETCONF operational data","fixed mounting of a SROS device with specified ignoreNodes/namespaceBlacklist - here","fixed: unmounting of a NETCONF device whose mounting process hasn't finished yet","fixed: increased maximum NETCONF chunk size to 32*1024*1024"]},{"i":"restconf","l":"RESTCONF:","p":["new feature: introduced the 'uniconfig-schema-repository' query parameter - explicitly sets the name of the schema against which input/output data is validated","new feature: JSON attributes - option to encode XML-like attributes into the JSON structure - here"]},{"i":"cli-translation-units","l":"CLI TRANSLATION UNITS:","p":["IOS: fix - QoS translation unit, added port-channel into interface type","IOS: added translation units - storm-control, standard ACL","IOS: refactoring - allowed vlans on trunk interface","SAOS: fixed translation units - statistics augmentation, command ordering, ethernet config reader/writer, ordering of VLAN and VC, order of CPE commands","SAOS: fixed initialization - committing configuration during initialization"]},{"i":"openconfig","l":"OPENCONFIG:","p":["frinx-acl-extension: added support for standard ACL","moved statistics from frinx-saos-vlan-extension to frinx-saos-vc-extension","frinx-cisco-if-extension: added storm control","frinx-qos-extension: extended and fixed support for IOS QoS"]},{"i":"known-issues","l":"Known Issues:","p":["The error message needs to be fixed to inform the user about the name clash and how to fix it.","ODL did not start if the cache folder for the SROS16 device is applied.","BGP: NullPointerException occurs when configuring network instances for XE.","NETCONF: Junos 18 can't be mounted via NETCONF. An Xrv6.2.3 device has been locked and the session went down after a specific set of commands.","CLI: Performance issues when more than 400 devices are connected.","RPC: Commit and checked-commit issues when an invalid configuration has been applied to one router. The transaction has been locked during checked commit, and no rollback is performed when an invalid configuration has been configured on one router."]}],[{"i":"uniconfig-425","l":"UniConfig 
4.2.5"},{"i":"uniconfig","l":"UniConfig:","p":["new feature: show-connection-status RPC: it can be used for verification of status of selected nodes on CLI, NETCONF, Unified, and Uniconfig layers - here","new feature: filtering of data that is read from NETCONF mountpoint based on YANG extension that can be placed in the mount request ('uniconfig-config:extension' parameter) https://docs.frinx.io/frinx-odl-distribution/oxygen/user-guide/network-management-protocols/uniconfig_mounting/mounting-process.html#example-mounting-of-uniconfig-native-netconf-device","new feature: is-in-sync RPC: verification if UniConfig Operation datastore is in sync with device - here","new feature: introduced 'install-uniconfig-node-enabled' mount request parameter - option to not install node in the Unified and UniConfig layers - node would be installed only in the southbound layer - here","new feature: introduced uniconfig-native translation units used for reading and parsing of only configuration fingerprint","improvement: calculate diff for uniconfig-native nodes diff output shows difference also on the level of leaves and leaf-lists(better granularity)","fixed setting of maximum snapshot limit (passing 0 in input)","fixed uniconfig-native - mounting node using CLI and afterwards using NETCONF uniconfig-native didn't work as expected","fixed caching of read operational data: improved performance for nodes that are mounted via NETCONF translation units"]},{"i":"cli","l":"CLI:","p":["new component: creation of CLI flavour for SAOS devices for successfull reading and parsing of device configuration","new component: \"one-line-parser\" CLI parsing engine that uses grep function for parsing running-configuration","fixed synchronization of UniConfig operations (for example, commit RPC) and CLI RPCs (for example, execute-and-read)"]},{"i":"netconf","l":"NETCONF:","p":["new feature: added support for invocation of YANG 1.1 actions and TAILF actions - here","new feature: NETCONF edit-config test option - controlling validation of sent edit-config messages on NETCONF server - here","new feature: introduced 'default' NETCONF cache repository that can be used for side-loading of missing/fixed YANG schemas that are invalid/not provided by NETCONF device - here","new feature: introduced logging of whole NETCONF communcation - per-device NETCONF messages, notifications, and system events - here","improvement: added NETCONF cache directory (NETCONF repostory) into Operational datastore of NETCONF node","fixed authentication in NETCONF testtool (key-pair provider)","fixed parsing of NETCONF replies that contains multiple RPC errors(severity of error was not correctly considered)","fixed creation of NETCONF mountpoint - it was not blocking, so higher layers haven't caught events in the correct order","fixed loading of NETCONF cache repository into Operational datastore","synchronization issues","fixed propagation of user-friendly error messages from NETCONF layer into UniConfig RPC output"]},{"i":"restconf","l":"RESTCONF:","p":["new feature: subscription to NETCONF device notifications via websockets - here","new feature: invocation of YANG 1.1 actions and TAILF actions - here","new feature: invocation of PLAIN PATCH operation - here","new feature: schema filtering based on YANG extensions and deprecated YANG statement - reading and modification of data - here","new feature: introduced logging of whole RESTCONF communcation with option to hide fields with selected YANG type - here","improvement: improved RESTCONF error 
messages in case of invalid URI - displaying possible children nodes","fixed reading of whole list under augmentation/choice node"]},{"i":"controller","l":"CONTROLLER:","p":["new feature: introduced PostgreSQL persistence system for UniConfig nodes: persisting node configuration and NETCONF repositories into DBS with recovery system in the cluster - here","upgrade: using TrieMap dependency for data-tree implementation"]},{"i":"distribution","l":"DISTRIBUTION:","p":["added support for Java 11: compilation of all projects using JDK 11 and also running of UniConfig distribution using JRE 11","fixed invocation of UniConfig with \"--help\" argument","changed logging framework from log4j to logback","added \"--debug\" parameter for opening debug session"]},{"i":"translation-units","l":"TRANSLATION UNITS:","p":["fixed invocation of subtree writers based on wildcard path"]},{"i":"netconf-translation-units","l":"NETCONF TRANSLATION UNITS:","p":["XR6: added L3VPNIPV4UNICAST afi-safi type","XR6: fixed BGP neighbor reader","JUNOS17: fixed LACP units"]},{"i":"cli-translation-units","l":"CLI TRANSLATION UNITS:","p":["SAOS: create readers and writers for logical-ring","SAOS: fixed sending of commit command, parsing of port range, dependencies between writers, parsing of connection point key, interface subport writer, registering of interface writer, hardening update commands, L2VSICP writer, getAllIds in PortReader","IOS: added translation units: QoS, interface statistics, service-policy, VLAN, routing-policy","IOS: modified translation units: added next parameters into BGP, switchport mode options: dot1q && access, BGP neighbor version, SPEED parameter, ICMP type into ACL entry","IOS-XR: fixed LACP bugs: 'mode on' configuration is now explicit, subinterfaces were wrongly added to list of LAG interfaces","Arista: added init unit","Cubro: added CLI flavour"]},{"i":"openconfig","l":"OPENCONFIG:","p":["frinx-qos-extension: added support for CoS and DSCP in QoS","frinx-cisco-if-extension: added switchport mode options: dot1q, access","frinx-bgp-extension: added BGP neighbor version support","frinx-if-ethernet-extension: added interface SPEED parameter","frinx-cisco-if-extension: added port-type, snmp-trap-link-status, switchport-mode, switchport-access-vlan, switchport-trunk-allowed-vlan-add, ip-redirects, ip-unreachables, ip-proxy-arp, service-policy","created SAOS model extension (frinx-saos-virtual-ring-extension)","created Cisco BGP model extension (frinx-cisco-bgp-extension)","fixed frinx-bgp-extension YANG","fixed auto-generated yang docs"]},{"i":"known-issues","l":"Known Issues:","p":["The error message needs to be fixed to inform user about the name clash and how to fix it. 
ODL does not start if cache folder for SROS16 device is applied","BGP: - NullPointerException occurs when configure network instances for XE","NETCONF: - Junos 18 is can't be mounted by netconf - Xrv6.2.3 device has been locked after specific set of commands","CLI: - Performance issues when is more than 400 devices connected"]}],[{"i":"uniconfig-424","l":"UniConfig 4.2.4"},{"i":"uniconfig","l":"UniConfig:","p":["Added uniconfig node status- each node is in one of these states: installing, installed, failed","Added unified node status- each node is in one of these states: installing, installed, failed","bugfixing"]},{"l":"UniConfig Native","p":["UniConfig Native for CLI- new experimental feature allowing to communicate with devices in a native way using hand-written YANG models","Added sequence-read-active param- this forces UniConfig to read root configuration elements sequentially."]},{"l":"CLI","p":["Introduced RPC execute-and-expect- It is a form of the‘execute-and-read’ RPC that additionally may contain ‘expect(..)’ patterns used for waiting for specific outputs/prompts. It can be used for execution of interactive commands that require multiple subsequent inputs with different preceding prompts.","Introduced Tree-parser as CLI parsing strategy- device configuration is parsed into a tree. It provides faster lookup operations for reads.","Introduced native CLI- feature allows to define YANG models instead of translation units. YANG models need to be created based on device specific CLI commands"]},{"l":"OpenConfig","p":["added various extensions for Ciena TUs"]},{"l":"NETCONF","p":["bugfixing"]},{"l":"Translation units","p":["Added CLI translation units for Ciena SAOS6 and SAOS8","bugfixing"]}],[{"i":"uniconfig-423","l":"UniConfig 4.2.3"},{"i":"uniconfig","l":"UniConfig:","p":["create Lighty based distribution- removal of Apache Karaf altogether, this distribution is based on lighty.io","RPC input/output rework","Unification of RPC inputs/outputs","Prevent any network wide operations if no node id has been passed- All RPCs MUST specify node-id of nodes they are affecting","new UniConfig transactions- create-transaction, cancel-transaction are used in HA deployments","bugfixing"]},{"l":"UniConfig Native","p":["separate schema contexts based on device type- it allows to mount devices with same YANG models but different revisions"]},{"l":"Lighty","p":["adding of AAA support","adding of TLS support"]},{"l":"RESTCONF","p":["update to RFC-8040 based RESTCONF- only this version runs by default","usage of schema context based on device type for data parsing","creation of custom UniConfig JSON/XML parsers/serializers"]},{"l":"OpenConfig","p":["added models: ipsec, frinx-if-ethernet-extension","added various extensions for Brocade TUs"]},{"l":"NETCONF","p":["run-time loading of netconf cache repositories","division of netconf cache based on device type","creation of schema context from netconf-cache","bugfixing"]},{"l":"Translation Units","p":["bugfixing"]},{"l":"Known Issues","p":["JSON response for GET snapshots of UniConfig-native nodes contain generated prefix \"uniconfig--\" (e.g. native-529687306-Cisco-IOS-XR-ifmgr-cfg:interface-configurations). 
This issue does not have an impact on RPC replace-config-with-snapshot."]}],[{"i":"uniconfig-508-release-notes","l":"Uniconfig 5.0.8 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Created TU for Arris(CER) device","Install-node without mounting/syncing configuration from device","Option to divide OpenAPI files into modules"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Uniconfig-client: increased default HTTP response read timeout","Fixed NETCONF connection timeout","Fixed number of NETCONF reconnection attempts","Fixed waiting for NETCONF dry-run mountpoint","Fixed reading of default NETCONF parameters","Added 'get-template-info' RPC to oper mode (shell)","Huawei install DB parsing issue","Fixed memory visibility issues in MountpointRegistry","Fixed parsing junos xml configuration","Fixed parsing xml configuration with reordered lists items","Fixed list of available RPCs in UniConfig Shell"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Optimization of calc-diff RPC after replace-config-with-oper RPC","Install-node without mounting/syncing configuration from device improvements","Added missing attributes to SAOS6 Interface","Remove OSS index checks from owasp","Generate release notes during merge job"]}],[{"i":"uniconfig-509-release-notes","l":"Uniconfig 5.0.9 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implemented dedicated device sessions","Implementation of device locking states","Implementation of speed attribute for saos6 and saos8 (#21)","Expose kafka producer settings into java client","Added option to use list key delimiter in URI","Implementation of pm instances for port queue groups in saos8 (#11)","Expose kafka producer settings","Implementation of default vlans for saos6 (#10)","Implementation of auto-neg attribute for both saos6 and saos8 (#9)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed ordering of data inside transaction on SONiC device","Fix ignoring empty key","Fix serialization of keyDefinitions","Cleaned and fixed locking of nodes in uniconfig RPCs","Fixed generation of NETCONF message-id","Fixed JSOB filtering - creation of jsonpath and parsing output"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Updated writer template for saos6 and saos8 (#32)","Implementation of speed attribute for saos6 and saos8 (#21)","fix showing list entries in cli suggestions"]}],[{"i":"uniconfig-5010-release-notes","l":"Uniconfig 5.0.10 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed removing native prefix from snapshots (#57)","Fixed parsing GNMi GET response (augmentation content)","Fixed parsing result from immediate-commit model","Fixed lost list ordering after apply-template RPC"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Add a parameter so empty GET response returns 204"]}],[{"i":"uniconfig-5011-release-notes","l":"Uniconfig 5.0.11 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Remove namespace from response (#77)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["uc shell: update simple value and list value simultaneously","uc shell: transaction log ordering","VHD-162 Fixed Issue with With-Defaults param. 
(#68)","Serialization of int64, uint64, decimal types as string type","Changed order of executing remove and add vlans for saos6 (#75)","Fixed NETCONF reconnection attempts after connection timeout","Removed parent-node-id from NETCONF layer","Fixed synchronization of NETCONF session timeout"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Fixed ordering of data inside transactions for SONiC device","Implementation of common commands of relay agent for saos8 (#78)","Implementation of common commands of relay agent for saos6 (#70)","UC shell: autocompletion of nodes (#36)","Fix parallelism in apply-template RPC (#73)","Implementation of relay-agent sub-port command for saos8 (#47)","Changed default value of content query parameter to 'config'","Add sshd package to logback.xml with INFO level (#67)"]}],[{"i":"uniconfig-5012-release-notes","l":"Uniconfig 5.0.12 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Inclusion of unhide query parameter in PUT/POST/PATCH requests","Sync-to-network RPC"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger - remove input container (#113)","Fix unhide param for write operations","Fixed default value of speed in ios (#95)","Changed isEmpty to null check (#96)","Fixed default value of speed in ios-xe (#88)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger - Fix authorizations (#114)","change behavior of execute-and-expect RPC (#112)","Swagger - configurable description and toggle servers","Enabled TCP keepalive mechanism in JDBC connection","Added 'database-connection-client-port' to 'transactions-data'","Swagger - generating adjustmens","Using hideEmptyDataNodes parameter per request (#94)","updated write template of interface config for ios xe devices for use-cases where no changes are requested from the user (#91)","Improve schema context caching for gnmi devices"]}],[{"i":"uniconfig-5013-release-notes","l":"Uniconfig 5.0.13 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Add jsonb-filter in UC client","Automation of adding release notes to documetation","API for bulk addition of templates","Implementation of callbacks","Implementation of publishing shell notifications to kafka","JSONB filtering core","Upgrade-from-network as part of sync-from-network"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Cancellation of initial NETCONF RPCs after request timeout","Fixed parsing XML-endoded leaf with instance-identifier to list","Fixed synchronization of notification listeners","Releasing subscription that is bound to tangling mountpoint","Fixed construction of output with set with-defaults param","Fix gnmi unknown augmentations"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Implementing HideAttributes query-parameter per request. - Introduced query parameter HidesAttribute. Default value is 'false'. 
Hides all composite data-tree nodes attributes to the GET response.","Stop acquiring subscription that was released in the same iteration","Fixed and refactored DOMMountPointService implementation","Add support for template leaf hashing","Improved code and API of create-multiple-templates RPC","Implemented frinx-types:json-element in the JSON deserializer","Swagger - Grouping requests","Swagger - Remove patch operation","Bump Mockito and get rid of Powermock","Swagger: inclusion of action endpoints","YangPackager does not catch broken submodules","Refresh schema context for netconf southbound if device was upgraded","Make mountpoint service call listeners from different thread"]}],[{"i":"uniconfig-5014-release-notes","l":"Uniconfig 5.0.14 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Added a flag to disable confirmed-commit phase in commit RPC (#181)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Adding 'rsa_' prefix to encrypted data","Changed the way to get config metadata for ios xe devices","Disable html escaping in callbacks output","Fix adding release notes to documentation repository","Fix Flyway when using SSL encryption","Fix of incorrect UC behavior when on limit with DB connections","Fix parsing of sslPassword parameter","Fix setting of DbConnectionConfig parameters","Fixed creation of aug with admin-state leaf","Fixed detection and recovery from cyclic dependency error in YANGs (#161)","Fixed duplicate module lookup in path deserializer (RESTCONF) (#150)","Fixed encryption (#170)","Fixed parsing NETCONF action response","Fixed recovery of Cipher object","Fixed SAOS Qos TU writer","Jsonb-filter multiple schemas bugfix","Make tailf:info revision independent","Updated config metadata pattern in reader for ios xe devices (#174)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Internal delete for trunk-vlans","JSONB-filter improvement"]}],[{"i":"uniconfig-5015-release-notes","l":"Uniconfig 5.0.15 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of bulk-edit RPC"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixes for trunk-vlans handling"]},{"i":"api","l":"\uD83D\uDCBB API","p":["Defined API for bulk-edit RPC"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Do not use common fork join pool in DOMMountpointService"]}],[{"i":"uniconfig-5016-release-notes","l":"Uniconfig 5.0.16 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Added create-multiple-templates RPC to uniconfig-client","Added option to specify tags in create-multiple-templates RPC","Swagger: Grouping of RPCs and tailf:actions","Encrypt/Decrypt of password for gnmi/netconf/cli topologies"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Data-change-events: make node-id optional","Displaying the default unistore node in shell for callbacks","Fix Guava dependency in POM","Fix replace-list ordering","Fix using ignoredNamespaces from swagger-config","Fixed merging template tags into merge node in the same TX","Fixed parsing nodes with attributes","Fixed regex for trimming path in TransactionTrackerUtils (#241)","Fixed submitting changes to database if there are failed nodes and do-rollback is false (#227)","Fixed writing of unkeyed list entry node","Remove duplicate yang models","Removed non-working option to create unistore node in request and show states","Revert \"Install-node without mounting/syncing configuration from device\"","Swagger - fix list container wrapping","Swagger: Fix behavior of basePath and server generation","Swagger: Fix top level containers not 
generating","Wrap requests to make them RFC 8040 compliant"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Adjusted logging of keepalive messages (stream sessions)","Made amount of max parallel installs customizable (#204)","Made error message user-friendly when user enter nonexistent node-id","Make create-multiple-templates RPC 'atomic'","Mapped \"gig\" to SpeedType.Gigabit","Netconf rpc timeout (#257)","Optimization of callbacks","Refactored stream writers used by RESTCONF","Removed last pieces of CheckedFuture and old unused MDSAL-API","Removed unused jettison dependency","Replaced CheckedFuture by FluentFuture - DOM read transaction","Replaced CheckedFuture by FluentFuture - DOM store read transaction","Replaced CheckedFuture by FluentFuture - DOMRpcImplementation","Replaced CheckedFuture by FluentFuture - DOMRpcService","Replaced CheckedFuture by FluentFuture: TX submit()","Resolving CVE security issues between level 6 and 0","Set UNICONFIGTX cookie to entire domain not just /rests/","Specified create/close-transaction RPCs in YANG","Support upgrading of YANG repository content (#233)","Suppress CVE-2022-38752","Suppressing logs generated by received unknown requests (SSH)","Used FluentFuture in binding ReadTransaction"]}],[{"i":"uniconfig-5017-release-notes","l":"Uniconfig 5.0.17 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Swagger - filter path CRUD customization","Implemented invalid schema repository cleaner","Units-coverage RPC"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Commit fails if one of the touched node was uninstalled","Swagger - fix CRUD filter generation","Triggered commit hook if revert is successful","Fixed decryption of passwords in CLI layer (#301)","Add support for fetch=count with jsonpath filtering (#282)","Fix suppressed Jackson CVEs","Bump Apache commons-text to 1.10.0","Swagger - Fix GET operation generation for list nodes","Bump protobuf-java to 3.21.7","Changed order of executed commands for saos8 (#280)","Additional fix for dce global subscription in client.","Improve JSON parser error message","Changed the way of getting vlan name and egress-tpid","Localhost throws 500","Swagger: Fix operational API generation"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Replacing parallelStreams with more predictable alternatives","Swagger - add option to disable GET request generation for concrete list nodes","Add request timeout to gnmi session","Create modulesWithIgnoredNamespace list","Support install-multiple-nodes for gnmi","Swagger: Add content query parameter"]}],[{"i":"uniconfig-5018-release-notes","l":"Uniconfig 5.0.18 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implemented TU for collecting all the information in NTP MCS for Saos6 and Saos8 (#420)","Implemented low priority fields for collecting inventory for SAOS6 CEN(ring) (#341)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger: Fix generation of arguments in path","Fixed cleanup of existing MP during mount-node RPC","Swagger: fix generation of choice nodes","Fixed encryption of sensitive info passed via template variables (#326)","fix Jsonb filter element filtering","Swagger - Fix request generation","Fixed passing leaf-list in shell callbacks"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Fix Logback and cleanup POMs","Swagger: Custom operational path","Improved reporting of parsing issues in RESTCONF","Swagger - Improve start file","Changed order of some commands for SAOS8 (#348)","Swagger: generate augmentations in 
respective modules","Expose request-timeout parameter","Bump dependency-check to 7.3.0","readEntireConfig toString returns plain content","Reading mount configuration in the uniconfig-client"]}],[{"i":"uniconfig-5019-release-notes","l":"Uniconfig 5.0.19 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Fixed hardcoded part and added one more if condition to receiver transceiver data for IOS-XE (#493)","Stable XR6 XR66 devices (#471)","Implementation of storing failed installations into DB (stable 5.0.x)","Implemented TU for adding/removing users for Saos6 (#438)","Implemented TU for collecting transceiver information for IOS-XE (#439)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Downgraded sshd to 2.8.0","Fix uninstall -> install transition","Swagger: fix generation of operational APIs (5.0.X)","Fix CVEs","Fixed reading public key from NETCONF device (NPE)","Fixed distribution of mount failure from GNMi layer","Swagger: Fix path filtering"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["expose gnmi parameters","add overallStatus to multiple-nodes-rpc-output","Improved error message when connection cannot be created","Exposed DOMMountPointService configuration","Optimization of mountpoint notifications","Added logs into DOM Mountpoint Service"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Configurable re-sending cli commands","Fixed mount point creation for CLI topology","Removed unified-topology.yang","Refactored unified layer and mounting/unmounting process - updates","Refactored unified layer and mounting/unmounting process","Added logging level for shell to the logback.xml"]}],[{"i":"uniconfig-5020-release-notes","l":"Uniconfig 5.0.20 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Support for GetInstalledNodes in UC client - 5.0.x","Add getJSONOutput to UniConfig client","Added some commands for collecting data for IOS-XE (#515)","Skip unreachable nodes at commit"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed leafref version-drop","Suppress Netty FP CVEs","Fixed quit command in the shell","Fixed pattern handling and XPath extension parsing","Fixed parsing of seconds in XR native metadata unit","Swagger: fix generation of action nodes (5.0.X)","Swagger: fix no key lists generation (5.0.X)","Fixed locking of nodes from TX with enabled dedicated sessions (#523)","Fix bug in bulk edit operation","Swagger: fix generation of operation children from config container"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Change overallStatus when skiping unreachable nodes","handle user parameters input in GnmiDefaultParametersService","Removed 'reconcile' mountpoint parameter (#572)","Disable verification of supported query parameters (5.0.x) (#549)","Swagger: toggle generation of POST apis for containers","Bulk-edit rpc improvements"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Swagger: fix npe in custom operational path (5.0.X)","Added only-vlan parser, upgraded trunk-vlans for huawei (#528)","Callbacks authentication"]}],[{"i":"uniconfig-5021-release-notes","l":"Uniconfig 5.0.21 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Add migration for removing node-extension:reconcile"]}],[{"i":"uniconfig-5022-release-notes","l":"Uniconfig 5.0.22 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["make replace request on correct node for gnmi","Add GetTemplateNodes RPC and add support to Client","Implemented some new commands for Huawei TU (#580)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed encryption of leaves marked using 
deviations","Fix vulnerabilities by changing base docker image","Stop cleaning YANG repos associated to persisted nodes","Swagger: fix actions using custom operational path","Suppressed CVE-2021-4277 (#612)","Fix CVE-2021-37533"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: mandatory indicator","Improved registration of unexpected YANGs downloaded from device","Add compare-config RPC.","Changed logging level: Unable to map identifier to capability","Skipping unknown fields in GET request","Added an RPC input to enable error handling for execute-and-read RPC (#662)","Fetch Kafka settings to client.","add gnmi protocol to get-installed-nodes RPC","Swagger: add drop-down for topology-id parameter"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["PUT and DELETE operations for callbacks"]}],[{"i":"uniconfig-5023-release-notes","l":"Uniconfig 5.0.23 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of RPD TUs for CER(Arris)","Implementation of Cable-Mac Oper TUs for CER(Arris)","Replace paths feature","Implementation of cable-upstream TUs for CER(Arris)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Added new control to check if cached prompt is invalid (#809)","Fixed writer of cable upstream interface","Added new control to check if cached prompt is invalid (#633)","Fix delete request in replace-paths","Fixed schema context building","Java based migration for huawei config","Bulk-edit - removed the version comparison before version drop procedure","Bump dependencycheck.version, update suppress for CVE-2022-41915, CVE-2022-41881"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: regular expressions with example values","Extended TreeConfigParser to handle arris device's behavior for cable-upstreams","Change isInstalled method implementation in uniconfig-client","JRE-17 compatibility","Added read option to bulk-edit RPC","Bump sshd to 2.9.2","Prefer 'latest' repository in latest repository update process","Change status code if transaction is not valid. 
(#711)"]}],[{"i":"uniconfig-5024-release-notes","l":"Uniconfig 5.0.24 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implemented RPD related commands to fiber-node TU for Arris Commscope","Swagger: difference between OpenAPI specifications"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed RPD related writers for Arris Commscope","Fixed update templates in CableInterfaceUpstreamConfigWriter for Arris Commscope","Fixed callback leaf-list input parameter (#948)","Added a verification to check if lineIndex is lower than total number of parsed lines for multiline commands","Fixed CLI SSH KEX initialization","Fixed data decryption during apply-template RPC","Fixed regex for \"show cable modem\" command (#897)","Generate action names in java constants"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: shorter operational path"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Implementation of RPD TUs for CER(Arris) (#819)"]}],[{"i":"uniconfig-5025-release-notes","l":"Uniconfig 5.0.25 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Subtree-based resolution of conflicts between committed nodes (#989)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix: Duplicate module name in Yang schemas","Fixed update template to update fiber node for Arris Commscope","Fixed uninstall node rpc","Fixed parsing leaf-list into JSONObject","Fixed construction of Tree (callbacks system test)","Fixed problem with re-write data of transaction by other transaction (#1032)","Fixed method to add unistore FP","Fix deriving of DB reader path","fix read only lock in uniconfig task executor (#981)","Fixed overriding of default mount settings by uniconfig-client"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["cli-shell set callback... 
suggest only commands that contain input body","Setting default spin/park time in notification router config","Optimised sending of internal notifications","Optimised lookup in modified uniconfig-topology & network-topology modules","Optimise detection of updated mount data in notification monitoring system","Add additional logs to precondition checks in SchemaContextUtil","Subtree-based resolution of conflicts between committed nodes (#989)","Improved the processing time of sync RPC for ios devices","Optimisation of single transaction-log entry reading","Added dedicated reader for single transaction-log entry","Add batching process for parallel reading of config"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Optimization of handling saos devices"]}],[{"i":"uniconfig-510-release-notes","l":"Uniconfig 5.1.0 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Support for GetInstalledNodes in UC client","Add getJSONOutput to UniConfig client","Added some commands for collecting data for IOS-XE (#527)","Skip unreachable nodes at commit","Fixed hardcoded part and added one more if condition to receive transceiver data for IOS-XE (#490)","added implementation of XR6 and XR6.6 devices as native units","Implementation of storing failed installations into DB (main)","Implemented TU for adding/removing users for Saos6 (#360)","Implemented TU for collecting transceiver information for IOS-XE (#437)","Implemented TU for collecting all the information in NTP MCS for Saos6 and Saos8 (#352)","Implemented low priority fields for collecting inventory for SAOS6 CEN(ring) (#341)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Add migration for removing node-extension:reconcile","Bump units versions to 5.1.0-SNAPSHOT","Downgraded sshd to 2.8.0","Fix : bad package for GnmiDefaultParametersService","Fix bug in bulk edit operation","Fix CVE-2021-37533","Fix CVEs","fix Jsonb filter element filtering","Fix Logback and cleanup POMs","Fix uninstall -> install transition","Fixed cleanup of existing MP during mount-node RPC","Fixed distribution of mount failure from GNMi layer","Fixed encryption of sensitive info passed via template variables (#326)","Fixed hardcoded part and added one more if condition to receive transceiver data for IOS-XE (#490)","Fixed leafref version-drop","Fixed locking of nodes from TX with enabled dedicated sessions (#522)","Fixed parsing of seconds in XR native metadata unit","Fixed passing leaf-list in shell callbacks","Fixed pattern handling and XPath extension parsing","Fixed quit command in the shell","Fixed reading public key from NETCONF device (NPE)","Fixed reconcile SQL migration file","Fixed synchronization in datastore transaction","Stop reporting metrics into log/logs.log and stdout (#598)","Suppress Netty FP CVEs","Swagger - Fix request generation","Swagger: fix generation of action nodes","Swagger: Fix generation of arguments in path","Swagger: fix generation of choice nodes","Swagger: fix generation of operation children from config container","Swagger: fix generation of operational APIs","Swagger: fix no key lists generation","Swagger: Fix path filtering"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["add overallStatus to multiple-nodes-rpc-output","Added logs into DOM Mountpoint Service","Bulk-edit rpc improvements","Bump dependency-check to 7.3.0","Bump logback","Change overallStatus when skipping unreachable nodes","Changed order of some commands for SAOS8 (#348)","Disable verification of supported query parameters (5.1.x) (#550)","expose gnmi 
parameters","Expose request-timeout parameter","Exposed DOMMountPointService configuration","handle user parameters input in GnmiDefaultParametersService","Improved error message when connection cannot be created","Improved reporting of parsing issues in RESTCONF","Optimization of mountpoint notifications","readEntireConfig toString returns plain content","Reading mount configuration in the uniconfig-client","Removed 'reconcile' mountpoint parameter (#573)","Swagger - Improve start file","Swagger: Custom operational path","Swagger: generate augmentations in respective modules","Swagger: toggle generation of POST apis for containers"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Swagger: fix npe in custom operational path","Added only-vlan parser, upgraded trunk-vlans for huawei (#444)","Callbacks authentication","Configurable re-sending cli commands","Fixed mount point creation for CLI topology","Removed unified-topology.yang","Refactored unified layer and mounting/unmounting process - updates","Refactored unified layer and mounting/unmounting process","Added logging level for shell to the logback.xml"]}],[{"i":"uniconfig-511-release-notes","l":"Uniconfig 5.1.1 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["make replace request on correct node for gnmi","Implemented some new commands for Huawei TU (#580) (#639)","Add GetTemplateNodes RPC and add support to Client","Implementation of MIB parser using ANTLR grammar"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed NPE for JSONCodecFactoryLoader","Fixed deserialization of uniconfig instance record read from DB (#700)","Fixed encryption of leaves marked using deviations","Fix vulnerabilities by changing base docker image","Stop cleaning YANG repos associated to persisted nodes","Swagger: fix actions using custom operational path","Suppressed CVE-2021-4277 (#613)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: mandatory indicator","Improved registration of unexpected YANGs downloaded from device","Add compare-config RPC.","Changed logging level: Unable to map identifier to capability","Skipping unknown fields in GET request","Added an RPC input to enable error handling for execute-and-read RPC (#668)","Fetch Kafka settings to client.","gnmi support for upgrade-from-network RPC","add gnmi protocol to get-installed-nodes RPC","Swagger: add drop-down for topology-id parameter"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["PUT and DELETE operations for callbacks"]}],[{"i":"uniconfig-512-release-notes","l":"Uniconfig 5.1.2 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of RPD TUs for CER(Arris)","Implementation of Cable-Mac Oper TUs for CER(Arris)","Replace paths feature","Implementation of cable-upstream TUs for CER(Arris)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Added new control to check if cached prompt is invalid (#812)","Fixed writer of cable upstream interface","Fix delete request in replace-paths","Fixed schema context building","Changed log level in mockito-configuration to INFO","Fix wrong groupIds","Java based migration for huawei config","Bulk-edit - removed the version comparison before version drop procedure","Bump dependencycheck.version, update suppress for CVE-2022-41915, CVE-2022-41881"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Add Dependency Upgrades to release notes","Added calc-diff result to audit logs","Added read option to bulk-edit RPC","Adopt Mockito 5","Bump dependency-check, cleanup unused suppressions","Bump sshd to 2.9.2","Change 
isInstalled method implementation in uniconfig-client","Change status code if transaction is not valid. (#699)","Enable dependabot updates","Extended TreeConfigParser to handle arris device's behavior for cable-upstreams","Migrate codebase to Java 17 & bump dependencies & clean maven structure","Prefer 'latest' repository in latest repository update process","README - Update Running from IDE section","Remove license server","Remove license token from README.md and run_uniconfig.sh script.","Replace com.google.common.base.Optional with java.util.Optional","Swagger: regular expressions with example values","Unify antlr4 version","Update README.md running uniconfig from IDE","Updated list of supported Unicode blocks (RegexUtils)"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["Bump antlr4-maven-plugin from 4.10.1 to 4.11.1","Bump byte-buddy.version from 1.12.22 to 1.13.0","Bump commons-dbcp2 from 2.7.0 to 2.9.0","Bump commons-lang3 from 3.7 to 3.12.0","Bump disruptor from 3.3.10 to 3.4.4","Bump embedded-postgres from 1.2.10 to 2.0.3","Bump grpc.version from 1.51.1 to 1.53.0","Bump httpclient from 4.5.13 to 4.5.14","Bump jackson-bom from 2.14.1 to 2.14.2","Bump jna.version from 4.5.0 to 5.13.0","Bump maven-enforcer-plugin from 3.1.0 to 3.2.1","Bump netty.version from 4.1.86.Final to 4.1.89.Final","Bump objenesis from 2.1 to 3.3","Bump okhttp.version from 4.9.1 to 4.10.0","Bump org.eclipse.jdt.annotation from 2.1.0 to 2.2.700","Bump perfmark-api from 0.25.0 to 0.26.0","Bump properties-maven-plugin from 1.0.0 to 1.1.0"]}],[{"i":"uniconfig-513-release-notes","l":"Uniconfig 5.1.3 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Swagger: difference between OpenAPI specifications"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix UC not starting when using standalone database - binding","Fix UC not starting when using standalone database","Fix & rewrite calc-diff to new format","Fixed regex for \"show cable modem\" command (#898)","Generate action names in java constants"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Migrate to JUnit5","Removed unused JMX classes"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Implementation of RPD TUs for CER(Arris) (#862)"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["Bump actions/setup-python from 4.3.0 to 4.5.0 (#760)","Bump actions/upload-artifact from 3.1.1 to 3.1.2","Bump annotations from 3.0.1 to 3.0.1u2","Bump antlr4.version from 4.11.1 to 4.12.0","Bump async-http-client from 1.9.24 to 1.9.40","Bump AutoModality/action-clean from 1.1.0 to 1.1.1","Bump byte-buddy.version from 1.13.0 to 1.14.0","Bump commons-cli from 1.4 to 1.5.0","Bump commons-compress from 1.21 to 1.22","Bump commons-fileupload from 1.3.3 to 1.5","Bump crypt4j from 1.0.0 to 1.0.1","Bump docker/build-push-action from 2 to 4 (#761)","Bump docker/login-action from 2.0.0 to 2.1.0 (#762)","Bump dokka-maven-plugin from 1.5.30 to 1.7.20","Bump embedded-postgres-binaries-linux-amd64 from 13.2.0 to 13.10.0","Bump embedded-postgres-binaries-linux-amd64 from 13.2.0 to 15.2.0","Bump exec-maven-plugin from 1.5.0 to 3.1.0","Bump flyway-core from 7.8.1 to 9.15.0","Bump flyway-core from 9.15.0 to 9.15.1","Bump future-converter-java8-guava from 0.3.0 to 1.2.0","Bump gson from 2.9.0 to 2.10.1","Bump jackson-databind from 2.14.1 to 2.14.2","Bump jakarta.servlet-api from 5.0.0 to 6.0.0","Bump jakarta.ws.rs-api from 3.0.0 to 3.1.0","Bump janino from 2.6.1 to 3.1.9","Bump jaxb-impl from 3.0.2 to 4.0.2","Bump jaxen from 1.1.6 to 
2.0.0","Bump jersey.version from 3.0.8 to 3.1.1","Bump jetty-bom from 11.0.11 to 11.0.13","Bump jline.version from 3.21.0 to 3.22.0","Bump jmh-core.version from 1.21 to 1.36","Bump joelwmale/webhook-action from 2.1.0 to 2.3.2 (#759)","Bump jsonassert from 1.5.0 to 1.5.1","Bump junit-jupiter-api from 5.9.1 to 5.9.2","Bump ktlint from 0.24.0 to 0.31.0","Bump maven-assembly-plugin from 3.4.2 to 3.5.0","Bump maven-deploy-plugin from 3.0.0 to 3.1.0","Bump maven-failsafe-plugin from 3.0.0-M8 to 3.0.0-M9","Bump maven-invoker-plugin from 3.4.0 to 3.5.0","Bump maven-jar-plugin from 3.0.2 to 3.3.0","Bump maven-javadoc-plugin from 3.4.1 to 3.5.0","Bump maven-resources-plugin from 3.0.1 to 3.3.0","Bump maven.surefire.version from 3.0.0-M8 to 3.0.0-M9","Bump metrics-core from 4.2.12 to 4.2.16","Bump opentelemetry-api from 1.9.0 to 1.23.1","Bump postgresql from 42.5.1 to 42.5.4","Bump protobuf-maven-plugin from 0.5.1 to 0.6.1","Bump protobuf.version from 3.21.7 to 3.22.0","Bump sevntu-checks from 1.43.0 to 1.44.1","Bump spring-jdbc from 5.3.24 to 5.3.25 (#855)","Bump spring.boot.version from 2.7.6 to 2.7.8","Bump spring.boot.version from 2.7.8 to 2.7.9","Bump stax2-api from 3.1.4 to 4.2.1","Bump stCarolas/setup-maven from 4.3 to 4.5","Bump swagger-core from 2.2.4 to 2.2.8","Bump swagger-parser from 1.0.31 to 1.0.64","Bump triemap from 1.1.0 to 1.2.0","Bump truth.version from 0.36 to 1.1.3","Bump value from 2.9.2 to 2.9.3"]}],[{"i":"uniconfig-514-release-notes","l":"Uniconfig 5.1.4 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implementation of MIB repository & context","Implemented RPD related commands to fiber-node TU for Arris Commscope"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed RPD related writers for Arris Commscope","Fixed update templates in CableInterfaceUpstreamConfigWriter for Arris Commscope","Fix calc-diff when data is in LeafNode","Fix subtree calc-diff in audit log when data has not changed","Fixed callback leaf-list input parameter (#949)","Added a verification to check if lineIndex is lower than total number of parsed lines for multiline commands","Fixed CLI SSH KEX initialization","Fixed data decryption during apply-template RPC"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: shorter operational path"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump dokka-maven-plugin from 1.7.20 to 1.8.10","build(deps-dev): bump maven-plugin-annotations from 3.7.1 to 3.8.1","build(deps): bump json from 20220924 to 20230227","build(deps): bump dependency-check-maven from 8.1.0 to 8.1.2","Bump reflections from 0.9.11 to 0.10.2","Bump maven-compiler-plugin from 3.10.1 to 3.11.0","Bump jetty-bom from 11.0.13 to 11.0.14","Bump maven-plugin-plugin from 3.7.1 to 3.8.1","Bump metrics-core from 4.2.16 to 4.2.17","Bump spotbugs-maven-plugin from 4.7.3.0 to 4.7.3.2","Bump maven-dependency-plugin from 3.1.1 to 3.5.0","Bump checkstyle from 10.7.0 to 10.8.0","Bump maven-antrun-plugin from 1.8 to 3.1.0"]}],[{"i":"uniconfig-515-release-notes","l":"Uniconfig 5.1.5 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Implement rate limiting (#1061)","Add DOMRpcService for gNOI","Subtree-based resolution of conflicts between committed nodes (#1008)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix audit-log diff feature (#1056)","Fixed update template to update fiber node for Arris Commscope","Fix: Duplicate module name in Yang schemas","Fixed reading config until timeout (#1067)","Fixed uninstall node rpc","Fixed parsing 
leaf-list into JSONObject","Fixed construction of Tree (callbacks system test)","Fixed problem with re-write data of transaction by other transaction (#1031)","Fixed method to add unistore FP","Fix bug with audit log while calling commit RPC (#1007)","Fix deriving of DB reader path","Add missing sslpassword configuration parameter (#990)","fix read only lock in uniconfig task executor (#982)","Fixed overriding of default mount settings by uniconfig-client"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Swagger: Migrate unit tests to v3","Added topology-id to DCE notification","cli-shell set callback... suggest only commands that contain input body","Setting default spin/park time in notification router config","Optimised sending of internal notifications","Optimised lookup in modified uniconfig-topology & network-topology modules","Optimise detection of updated mount data in notification monitoring system","Add additional logs to precondition checks in SchemaContextUtil","Subtree-based resolution of conflicts between committed nodes (#1008)","Improved the processing time of sync RPC for ios devices","Optimisation of single transaction-log entry reading","Added dedicated reader for single transaction-log entry","Add batching process for parallel reading of config"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Arris CER interface bugfix (#1086)","Optimalization of handling saos devices"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.15.1 to 9.15.2","build(deps-dev): bump flyway-core from 9.15.2 to 9.16.0 (#1012)","build(deps-dev): bump flyway-core from 9.16.0 to 9.16.1 (#1052)","build(deps-dev): bump swagger-parser from 1.0.64 to 1.0.65 (#1074)","build(deps): bump byte-buddy.version from 1.14.0 to 1.14.1","build(deps): bump byte-buddy.version from 1.14.1 to 1.14.2 (#1003)","build(deps): bump checkstyle from 10.8.0 to 10.8.1 (#988)","build(deps): bump checkstyle from 10.8.1 to 10.9.2 (#1028)","build(deps): bump checkstyle from 10.9.2 to 10.9.3 (#1073)","build(deps): bump commons-compress from 1.22 to 1.23.0 (#1059)","build(deps): bump dependency-check-maven from 8.1.2 to 8.2.1 (#1062)","build(deps): bump grpc.version from 1.53.0 to 1.54.0 (#1071)","build(deps): bump jline.version from 3.22.0 to 3.23.0","build(deps): bump maven-deploy-plugin from 3.1.0 to 3.1.1 (#1072)","build(deps): bump maven-failsafe-plugin from 3.0.0-M9 to 3.0.0 (#1011)","build(deps): bump maven-help-plugin from 3.3.0 to 3.4.0 (#1025)","build(deps): bump maven-install-plugin from 3.1.0 to 3.1.1 (#1075)","build(deps): bump maven-release-plugin from 3.0.0-M7 to 3.0.0 (#1037)","build(deps): bump maven.core.version from 3.9.0 to 3.9.1 (#1027)","build(deps): bump maven.surefire.version from 3.0.0-M9 to 3.0.0 (#1013)","build(deps): bump metrics-core from 4.2.17 to 4.2.18 (#1038)","build(deps): bump mockito-core from 5.1.1 to 5.2.0 (#987)","build(deps): bump netty.version from 4.1.89.Final to 4.1.90.Final (#1010)","build(deps): bump opentelemetry-api from 1.23.1 to 1.24.0 (#999)","build(deps): bump postgresql from 42.5.4 to 42.6.0 (#1026)","build(deps): bump protobuf.version from 3.22.0 to 3.22.1","build(deps): bump protobuf.version from 3.22.1 to 3.22.2 (#1000)","build(deps): bump spotbugs-maven-plugin from 4.7.3.2 to 4.7.3.3 (#1070)","build(deps): bump spring-jdbc from 5.3.25 to 5.3.26 (#1036)","build(deps): bump spring.boot.version from 2.7.9 to 2.7.10 (#1069)","build(deps): bump swagger-core from 2.2.8 to 2.2.9 
(#1039)"]}],[{"i":"uniconfig-516-release-notes","l":"Uniconfig 5.1.6 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Saos 8 command order fix.","Fixed execution order of commands for sub-port creation","Fixed a bug that causes cli closed error if config output and prompt has same length","Fix dryrun mount node task.","diff improvements (#1107)","Swagger: fix union type with patterns","Disable NETCONF level keepalive mechanism in streaming session","Fixed onEmpty section in templates for rpd ds and us conns for Arris Commscope","Fixed a bug that causes cli closed error for saos devices when commit or execute RPCs are triggered"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Refactoring ServiceInstanceWriter","Adjusted log levels of common logs","Swagger: filterPath improvement","diff improvements (#1107)","Rewrite kafka configs (#1105)"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["JSON input in Uniconfig shell problem fix","Shell logger","Create and publish Netconf test tool image to DockerHub"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.16.1 to 9.16.3 (#1139)","build(deps): bump actions/upload-artifact from 3.1.1 to 3.1.2 (#1131)","build(deps): bump bouncycastle.version from 1.72 to 1.73 (#1138)","build(deps): bump byte-buddy.version from 1.14.2 to 1.14.3","build(deps): bump byte-buddy.version from 1.14.3 to 1.14.4","build(deps): bump grpc.version from 1.54.0 to 1.54.1 (#1134)","build(deps): bump jetty-bom from 11.0.14 to 11.0.15 (#1127)","build(deps): bump jline.version from 3.22.0 to 3.23.0 (#998)","build(deps): bump json-path from 2.7.0 to 2.8.0 (#1093)","build(deps): bump kotlin.version from 1.8.10 to 1.8.20","build(deps): bump maven-enforcer-plugin from 3.2.1 to 3.3.0 (#1114)","build(deps): bump maven-invoker-plugin from 3.5.0 to 3.5.1 (#1115)","build(deps): bump maven-resources-plugin from 3.3.0 to 3.3.1","build(deps): bump netty.version from 4.1.90.Final to 4.1.91.Final (#1113)","build(deps): bump opentelemetry-api from 1.24.0 to 1.25.0 (#1135)","build(deps): bump protobuf.version from 3.22.2 to 3.22.3","build(deps): bump spotbugs-maven-plugin from 4.7.3.3 to 4.7.3.4 (#1126)","build(deps): bump triemap from 1.2.0 to 1.3.0"]}],[{"i":"uniconfig-517-release-notes","l":"Uniconfig 5.1.7 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Lazy loading/unloading of native schema contexts (#1171)","Creation of MIB context to SchemaContext adapter (#1169)"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fixed lazy apply-template","Fixed a bug that causes cli closed error if config output's length is less than prompt's length after config output is trimmed","Fix Swagger regex example generation","Fixed loading of schema context from swagger directory","Make request max size configurable for gnmi devices","Integrate encryption in create-multiple-templates RPC.","Fixed UniconfigTransactionsMediator initialization (#1181)","Remove TestNG (#1159)","Fix UC stuck when CPU is full and queue is empty (#1168)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Support for gnmi in shell","Implementation of idle timeout print in CLI (#1172)","Lazy loading/unloading of native schema contexts (#1171)","Improved the processing time of sync RPC for ios/iosxe devices","Unify movement in shell (#1005)","Enabled cable-upstream writer for interfaces that has number/number/number pattern as name","Improved apply-template RPC (#1111)"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Add lazy loading to 
shell"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.16.3 to 9.17.0 (#1177)","build(deps-dev): bump maven-plugin-annotations from 3.8.1 to 3.8.2 (#1152)","build(deps): bump checkstyle from 10.9.3 to 10.10.0 (#1176)","build(deps): bump jackson-bom from 2.14.2 to 2.15.0 (#1158)","build(deps): bump jackson-databind from 2.14.2 to 2.15.0 (#1153)","build(deps): bump jakarta.activation-api from 2.1.1 to 2.1.2 (#1178)","build(deps): bump jgrapht.version from 1.5.1 to 1.5.2","build(deps): bump junit.jupiter.version from 5.9.2 to 5.9.3 (#1175)","build(deps): bump kotlin.version from 1.8.20 to 1.8.21 (#1174)","build(deps): bump maven-checkstyle-plugin from 3.2.1 to 3.2.2 (#1157)","build(deps): bump maven-plugin-plugin from 3.8.1 to 3.8.2 (#1154)","build(deps): bump maven-project-info-reports-plugin from 3.4.2 to 3.4.3 (#1145)","build(deps): bump mockito.core.version from 5.2.0 to 5.3.1 (#1156)","build(deps): bump netty.version from 4.1.91.Final to 4.1.92.Final (#1173)","build(deps): bump okhttp.version from 4.10.0 to 4.11.0 (#1155)","build(deps): bump protobuf.version from 3.22.3 to 3.22.4"]}],[{"i":"uniconfig-518-release-notes","l":"Uniconfig 5.1.8 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Add change-encryption-status rpc (#1259) - UNIC-1090","Added some commands for collecting slot data for IOS-XE - VZ-734"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger: Fix path filtering ignoring cruds in lists - UNIC-1315","Fix failing shell tests (#1285)","Compare decrypted strings in calculate-diff procedure (#1266) - UNIC-1173","Changed execution order of commands for CPE ZTP provision for SAOS8"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Improved error output when two transactions want to update same data … (#1238)","Swagger: Improve OpenAPI difference calculation - UNIC-1298","Provide option for reading mount-point info - UNIC-1097","Refactored InstanceIdentifierContext (#1249) - UNIC-1211","Add option to gnmi-topology to read specific data - PANT-72","Add unsuported keys for cli connection (#1203)","Add JIRA tag to release notes - UNIC-1243","Optimize LeafRef context build. - UNIC-988","Build LeafRef Tree in background. 
- UNIC-988","Separated reader/writer for IUCs from main classes for Arris Commscope","Unify annotation usage in uniconfig codebase","support error info (#1201) - UNIC-1136","Rewrite RestConf module DI (#1202) - UNIC-1101"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Create a new set of installation parameters"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.17.0 to 9.19.1","build(deps-dev): bump maven-plugin-annotations from 3.8.2 to 3.9.0 (#1265)","build(deps-dev): bump swagger-parser from 1.0.65 to 1.0.66","build(deps): bump antlr4.version from 4.12.0 to 4.13.0 (#1256)","build(deps): bump build-helper-maven-plugin from 3.3.0 to 3.4.0","build(deps): bump checkstyle from 10.10.0 to 10.11.0 (#1254)","build(deps): bump checkstyle from 10.11.0 to 10.12.0","build(deps): bump embedded-postgres from 2.0.3 to 2.0.4 (#1270)","build(deps): bump embedded-postgres-binaries-linux-amd64 from 13.10.0 to 13.11.0 (#1274)","build(deps): bump git-commit-id-maven-plugin from 5.0.0 to 6.0.0","build(deps): bump grpc.version from 1.54.1 to 1.55.1 (#1199)","build(deps): bump guice.version from 5.1.0 to 7.0.0 (#1253)","build(deps): bump jackson-bom from 2.15.0 to 2.15.1","build(deps): bump jackson-databind from 2.15.0 to 2.15.1 (#1264)","build(deps): bump jersey.version from 3.1.1 to 3.1.2","build(deps): bump json-smart from 2.4.10 to 2.4.11","build(deps): bump kotlinx-coroutines-core from 1.6.4 to 1.7.0 (#1198)","build(deps): bump kotlinx-coroutines-core from 1.7.0 to 1.7.1","build(deps): bump maven-assembly-plugin from 3.5.0 to 3.6.0 (#1272)","build(deps): bump maven-checkstyle-plugin from 3.2.2 to 3.3.0","build(deps): bump maven-failsafe-plugin from 3.0.0 to 3.1.0 (#1196)","build(deps): bump maven-plugin-plugin from 3.8.2 to 3.9.0","build(deps): bump maven-remote-resources-plugin from 3.0.0 to 3.1.0","build(deps): bump maven-source-plugin from 3.2.1 to 3.3.0 (#1271)","build(deps): bump maven-surefire-plugin from 3.0.0 to 3.1.0 (#1197)","build(deps): bump maven.core.version from 3.9.1 to 3.9.2 (#1215)","build(deps): bump netty.version from 4.1.92.Final to 4.1.93.Final","build(deps): bump opentelemetry-api from 1.25.0 to 1.26.0 (#1195)","build(deps): bump protobuf.version from 3.22.4 to 3.23.0","build(deps): bump protobuf.version from 3.23.0 to 3.23.1 (#1273)","build(deps): bump spring-jdbc from 6.0.8 to 6.0.9","build(deps): bump spring.boot.version from 3.0.6 to 3.0.7 (#1269)","build(deps): bump sshd.version from 2.9.2 to 2.10.0","build(deps): bump swagger-core from 2.2.9 to 2.2.10","build(deps): bump triemap from 1.3.0 to 1.3.1"]}],[{"i":"uniconfig-519-release-notes","l":"Uniconfig 5.1.9 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["DOMDataBroker for SNMP (#1299) - UNIC-1200","UNIC-1200","Integration of southbound RESTCONF RPC service to UniConfig shell (#1310) - UNIC-1310"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Fix unreachable ports for device discovery RPC - UNIC-1322","Certificate Manager servers not getting created in appliance context - fixed output - UNIC-1309","Fix removing of exception from error-info/error-message - PANT-78","Add MapNode serialization to gNMI Update - UNIC-1323"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Changeable thread parameters - UNIC-1250","Swagger: Added html output to diff generation - UNIC-1324"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.19.1 to 9.19.4","build(deps-dev): bump swagger-parser from 
1.0.66 to 1.0.67","build(deps): bump byte-buddy.version from 1.14.4 to 1.14.5","build(deps): bump commons-io from 2.11.0 to 2.12.0","build(deps): bump commons-io from 2.12.0 to 2.13.0","build(deps): bump dependency-check-maven from 8.2.1 to 8.3.1","build(deps): bump dokka-maven-plugin from 1.8.10 to 1.8.20","build(deps): bump grpc.version from 1.55.1 to 1.56.0","build(deps): bump guava.version from 31.1-jre to 32.0.0-jre","build(deps): bump guava.version from 32.0.0-jre to 32.0.1-jre","build(deps): bump jackson-bom from 2.15.1 to 2.15.2","build(deps): bump jackson-databind from 2.15.1 to 2.15.2","build(deps): bump jaxb-runtime from 4.0.2 to 4.0.3","build(deps): bump kafka-clients from 3.4.0 to 3.4.1","build(deps): bump kafka-clients from 3.4.1 to 3.5.0","build(deps): bump kotlin.version from 1.8.21 to 1.8.22","build(deps): bump maven-dependency-plugin from 3.5.0 to 3.6.0","build(deps): bump maven-failsafe-plugin from 3.1.0 to 3.1.2","build(deps): bump maven-project-info-reports-plugin from 3.4.3 to 3.4.4","build(deps): bump maven-project-info-reports-plugin from 3.4.4 to 3.4.5","build(deps): bump maven-release-plugin from 3.0.0 to 3.0.1","build(deps): bump maven-surefire-plugin from 3.1.0 to 3.1.2","build(deps): bump metrics-core from 4.2.18 to 4.2.19","build(deps): bump opentelemetry-api from 1.26.0 to 1.27.0","build(deps): bump protobuf.version from 3.23.1 to 3.23.2","build(deps): bump swagger-core from 2.2.10 to 2.2.11","build(deps): bump swagger-core from 2.2.11 to 2.2.12","build(deps): bump truth.version from 1.1.3 to 1.1.4"]}],[{"i":"uniconfig-5110-release-notes","l":"Uniconfig 5.1.10 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Get API returns 3 response for single request (#1396) - VHD-324","Fixed too long error output in NETCONF LOG message - UNIC-649","UNIC-1319 Issue with Netconf install WorkFlow - fix logs for testing workflows - UNIC-1319","Upgrade Template didn't load repository - UNIC-1334","Save yang repository in transaction - VHD-324"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["store netconf keys in db (#1380) - VHD-326","maven cleanup (#1379) - UNIC-1291","Removed unused code from sal-dom-spi and dependencies","Extract shell actions to use RestconfDOMActionService (#1346) - UNIC-1313"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Parse and fill datastore with initial JSON file in MDSAL mode"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump flyway-core from 9.19.4 to 9.20.0","build(deps): bump actions/setup-python from 4.5.0 to 4.6.1","build(deps): bump bouncycastle.version from 1.73 to 1.74","build(deps): bump bouncycastle.version from 1.74 to 1.75","build(deps): bump checkstyle from 10.12.0 to 10.12.1","build(deps): bump commons-codec from 1.15 to 1.16.0","build(deps): bump docker/login-action from 2.1.0 to 2.2.0","build(deps): bump grpc.version from 1.56.0 to 1.56.1 (#1390)","build(deps): bump guava.version from 32.0.1-jre to 32.1.1-jre (#1388)","build(deps): bump janino from 3.1.9 to 3.1.10 (#1389)","build(deps): bump json from 20230227 to 20230618","build(deps): bump maven-clean-plugin from 3.2.0 to 3.3.1","build(deps): bump maven-invoker-plugin from 3.5.1 to 3.6.0","build(deps): bump maven-shade-plugin from 3.4.1 to 3.5.0","build(deps): bump metainf-services from 1.9 to 1.11 (#1375)","build(deps): bump mockito.core.version from 5.3.1 to 5.4.0","build(deps): bump netty-handler in /commons/parents/odlparent","build(deps): bump spotbugs-maven-plugin from 4.7.3.4 to 4.7.3.5","build(deps): bump 
spring-jdbc from 6.0.9 to 6.0.10","build(deps): bump spring.boot.version from 3.0.7 to 3.0.8","build(deps): bump sshd.version from 2.9.2 to 2.10.0 (#1251)","build(deps): bump swagger-core from 2.2.12 to 2.2.14","build(deps): bump truth.version from 1.1.4 to 1.1.5"]}],[{"i":"uniconfig-5111-release-notes","l":"Uniconfig 5.1.11 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Swagger: Fix generation of operational data from Uniconfig schemas (#1444) - UNIC-1280","Fixed unmounting of node that is in connecting state - UNIC-1281"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Changing use case of prompt stylization - UNIC-977","Enabling and cleaning up SHELL checkstyles (#1451) - UNIC-1345","Unifying, renaming and increasing readability of UC-shell - UNIC-977","Add mutable transaction to Shell (#1399) - UNIC-1312","Swagger: Unit tests - UNIC-1186","Removed sal-common-impl module","Merged mdsal-dom-spi to sal-dom-spi module (5.1.x) (#1417)","add ValueCase to gnmi codec - UNIC-1113"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Read All data type on specific paths"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump org.apache.kafka:kafka-clients from 3.5.0 to 3.5.1 (#1476)","build(deps): bump maven.core.version from 3.9.2 to 3.9.3 (#1358)"]}],[{"i":"uniconfig-5112-release-notes","l":"Uniconfig 5.1.12 Release Notes"},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["fix_regex_matching_of_identity - UNIC-1375"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Shell explicit show in config state (#1484) (#1503) - UNIC-1325","Shell caching data - 5.1.x-stable (#1496) - UNIC-1357"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Swagger: Code cleanup - 5.1.x-stable (#1489)"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump org.codehaus.mojo:properties-maven-plugin from 1.1.0 to 1.2.0 (#1522)"]}],[{"i":"uniconfig-5113","l":"Uniconfig 5.1.13"},{"i":"whats-changed","l":"What's Changed"},{"i":"bug-fixes","l":"\uD83D\uDC1E Bug Fixes","p":["Fix key delimiter in URI","Swagger: Fix RPCs placed after mountpoint (#1582)","[UNIC-1410] Fix tx cleanup when request fails","[UNIC-1413] Fixed updating snapshot in immediate-commit model (#1629)","[UNIC-1420] Fix cli ssh session reconnect","[UNIC-1425] Fix crypto bug (#1664)","[UNIC-1352, UNIC-1254] Fix cluster issues (#1670)","[UNIC-1340] Fixed releasing of used YANG modules from memory (#1667)","[UNIC-1429] Fix replace is sent using delete operation","[UNIC-1432] Swagger: Fix generation of post list endpoints (#1674)","[UNIC-1430] - fix replace yang-patch for gnmi mountpoint","[UNIC-1404] UniConfig Shell - fix system augmentation","Fixed loading of YANG from path in client diff tool"]},{"i":"new-features","l":"✅ New Features","p":["[UNIC-1394] Client side diff","[UNIC-1402] UC Shell - default callbacks repository (#1701)"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["[UNIC-981] UniconfigShell: Remove explicit show submode from root mod…","[UNIC-1411] Add mapping to request/response log message","[UNIC-1394] Add overloaded build methods to client side diff","[UNIC-1394] exclude gnmi depenendecies from java client","[UNIC-1401] UniConfig Shell - one line SET / DELETE command (#1621)","[UNIC-1403] Unified format of shell audit logs (#1658)","[PANT-83] add logs for pant83 - STABLE"]},{"i":"api-changes","l":"\uD83D\uDDA5️ API Changes","p":["add gnmi-messages logging broker"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["Fix Jetty 
CVEs","5.1.x-stable - Maven 3.9.5"]},{"i":"other-changes","l":"\uD83D\uDD27 Other Changes","p":["5.1.13-SNAPSHOT","UniconfigShell: Improving suggestions menu","[FI-1693] Remove Jenkins-test from merge workflow","Release 5.1.13"]}],[{"i":"uniconfig-5114-release-notes","l":"Uniconfig 5.1.14 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Uniconfig shell hide / unhide command implementation - UNIC-1075"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["Prevented sending no description command if there is no change for rpd description (#1818)","Uniconfig shell: information about expired transaction in the root mode"]}],[{"i":"uniconfig-520-release-notes","l":"Uniconfig 5.2.0 Release Notes"},{"i":"new-features","l":"✅ New Features","p":["Parse NOTIFICATION-TYPE from MIB schemas. (#1545) - UNIC-1382","Add rpc change-encryption-keys (#1441) - UNIC-1239","support for gnmi-notifications - Notification + Subscription service (#1109) - UNIC-1184","UNIC-1308","SNMP topology (#1438) - UNIC-1202","UNIC-1202"]},{"i":"bug-fixes","l":"❌ Bug Fixes","p":["fix_regex_matching_of_identity - UNIC-1375","Fixed generation of commit diff notifications","Fix identityRef parsing (#1502) - UNIC-1356","Swagger: Fix generation of operational data from Uniconfig schemas - UNIC-1280","Fixed unmounting of node that is in connecting state - UNIC-1281"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["Add gnmi subscription parameters (#1505) - UNIC-1369","Add mutable transaction to Shell (#1399) - UNIC-1312","add ValueCase to gnmi codec - UNIC-1113","Caching of calculate-diff response in transaction","Changing use case of prompt stylization - UNIC-977","Cleanup dependencies in uniconfig module (#1542) - UNIC-1381","Constants refactoring (#1511) - UNIC-1257","Enabling and cleaning up SHELL checkstyles (#1451) - UNIC-1345","fix connection manager (#1532)","Merged mdsal-dom-api into sal-dom-api module (#1543)","Merged mdsal-dom-spi to sal-dom-spi module (#1415)","Prevented sending command for rpd-index, ucam and dcam attributes if there is no change for CER rpd interfaces","Removed sal-common-impl module (#1426)","Shell caching data (#1480) - UNIC-1357","Shell explicit show in config state (#1484) - UNIC-1325","SNMP refactoring of snmp-topology (#1472) - UNIC-1343","Swagger: Unit tests - UNIC-1186","UNIC-1369","Unifying, renaming and increasing readability of UC-shell - UNIC-977"]},{"i":"api","l":"\uD83D\uDCBB API","p":["Refactor connection-manager RPCs (#1423) - UNIC-1283"]},{"i":"other","l":"\uD83D\uDD27 Other","p":["Read All data type on specific paths"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps-dev): bump org.apache.commons:commons-lang3 from 3.12.0 to 3.13.0 (#1523)","build(deps-dev): bump org.flywaydb:flyway-core from 9.20.0 to 9.21.0 (#1474)","build(deps-dev): bump org.flywaydb:flyway-core from 9.21.0 to 9.21.1 (#1520)","build(deps): bump bouncycastle.version from 1.75 to 1.76 (#1521)","build(deps): bump ch.qos.logback:logback-classic from 1.4.6 to 1.4.8 (#1483)","build(deps): bump ch.qos.logback:logback-classic from 1.4.8 to 1.4.9 (#1562)","build(deps): bump com.google.guava:guava from 32.1.1-jre to 32.1.2-jre (#1557)","build(deps): bump com.puppycrawl.tools:checkstyle from 10.12.1 to 10.12.2 (#1561)","build(deps): bump grpc.version from 1.56.1 to 1.57.0 (#1517)","build(deps): bump grpc.version from 1.57.0 to 1.57.1 (#1559)","build(deps): bump jersey.version from 3.1.2 to 3.1.3 (#1518)","build(deps): bump jmh-core.version from 1.36 to 1.37 
(#1558)","build(deps): bump json-smart from 2.4.11 to 2.5.0 (#1404)","build(deps): bump kotlin.version from 1.8.22 to 1.9.0 (#1391)","build(deps): bump kotlinx-coroutines-core from 1.7.1 to 1.7.2 (#1392)","build(deps): bump maven.core.version from 3.9.2 to 3.9.3 (#1358)","build(deps): bump maven.core.version from 3.9.3 to 3.9.4 (#1560)","build(deps): bump netty.version from 4.1.94.Final to 4.1.95.Final (#1477)","build(deps): bump netty.version from 4.1.95.Final to 4.1.96.Final (#1516)","build(deps): bump opentelemetry-api from 1.27.0 to 1.28.0 (#1406)","build(deps): bump org.apache.kafka:kafka-clients from 3.5.0 to 3.5.1 (#1476)","build(deps): bump org.codehaus.mojo:properties-maven-plugin from 1.1.0 to 1.2.0 (#1522)","build(deps): bump org.jetbrains.kotlinx:kotlinx-coroutines-core from 1.7.2 to 1.7.3 (#1519)","build(deps): bump org.junit.jupiter:junit-jupiter from 5.9.3 to 5.10.0 (#1479)","build(deps): bump org.xmlunit:xmlunit-legacy from 2.6.1 to 2.9.1 (#1478)","build(deps): bump protobuf.version from 3.23.2 to 3.23.4 (#1395)","build(deps): bump spring-jdbc from 6.0.10 to 6.0.11 (#1434)","build(deps): bump spring.boot.version from 3.1.1 to 3.1.2 (#1475)","build(deps): bump swagger-core from 2.2.14 to 2.2.15 (#1405)"]}],[{"i":"uniconfig-521","l":"Uniconfig 5.2.1"},{"i":"whats-changed","l":"What's Changed"},{"i":"bug-fixes","l":"\uD83D\uDC1E Bug Fixes","p":["[UNIC-1273] Use Jetty embedded server","[UNIC-1340] Fixed releasing of used YANG modules from memory","[UNIC-1352, UNIC-1254] Fix cluster issues","[UNIC-1365] Gnmi stream fixes","[UNIC-1390] Swagger: Fix RPCs placed after mountpoint","[UNIC-1395] Fix key delimiter in URI","[UNIC-1399] Fix issues with shell","[UNIC-1399] Switch shell terminal back to JNA","[UNIC-1404] UniConfig Shell - fix system augmentation","[UNIC-1410] Fix tx cleanup when request fails","[UNIC-1410] Fix_tx_closing","[UNIC-1413] Fixed updating snapshot in immediate-commit model","[UNIC-1420] Fix cli ssh session reconnect","[UNIC-1423] Fix identityRef as listEntry key in templatesg","[UNIC-1425] Fix crypto bug","[UNIC-1429] Fix replace is sent using delete operation","[UNIC-1430] Fix replace yang-patch for gnmi mountpoint","[UNIC-1432] Swagger: Fix generation of post list endpoints","[UNIC-1446] Fix SpotBugs violations - Reliance on default encoding","[UNIC-1447] Fix SpotBugs violations - Multithreaded correctness","[UNIC-1448] Fix SpotBugs violations - Use a localized version of String.toUpperCase() and String.toLowerCase()","[UNIC-1451] Fix SpotBugs violations - Correctness","[UNIC-1463] Remove duplicates from Set","Add git registry to dependabot.yml","Additional fix to calculate diff rpc","Cleanup test resources properly","Data-change-events publisher fix","Fix calculate diff rpc","Fix immediate commit model and submit successfull nodes.","Fix mapEntryNodes in gnmi notifications","Fix reading of actual YANG repository from mountpoint data","Fix show UC status script","Fix show_uniconfig_status script.","Fix skip of unreachable-nodes.","Fix SNMP Notification bean creation","Fixed DateTime format in the transaction-log","Fixed loading of YANG from path in client diff tool (#1747)","Prevented sending no description command if there is no change for rpd description","Registry attempt no.2","SNMP adjust exception","SNMP Node id is incorrectly parsed"]},{"i":"new-features","l":"✅ New Features","p":["[UNIC-1394] Client side diff","[UNIC-1373] Implemented dryrun-commit for GNMI topology","[UNIC-1398] SNMP notifications","[UNIC-1402] UC Shell - default callbacks 
repository","[UNIC-1218] Add dynamic property module"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["[PANT-83] add logs for pant83","[UNIC-1154] Integrate JOOQ into database access layer","[UNIC-1218] Rewrite database connection pool and connection properties","[UNIC-1223] ODL parent cleanup","[UNIC-1242] Remove PR links from the uploaded release notes","[UNIC-1242] Unify generation of release notes","[UNIC-1245] Replacing Guava future by CompletableFuture","[UNIC-1258] Fix issues reported by SpotBugs","[UNIC-1273] Adjust bootstrapping of web containers","[UNIC-1370] Adjust notification result parsing","[UNIC-1386] Map correct ObjectTypes to NotificationTypes","[UNIC-1391] Add SNMP notifications to SchemaContext","[UNIC-1394] Add overloaded build methods to client side diff","[UNIC-1394] Remove gnmi dependencies from java client","[UNIC-1401] UniConfig Shell - one line SET / DELETE command","[UNIC-1403] Unified format of shell audit logs","[UNIC-1411] Add mapping to request/response log message (#1630)","[UNIC-1412] change gnmi packaging","[UNIC-1435] Refactor transaction-log to JOOQ style","[UNIC-1441] SNMP config classes","[UNIC-1449] Fix SpotBugs violations - Performance","[UNIC-1463] Fix SpotBugs violations - Code vulnerabilities","[UNIC-980] UniconfigShell: Improving suggestions menu","[UNIC-981] UniconfigShell: Remove explicit show submode from root mode"]},{"i":"api-changes","l":"\uD83D\uDDA5️ API Changes","p":["[UNIC-1289] Refactor RPCs: revert-changes, query-config, device-discovery","[UNIC-1380] Add gnmi-messages logging broker","[UNIC-1287] Refactor snapshot-manager RPCs","[UNIC-1282] Refactoring uniconfig manager RPCs"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["[UNIC-1223] Align an embedded kafka version with the clients provided by Spring","build(deps): bump actions/checkout from 3 to 4","build(deps): bump actions/setup-python from 4.6.1 to 4.7.1","build(deps): bump actions/upload-artifact from 3.1.2 to 3.1.3","build(deps): bump antlr4.version from 4.13.0 to 4.13.1","build(deps): bump com.github.gantsign.maven:ktlint-maven-plugin from 1.16.0 to 2.0.0","build(deps): bump com.github.gantsign.maven:ktlint-maven-plugin from 2.0.0 to 3.0.0","build(deps): bump com.github.spotbugs:spotbugs-maven-plugin from 4.7.3.5 to 4.7.3.6","build(deps): bump com.puppycrawl.tools:checkstyle from 10.12.2 to 10.12.3","build(deps): bump com.puppycrawl.tools:checkstyle from 10.12.3 to 10.12.4","build(deps): bump commons-io:commons-io from 2.13.0 to 2.14.0","build(deps): bump commons-net:commons-net from 3.9.0 to 3.10.0","build(deps): bump docker/build-push-action from 4 to 5","build(deps): bump docker/login-action from 2.2.0 to 3.0.0","build(deps): bump grpc.version from 1.57.1 to 1.57.2","build(deps): bump grpc.version from 1.57.2 to 1.58.0","build(deps): bump io.swagger.core.v3:swagger-core from 2.2.15 to 2.2.16","build(deps): bump io.zonky.test.postgres:embedded-postgres-binaries-linux-amd64 from 13.11.0 to 13.12.0","build(deps): bump kotlin.version from 1.9.0 to 1.9.10","build(deps): bump org.apache.commons:commons-compress from 1.23.0 to 1.24.0","build(deps): bump org.apache.maven.plugins:maven-enforcer-plugin from 3.3.0 to 3.4.0","build(deps): bump org.apache.maven.plugins:maven-enforcer-plugin from 3.4.0 to 3.4.1","build(deps): bump org.apache.maven.plugins:maven-javadoc-plugin from 3.5.0 to 3.6.0","build(deps): bump org.apache.maven.plugins:maven-shade-plugin from 3.5.0 to 3.5.1","build(deps): bump org.immutables:value from 2.9.3 to 
2.10.0","build(deps): bump org.jetbrains.dokka:dokka-maven-plugin from 1.8.20 to 1.9.0","build(deps): bump org.json:json from 20230618 to 20231013","build(deps): bump org.owasp:dependency-check-maven from 8.3.1 to 8.4.0","build(deps): bump org.springframework.cloud:spring-cloud-dependencies from 2022.0.3 to 2022.0.4","build(deps): bump protobuf.version from 3.23.4 to 3.24.0","build(deps): bump protobuf.version from 3.24.0 to 3.24.1","build(deps): bump protobuf.version from 3.24.1 to 3.24.2","build(deps): bump protobuf.version from 3.24.2 to 3.24.3","build(deps): bump protobuf.version from 3.24.3 to 3.24.4","build(deps): bump spring.boot.version from 3.1.2 to 3.1.3","build(deps): bump spring.boot.version from 3.1.3 to 3.1.4","Maven 3.9.5"]},{"i":"other-changes","l":"\uD83D\uDD27 Other Changes","p":["5.2.1-SNAPSHOT","[FI-1693] Remove Jenkins-test from merge workflow","Workflows: update cluster IP from 10.19.0.67 to 10.19.0.242 and","Workflows: remove VPN to FRINX for postgresDB.","Workflows: update path to new VM of postgresDB.","Workflows: update path and remove FRINX VPN for embeded tests.","Removed forgotten LOG","Release 5.2.1"]}],[{"i":"uniconfig-522","l":"Uniconfig 5.2.2"},{"i":"whats-changed","l":"What's Changed"},{"i":"bug-fixes","l":"\uD83D\uDC1E Bug Fixes","p":["[UNIC-1405] UniConfig shell: set nested JSON data","[UNIC-1429] Fix replace operation in GNMI set (MAIN)","[UNIC-1450] Spotbugs fixes - Bad practice","[UNIC-1471] : Fix sync fails after failed installation was stored in DB","[UNIC-1471] Add schema-cache storing into sync impl","[UNIC-1474] Improve performance of YANG repository loading process during mounting process","[UNIC-1475]: generalizing information about expired transaction","[UNIC-1494] - add migration for replace-paths","Add fix for reading duplicate properties","Caching request body copier","Fix bad migration embedded kafka properties from old UC version to new","Fix exception in loading yang schemas","Fix get fallback schema context in cli shell.","Fix update property value to null bug","Removed the forgotten callbacks-models dependencies","Set forgotten crypto properties in creation crypto config.","UniConfig Shell - fix prompt callbacks bug","Use a Set instead of a List in MibRepository"]},{"i":"new-features","l":"✅ New Features","p":["[UNIC-1075] Uniconfig shell hide / unhide command implementation","[UNIC-1028] Connect/Disconnect node RPC"]},{"i":"improvements","l":"\uD83D\uDCA1 Improvements","p":["[UNIC-1408] UniConfig Shell - adjust cached data","[UNIC-1374] Fixed sending install-node RPC request without mandatory fields","[UNIC-1445] Refactor yang-repo to JOOQ style","Refactoring Properties","Add spotbugs-maven-plugin configuration","Optimize DB read-only transaction","Improved logging","Add Google ErrorProne plugin","[UNIC-1487] return to the same mode when transaction expires"]},{"i":"api-changes","l":"\uD83D\uDDA5️ API Changes","p":["[UNIC-1290] Refactor data-change-events RPCs"]},{"i":"dependency-upgrades","l":"\uD83D\uDD28 Dependency Upgrades","p":["build(deps): bump com.github.spotbugs:spotbugs-annotations from 4.7.3 to 4.8.0","build(deps): bump com.google.guava:guava from 32.1.2-jre to 32.1.3-jre","build(deps): bump org.codehaus.woodstox:stax2-api from 4.2.1 to 4.2.2","build(deps): bump io.swagger.core.v3:swagger-core from 2.2.16 to 2.2.17","build(deps): bump com.fasterxml.jackson.core:jackson-databind from 2.15.2 to 2.15.3","build(deps): bump org.apache.maven.plugins:maven-plugin-plugin from 3.9.0 to 3.10.1","build(deps): bump 
org.jetbrains.dokka:dokka-maven-plugin from 1.9.0 to 1.9.10","build(deps): bump io.github.git-commit-id:git-commit-id-maven-plugin from 6.0.0 to 7.0.0","build(deps-dev): bump org.apache.maven.plugin-tools:maven-plugin-annotations from 3.9.0 to 3.10.1","build(deps): bump grpc.version from 1.58.0 to 1.59.0","build(deps): bump spring.boot.version from 3.1.4 to 3.1.5","build(deps): bump sshd.version from 2.10.0 to 2.11.0","build(deps): bump org.owasp:dependency-check-maven from 8.4.0 to 8.4.2"]},{"i":"other-changes","l":"\uD83D\uDD27 Other Changes","p":["Rename distro/uniconfig-modules/uniconfig to main","Prepared sample docker compose file","Move main.jar into the root","Set kafka enabled to false","Properties overhaul","Suppress CVEs","Use NetconfCacheLoader in gnmi-sb instead of custom yang parsing","Fix binding empty properties","Rewrite client to new properties RPC","Fix dependency management for starting UniConfig","Release 5.2.2"]}],[{"l":"Translation Units","p":["This repository contains documentation for all available translation units for the FRINX ODL CLI service module. A translation unit is a piece of code that includes handlers to read from or write to a specific device (e.g. Cisco IOS classic router) and facilitates the translation in OpenConfig models. The purpose of this documentation is to see which commands can be read and set and how they map to the respective YANG models. Every section has a README file that provides an overview of all show and configuration commands that are supported. Multiple translation units are finally packaged together and made available as a karaf feature that can be installed at runtime."]},{"l":"Table of Contents","p":["URL","URL Operations","GET","PUT","DELETE","OPERATIONAL datasets","OPENCONFIG YANG","OS COMMANDS","DEVICE YANG","UNIT","CONFIGURATION datasets"]},{"l":"URL","p":["can be either cli or unified","each list item argument MUST be followed by list key. Usually the key is mapped to just one leaf (identifier, name, etc.), but in some cases, the key is created using more leafs. In this case, in the URL, the keys follow each other in order specified by YANG.","Each URL has a base format:","Example:","for each container or list in YANG model, there MUST be an argument in the URL","if the URL is tied with a body, the top-level element in the body must be the last element in the URL","Let's say you want to be even more specific and list details just about one particular interface. You can view the data by adding 'interface=' to the URL.","Let's say you want to list all areas in a specific OSPF. To obtain this data, you can trim the part: '/area=/interfaces' from the URL.","mountpoint name","network-instances argument is a list. We want to specify one item from that list (specific network instance), therefore the URL continues with ‘network-instance'. The key in network-instance is the identifier '' (e.g. vrf1) which follows the list item argument. Complex key is needed for protocol argument. The key is protocol-type followed by process-id. (frinx-openconfig-policy-types:OSPF, )","same for 'area='","Simplified example:","The general steps in creating the URL are following:","the top level argument must contain the name of the model. The name of the model must also be specified for YANG identities.","the URL contains an identity which is a part of a key for protocol list. 
This identity is prefixed by model name: ‘frinx-openconfig-policy-types:OSPF’","The URL will always point to either operational or config datastore and to the node we want to get the information from. You can always check if the particular device is registered by issuing GET on :","top-level argument also contains the YANG model name: ‘frinx-openconfig-network-instance’","URLs are modular. By changing the URL you can move along the YANG data tree.","very specific URL listing interfaces under one specific area in OSPF under specific VRF","You can create a minimalistic YANG tree out of the URL:"]},{"l":"URL Operations","p":["Each show command supports only one HTTP operation: GET."]},{"l":"GET","p":["GET operation can be issued on both the config and operational datastores. Config datastore reflects how the device is configured. Operational datastore reflects the state of the device. In most cases the information is the same.","Example of a case where the information is not the same (the only difference in requests is config vs operational):","Configuration commands support PUT for create/replace data. This operation requires an HTTP body, which contains the openconfig YANG model of the configuration you want to send to the router. Another operation supported by configuration commands is DELETE, which removes data from the device. Both operations need to be issued on the config datastore.","For modifications of the data, you can also use the PATCH method, which does not replace the entire data structure, only the parts that are different.","Example:","We want to create a new BGP neighbor:","The IOS command is:"]},{"l":"PUT","p":["BODY:","WARNING: PUT operation does not merge data. In this example, if you have already configured some BGP neighbors, this request will REMOVE all of them and create just the one described in the PUT body. The solution is to first issue GET, copy existing configuration and add/change items there, or use the PATCH method.","If we want to DELETE a BGP neighbor, the body is not needed; the URL needs to be specific to the neighbor we want to delete:"]},{"l":"DELETE","p":["This operation will issue the following command:","DELETE operation always removes the last argument of the URL."]},{"l":"OPERATIONAL datasets","p":["Go to operational datasets","Show commands are commands that usually start with 'show' on Cisco devices. The aim is to obtain data from the router."]},{"i":"url-1","l":"URL","p":["GET operation issued on operational datastore"]},{"l":"OPENCONFIG YANG","p":["In case of show commands, this section is a sample output of a particular show command."]},{"l":"OS COMMANDS","p":["In this section we list the actual router commands with sample outputs, where the data obtained and transformed into Openconfig YANG is marked as bold. We list show commands and outputs for each supported device OS.","IOS XR | IOS Classic/XE | Junos | SAOS"]},{"l":"DEVICE YANG","p":["In case of CLI units, the unit parses the output of the CLI command directly into OC YANG. In case of Netconf units, the output is mapped to OC YANG through Device YANG (YANG model supported by the device). In case of Netconf units, the YANG is also written in documentation. This section is a link to XML unit test input testing this operation."]},{"l":"UNIT","p":["Link to github code where this show command is implemented along with unit version range."]},{"l":"CONFIGURATION datasets","p":["Go to configuration datasets"]},{"i":"url-2","l":"URL","p":["PUT operation with the given URL will result in creating data in the config datastore. DELETE operation with the given URL will result in removing data from the config datastore"]},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG","p":["In case of configuration commands, this section represents the HTTP body in PUT operation"]},{"i":"os-commands-1","l":"OS COMMANDS","p":["In this section we list the actual router commands that are mapped to the Openconfig YANG model. Data transformed into Openconfig YANG is marked as bold. We list commands for each supported device OS.","IOS XR | IOS Classic/XE | Junos | SAOS"]},{"i":"device-yang-1","l":"DEVICE YANG","p":["In case of Netconf units, the device YANG represents the command sent to the device in the device YANG model. This section is a link to XML unit test input testing this configuration."]},{"i":"unit-1","l":"UNIT","p":["Link to github code where this config command is implemented along with unit version range."]}],[{"l":"IETF L2VPN YANG"},{"l":"Scenario"},{"i":"l2p2pvpws","l":"L2P2P/VPWS","p":["l2vpn-instance/type == vpws-instance-type only two endpoints"]},{"l":"Local-Local","p":["connection between two local ports on a host (pe-node-id`s of endpoints match)"]},{"i":"ietf--yang","l":"IETF YANG"},{"l":"OPENCONFIG YANG"},{"l":"pe01"}],[{"l":"IETF L2VPN YANG"},{"l":"Scenario"},{"i":"l2p2pvpws","l":"L2P2P/VPWS","p":["l2vpn-instance/type == vpws-instance-type only two endpoints"]},{"l":"Local-Remote","p":["connection between local and remote hosts (pe-node-id`s of endpoints do not match)"]},{"i":"ietf--yang","l":"IETF YANG"},{"l":"OPENCONFIG YANG"},{"l":"pe01"},{"l":"PE2"}],[{"l":"IETF L2VPN YANG"},{"l":"Scenario"},{"i":"l2p2pvpls","l":"L2P2P/VPLS","p":["l2vpn-instance/type == vpls-instance-type Two or more endpoints"]},{"i":"ietf--yang","l":"IETF YANG"},{"l":"OPENCONFIG YANG"},{"l":"pe01"},{"l":"pe02"},{"l":"pe03"}],[{"l":"IETF L3VPN YANG"},{"l":"IETF YANG"},{"l":"OPENCONFIG YANG"}],[{"i":"#","p":["Access control","ACL","ACL interfaces","BGP","CDP","connection point","connection point l2vpn","Discovery protocols","Ethernet interface","Ethernet OAM","Ethernet Virtual Circuit","Ethernet Virtual Private Network","EVC","EVPN","FDP","Hot Standby Router Protocol","HSRP","Interfaces","Internet Protocol Security","IPsec","IS-IS","L2P2P","L2VPN","L3 VLAN interface","L3VPN","l3vpn with BGP","l3vpn with OSPF","LAG interface","Monitoring","MPLS","MPLS LDP","MPLS TE","MPLS TE RSVP","MPLS Tunnel","NetFlow","NetFlow interfaces","Network Instance","Network Instances","OSPF","OSPFv3","PF interfaces","Policy Forwarding","Probes","Protocols","Quality of Service","Routing Policy","SNMP","Spanning Tree Protocol","STP","SYSLOG"]},{"l":"Interfaces"},{"l":"Ethernet interface"},{"l":"LAG interface"},{"l":"L3 VLAN interface"},{"l":"Network Instances"},{"l":"Network Instance"},{"l":"Protocols"},{"l":"BGP"},{"l":"OSPF"},{"l":"OSPFv3"},{"l":"IS-IS"},{"l":"MPLS"},{"l":"MPLS TE"},{"l":"MPLS Tunnel"},{"l":"MPLS TE RSVP"},{"l":"MPLS LDP"},{"l":"Policy Forwarding"},{"l":"PF interfaces"},{"l":"L2P2P"},{"l":"connection point"},{"l":"L2VPN"},{"l":"connection point l2vpn"},{"l":"L3VPN"},{"l":"l3vpn with BGP"},{"l":"l3vpn with OSPF"},{"l":"Discovery 
protocols"},{"l":"CDP"},{"l":"FDP"},{"l":"Monitoring"},{"l":"SNMP"},{"l":"SYSLOG"},{"l":"Probes"},{"l":"Ethernet OAM"},{"l":"Hot Standby Router Protocol"},{"l":"HSRP"},{"l":"Access control"},{"l":"ACL"},{"l":"ACL interfaces"},{"l":"Spanning Tree Protocol"},{"l":"STP"},{"l":"Routing Policy"},{"i":"routing-policy-1","l":"Routing Policy"},{"l":"NetFlow"},{"l":"NetFlow interfaces"},{"l":"Quality of Service"},{"i":"quality-of-service-1","l":"Quality of Service"},{"l":"Ethernet Virtual Private Network"},{"i":"ethernet-virtual-private-network-1","l":"Ethernet Virtual Private Network"},{"l":"Internet Protocol Security"},{"l":"IPsec"}],[{"l":"Access Control List"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534-ios-xr-662","l":"Cisco IOS XR 5.3.4, IOS XR 6.6.2"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xe-1542s","l":"Cisco IOS XE 15.4(2)S"},{"i":"cli-1","l":"CLI"},{"l":"Examples"},{"i":"unit-1","l":"Unit","p":["Link to github : xe-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-3","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-4","l":"CLI","p":["iacl_intf_index, iacl_subintf_index is a conversion of set ."]},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"Access Control List"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"l":"Cisco IOS Classic"},{"l":"CLI","p":["ipv4|ipv6 is a conversion of* eq|neq|range * is a conversion of or , operation is selected by entered port range *eq|neq|range * is a conversion of or , operatioons is selected by entered port range | acl option could be defined by enumeration named options or by number in range 0-255 ** is a conversion of , when true, value is \"established\", when false, there is empty value \"\""]},{"l":"Examples"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-1","l":"CLI","p":["ipv4|ipv6 is a conversion of"]},{"i":"examples-1","l":"Examples"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-2","l":"CLI","p":["ipv4|ipv6 is a conversion of"]},{"i":"examples-2","l":"Examples"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xe-1542s","l":"Cisco IOS XE 15.4(2)S"},{"i":"cli-3","l":"CLI","p":["** is a conversion of , when true, value is \"established\", when false, there is empty value \"\""]},{"i":"examples-3","l":"Examples"},{"i":"unit-2","l":"Unit","p":["Link to github : xe-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-4","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"i":"cli-5","l":"CLI","p":["conversion is ACCEPT = allow, DROP = deny* access-list disable profile * is a conversion of frinx-acl-extension:enabled set to false. 
Default value is true."]},{"i":"unit-4","l":"Unit","p":["Link to github : [saos-unit]"]}],[{"l":"cable DOWNSTREAM CONTROLLER-PROFILE"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["can be either a single number or a list of numbers (0 31 which represents all the values from 0 to 31)"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"cable FIBER-NODE"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["and are in commands input with whitespace dividing name and number (Downstream-Cable 1/0/16)"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"cable RPD"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["and are in commands input with whitespace dividing name and number (Downstream-Cable 1/0/16)","no principal is a conversion of set false principal is a conversion of set true"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"BRIDGE interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS 12"},{"l":"CLI","p":["is parsed from example is BDI10 -> is 10","no shutdown is a conversion of set true shutdown is a conversion of set false no snmp trap link-status is a conversion of set false snmp trap link-status is a conversion of set true"]},{"l":"Unit","p":["Link to github : ios-unit"]}],[{"l":"CABLE interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI","p":["are read from all lines and input into one attribute \"rf-channels\" and are in commands input with whitespace dividing name and number (Downstream-Cable 1/0/16)"]},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"l":"Ethernet interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["is a conversion of no shutdown is a conversion of set true shutdown is a conversion of set false switchport port-security is a conversion of set true no switchport port-security is a conversion of set false can be \"protect\", \"restrict\" or \"shutdown\" can be \"absolute\" or \"inactivity\" switchport port-security aging static is a conversion of set true no switchport port-security aging static is a conversion of set false lldp transmit is a conversion of set true no lldp transmit is a conversion of set false lldp receive is a conversion of set true no lldp receive is a conversion of set false negotiation auto is a conversion of set true no negotiation auto is a conversion of set false cdp enable is a conversion of set true no cdp enable is a conversion of set false is parsed from example is Port-channel3 -> is 3 mode on is a conversion of set to frinx-openconfig-lacp:ON can be \"default\" or \"rj45\" or \"sfp\" can be \"broadcast\" or \"multicast\" or \"unicast\"","is conversion of"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xe-15-16-17","l":"Cisco IOS XE 15, 16, 17"},{"i":"cli-1","l":"CLI","p":["is a conversion of no shutdown is a conversion of set true shutdown is a conversion of set false normal is a conversion of \"\" set \"NORMAL\" fast is a conversion of \"\" set \"FAST\" lldp transmit is a conversion of set true no lldp transmit is a conversion of set false lldp receive is a conversion of 
set true no lldp receive is a conversion of set false negotiation auto is a conversion of set true no negotiation auto is a conversion of set false is parsed from example is Port-channel3 -> is 3 mode on is a conversion of set to frinx-openconfig-lacp:ON can be \"default\" or \"rj45\" or \"sfp\" can be \"broadcast\" or \"multicast\" or \"unicast\" service instance trunk ethernet is conversion of set true* service instance ethernet * is conversion of set false* encapsulation untagged , dot1q * is conversion of set true* encapsulation dot1q * is conversion of set false can be \"ingress\" or \"egress\" can be \"pop\" or \"push\" or \"translate\" can be \"tunnel\" or \"peer\" or \"forward\" can be \"cdp\" or \"vtp\" or \"lacp\" or \"lldp\" or \"mmrp\" or \"mvrp\" or \"stp\" or \"RB\" or \"RC\" or \"RD\" or \"RF\""]},{"i":"unit-1","l":"Unit","p":["Link to github : ios-xe-unit"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-2","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is a conversion of no dampening is a conversion of set false lacp period short is a conversion of set to frinx-openconfig-lacp:FAST no lacp period short is a conversion of set to frinx-openconfig-lacp:SLOW if is not specified then command bundle id mode on is used mode active is a conversion of set to frinx-openconfig-lacp:ACTIVE mode passive is a conversion of set to frinx-openconfig-lacp:PASSIVE ipv6 nd suppress-ra is a conversion of set true","is conversion of"]},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-3","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false no dampening is a conversion of set false lacp period short is a conversion of set to frinx-openconfig-lacp:FAST no lacp period short is a conversion of set to frinx-openconfig-lacp:SLOW if is not specified then command bundle id mode on is used mode active is a conversion of set to frinx-openconfig-lacp:ACTIVE mode passive is a conversion of set to frinx-openconfig-lacp:PASSIVE","is conversion of"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-4","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is a conversion of no dampening is a conversion of set false lacp period short is a conversion of set to frinx-openconfig-lacp:FAST no lacp period short is a conversion of set to frinx-openconfig-lacp:SLOW if is not specified then command bundle id mode on is used mode active is a conversion of set to frinx-openconfig-lacp:ACTIVE mode passive is a conversion of set to frinx-openconfig-lacp:PASSIVE","is conversion of"]},{"i":"unit-3","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-5","l":"CLI","p":["vlan-tagging is a conversion of set TPID_0X8100 delete interfaces disable is a conversion of set true set interfaces disable is conversion of set false","set interfaces unit disable is conversion of set false delete interfaces unit disable is a conversion of set true"]},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-6","l":"CLI","p":["is parsed from example is ae100 -> is 100","delete interfaces disable is a conversion of set true set interfaces disable is 
conversion of set false"]},{"i":"unit-5","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-7","l":"CLI","p":["delete interfaces disable is a conversion of set true set interfaces disable is conversion of set false In the case of set interfaces ms-x/x/x, set iana-if-type:other instead of iana-if-type:ethernetCsmacd","delete interfaces unit disable is a conversion of set true set interfaces unit disable is conversion of set false"]},{"i":"unit-6","l":"Unit","p":["Link to github : junos-unit"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"i":"cli-8","l":"CLI","p":["enable is a conversion of set true disable is a conversion of set false"]},{"i":"unit-7","l":"Unit","p":["Link to github : brocade-unit"]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-9","l":"CLI","p":["is conversion of \"\" inbound, outbound is a conversions of \"\" trust dscp is a conversion of \"\" set true"]},{"i":"unit-8","l":"Unit","p":["Link to github : huawei-unit"]},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"i":"cli-10","l":"CLI","p":["is parsed from example is Ethernet1/1 -> is 1/1","is parsed from example is Bundle-Ether100 -> is 100","* port enable * is a conversion of set true* port disable * is a conversion of set false lacp port timeout short is a conversion of set to frinx-openconfig-lacp:FAST no lacp port timeout short is a conversion of set to frinx-openconfig-lacp:SLOW"]},{"i":"unit-9","l":"Unit","p":["Link to github : dasan-unit"]},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"i":"cli-11","l":"CLI","p":["* port enable port * is a conversion of set true* port disable port * is a conversion of set false can be \"default\" or \"rj45\" or \"sfp\" vs-ingress-filter on is a conversion of set true vs-ingress-filter off is a conversion of set false can be \"all\", \"tagged-only\", \"untagged-only\" can be \"Default-RCOS\" or \"NNI-NNI\" forward-unlearned on is a conversion of set true forward-unlearned off is a conversion of set false resolved-cos-remark-l2 true is a conversion of set true resolved-cos-remark-l2 false is a conversion of set false","from usual range (max 4094)","can be \"all\" or \"vlan-tpid\"","l2-cft enable port is a conversion of set to true l2-cft disable port is a conversion of set to false","rstp enable port is a conversion of set to true rstp disable port is a conversion of set to false mstp enable port is a conversion of set to true mstp disable port is a conversion of set to false","port set port auto-neg on is a conversion of set to true port set port auto-neg off is a conversion of set to false can be auto, ten, hundred, gigabit, ten-gig"]},{"i":"unit-10","l":"Unit","p":["Link to github : saos-unit"]},{"l":"Ciena SAOS 8"},{"i":"cli-12","l":"CLI","p":["can be between 0 and 96","port set port auto-neg on is a conversion of set to true port set port auto-neg off is a conversion of set to false can be auto, ten, hundred, gigabit, ten-gig, forty-gig, hundred-gig"]},{"i":"unit-11","l":"Unit","p":["Link to github : saos-unit"]},{"i":"arris-cer-arris-e6000","l":"Arris CER (Arris E6000)"},{"i":"cli-13","l":"CLI","p":["no shutdown is a conversion of set true shutdown is a conversion of set false"]},{"i":"unit-12","l":"Unit","p":["Link to github : cer-unit"]}],[{"l":"L2VLAN interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Ciena SAOS8"},{"l":"CLI"},{"l":"Unit","p":["Link to github : saos-unit"]}],[{"l":"L3 VLAN 
interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"l":"CLI","p":["is parsed from example is Vlan10 -> is 10","no shutdown is a conversion of set true shutdown is a conversion of set false no ip redirects is a conversion of set false ip redirects is a conversion of set true"]},{"l":"Unit","p":["Link to github : dasan-unit"]}],[{"i":"link-aggregation-group-bundle-interface","l":"Link Aggregation Group (bundle) interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Cisco IOS XE","p":["track shutdown is a conversion of set true no track shutdown is a conversion of set false"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)","p":["is conversion of"]},{"i":"unit-1","l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is conversion of no dampening is a conversion of set false ipv6 nd suppress-ra is a conversion of set true"]},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-1","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is conversion of no dampening is a conversion of set false ipv6 enable is a conversion of set true no ipv6 enable is a conversion of set false","is conversion of"]},{"i":"unit-3","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-2","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100 is parsed from example is aa:bb:cc:dd:ee:ff -> aabb.ccdd.eeff is parsed from example is aa:bb:cc:dd:ee:ff -> aabb.ccdd.eeff","is conversion of no shutdown is a conversion of set true shutdown is a conversion of set false no dampening is a conversion of set false","is parsed from example is Bundle-Ether100 -> is 100","is conversion of"]},{"i":"unit-4","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-3","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100","no shutdown is a conversion of set true shutdown is a conversion of set false is conversion of ethernet cfm is a conversion of set to true no ethernet cfm is a conversion of set to false"]},{"i":"unit-5","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-4","l":"CLI","p":["is parsed from example is ae100 -> is 100","delete interface ae disable is a conversion of set true set interface ae disable is conversion of set false","Device does not support damping on LAG interface."]},{"i":"unit-6","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-5","l":"CLI","p":["is parsed from example is ae100 -> is 100","delete interface ae disable is a conversion of set true set interface ae disable is conversion of set false","inner_vlan_tag, outer_vlan_tag is a conversion of set . 
delete interface ae unit disable is a conversion of set true set interface ae unit disable is conversion of set false rpm_ifc_index , rpm_subintf_index is a conversion of set ."]},{"i":"unit-7","l":"Unit","p":["Link to github : junos-unit"]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-6","l":"CLI","p":["is conversion of"]},{"i":"unit-8","l":"Unit","p":["Link to github : huawei-unit"]},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"i":"cli-7","l":"CLI","p":["is parsed from example is Bundle-Ether100 -> is 100 and prefix is Bundle-Ether Dasan supports two kinds of prefixes (Prefix is settled by lag type)","If the prefix of is 'Trunk', lag type is port trunking","If the prefix of is 'Bundle-Ether', lag type is lacp","vlan add br t/ tagged is only supported by port trunking vlan add br t/ untagged is only supported by port trunking"]},{"i":"unit-9","l":"Unit","p":["Link to github : dasan-unit"]},{"l":"Ciena SAOS8"},{"i":"cli-8","l":"CLI","p":["classifier-precedence is used as **. This field is mandatory in Ciena and unique within parent-port. bin_count can be from \"0\" to \"96\". Default value is \"32\". when ** is set to true, then vlan-untagged-data is used in the sub-port command. it is not possible to set both vlan-untagged-data and vtag-stack."]},{"i":"unit-10","l":"Unit","p":["Link to github : saos-unit"]},{"l":"Arris CER"},{"i":"cli-9","l":"CLI"},{"i":"unit-11","l":"Unit","p":["Link to github : cer-unit"]}],[{"l":"WIDEBAND interface"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE 16"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-xe-unit"]}],[{"i":"internet-protocol-security-ipsec","l":"Internet Protocol Security (IPsec)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"nokia-sros-160","l":"NOKIA SROS 16.0"},{"l":"CLI","p":["no shutdown is a conversion of set to true shutdown is a conversion of set to false"]}],[{"l":"NetFlow"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["Assumption is that monitor map and sampler map configuration already exist on a device."]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"L2P2P configuration"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-vios-1562t","l":"Cisco IOS (VIOS 15.6(2)T)"},{"l":"CLI","p":["If connection points remote and local without subif","If connection points remote and local with subif","If both connection points are type local without subif","If both connection points are type local with subif"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"i":"cli-1","l":"CLI","p":["If connection point type remote","If connection point type local without subif","If connection point type local with subif (for XRv 5.1.3)","If connection point type local with subif (for XRv 6.1.2)","If both connection points are local we can use the same translation code as above .. 
the combined output will look like this example:"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"i":"cli-2","l":"CLI","p":["If connection point type remote","If connection point type local without subif","If both connection points are type local","With subif","If both connection points are type local without subif"]},{"i":"unit-2","l":"Unit","p":["Link to github : brocade-unit"]}],[{"i":"l2vpn-vpls-with-bgp-autodiscovery-configuration","l":"L2VPN (VPLS with BGP autodiscovery) configuration"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-not-fully-tested-yet--vios-does-not-support-vpls","l":"Cisco IOS (not fully tested yet ... vIOS does not support VPLS)"},{"l":"CLI","p":["If connection point type remote","If connection point type local without subif","If connection point type local with subif"]},{"l":"Unit","p":["NOT IMPLEMENTED"]},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"i":"cli-1","l":"CLI","p":["If connection point type remote","If connection point type local without subif","If connection point type local with subif (for XRv 5.1.3)","If connection point type local with subif (for XRv 6.1.2)"]},{"i":"unit-1","l":"Unit","p":["NOT IMPLEMENTED"]}],[{"i":"l2vsi-l2-virtual-switch-instance-virtual-circuit","l":"L2VSI (L2 virtual switch instance virtual circuit)","p":["Interconnects L2VSI with a vlan-based upstream path"]},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["statistics on is a conversion of set true statistics off is a conversion of set false"]},{"l":"Unit"}],[{"i":"l2vsi-l2-virtual-switch-instance","l":"L2VSI (L2 virtual switch instance)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["can have values *l2-cft tagged-pvst-l2pt enable vs * is a conversion of \"tagged-pvst-l2pt\" field set to true *l2-cft tagged-pvst-l2pt disable vs * is a conversion of \"tagged-pvst-l2pt\" field set to false"]},{"l":"Ciena SAOS 8"},{"i":"cli-1","l":"CLI","p":["cpu-subinterface command is sent, if the type of the interface added is iana-if-type:l2vlan","sub-port command is sent, if the type of the interface added is iana-if-type:ieee8023adLag","** in this case needs to have form . This can be derived from : https://github.com/FRINXio/translation-units-docs/blob/master/Configuration%20datasets/interfaces/lag_interface.md"]}],[{"i":"l3vpn-configuration-bgp-as-ce-pe-protocol","l":"L3VPN configuration (BGP as CE-PE protocol)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["CONSTRAINTS Network-instance with name must exist before defined-sets or both must be created in the same transaction. Delete must be executed in reverse order or in the same transaction. 
Policy -route-target-import and -route-target-export must exist on the device before they are used in network-instance."]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"l":"CLI"},{"i":"cisco-ios-xr-661","l":"CISCO IOS XR (6.6.1)"},{"i":"cli-1","l":"CLI","p":["summary-only is a conversion of \"frinx-bgp-extension:summary-only\" set true"]},{"i":"cisco-ios-vios-1562t","l":"Cisco IOS (VIOS 15.6(2)T)"},{"i":"cli-2","l":"CLI"},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-3","l":"CLI"},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-4","l":"CLI","p":["1 to 100000 is conversion of set \"frinx-huawei-network-instance-extension:prefix-limit-from\" 1 to 100 is conversion of set \"frinx-huawei-network-instance-extension:prefix-limit-to\""]},{"l":"Unit","p":["Link to github : huawei-unit"]}],[{"i":"l3vpn-configuration-ospf-as-ce-pe-protocol","l":"L3VPN configuration (OSPF as CE-PE protocol)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-513-612","l":"CISCO IOS XR (5.1.3) (6.1.2)"},{"l":"CLI"},{"i":"cisco-ios-xr-623","l":"CISCO IOS XR (6.2.3)"},{"i":"cli-1","l":"CLI"},{"i":"cisco-ios-xr-661","l":"CISCO IOS XR (6.6.1)"},{"i":"cli-2","l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-vios-1562t","l":"Cisco IOS (VIOS 15.6(2)T)"},{"i":"cli-3","l":"CLI"},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-4","l":"CLI","p":["virtual-router is a conversion of set L3VRF delete routing-instances protocols ospf area interface disable is a conversion of set true set routing-instances protocols ospf area interface disable is a conversion of set false set routing-instances protocols ospf area interface authentication is a conversion of set true delete routing-instances protocols ospf area interface authentication is a conversion of set false"]},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"i":"cli-5","l":"CLI"}],[{"i":"multiprotocol-label-switching---label-distribution-protocol-mpls-ldp","l":"Multiprotocol Label Switching - Label Distribution Protocol (MPLS LDP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["\"enabled\" MUST be set to true when any ldp-configuration is pushed","\"enabled\" set to false, will ignore any additional configuration in the PUT request and will result in 'no mpls ldp'"]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"multiprotocol-label-switching---resource-reservation-protocol-mpls-rsvp","l":"Multiprotocol Label Switching - Resource Reservation Protocol (MPLS RSVP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4","p":["setting to default results in 'bandwidth' meaning setting default device bandwidth","setting to numeric value results in 'bandwidth < number>","transformation: input bandwidth in bps, in XR router as Kbps"]},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10","p":["transformation: k,m,g from JUNOS router translates to thousand, million, billion"]},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"multiprotocol-label-switching---traffic-engineering","l":"Multiprotocol Label Switching - Traffic Engineering 
(MPLS-TE)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["\"enabled\" MUST be set to true when any te-configuration is pushed","\"enabled\" set to false, will ignore any additional configuration in the PUT request and will result in 'no mpls traffic-eng'"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"multiprotocol-label-switching---tunnel","l":"Multiprotocol Label Switching - Tunnel"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models","extensions to MPLS YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["autoroute announce is a conversion of set true no autoroute announce is a conversion of set false load-share is not supported on virtual platform CISCO IOS-XR mpls_tunnel_destination is optional parameter metric absolute command is only valid if autoroute announce is set"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-1","l":"CLI","p":["* set protocols mpls label-switched-path * is a conversion of set true mpls_tunnel_destination is mandatory parameter"]},{"i":"unit-1","l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"Interface policy configuration"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["policy-forwarding YANG model","extensions to policy-forwarding YANG model"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534-ios-xr-662","l":"Cisco IOS XR 5.3.4, IOS XR 6.6.2"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-3","l":"CLI","p":["pf_intf_index, pf_subintf_index is a conversion of set ."]},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-4","l":"CLI"},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"border-gateway-protocol-bgp","l":"Border Gateway Protocol (BGP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST remove-private-AS is a conversion of default-originte is a conversion of next-hop-self is a conversion of if value is \"nexthopself\" no shutdown is a conversion of set true shutdown is a conversion of set false ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST vpnv4 unicast is a conversion of set L3VPN_IPV4_UNICAST"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661-via-netconf","l":"Cisco IOS XR 6.6.1 (via NetConf)"},{"i":"cli-1","l":"CLI","p":["l2vpn evpn is a conversion of set L2VPN_EVPN remove-private-AS is a conversion of default-originte is a conversion of next-hop-self is a conversion of if value is \"nexthopself\" no shutdown is a conversion of set true shutdown is a conversion of set false l2vpn evpn is a conversion of set L2VPN_EVPN"]},{"i":"unit-1","l":"Unit","p":["Link to 
github : xr-unit"]},{"i":"cisco-ios-xr-661-no-netconf","l":"Cisco IOS XR 6.6.1 (no NetConf)"},{"i":"cli-2","l":"CLI","p":["vpnv4 unicast is a conversion of set L3VPN_IPV4_UNICAST"]},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-3","l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST remove-private-AS is a conversion of default-originte is a conversion of ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST"]},{"i":"unit-3","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xe-031301s","l":"Cisco IOS XE 03.13.01.S","p":["bgp log-neighbor-changes is a conversion of set true no bgp log-neighbor-changes is a conversion of set false default-information originate is a conversion of set true no default-information originate is a conversion of set false neighbor as-override is a conversion of set true neighbor fall-over bfd is a conversion of set true transport connection-mode passive is a conversion of set true route-reflector-client is a conversion of set true remove-private-as is a conversion of set \"frinx-openconfig-bgp-types:PRIVATE_AS_REMOVE_ALL\" no-prepend is a conversion of set true replace-as is a conversion of set true neighbor version 4 is a conversion of set \"frinx-bgp-extension:VERSION_4\" auto-summary is a conversion of set true no auto-summary is a conversion of set false* redistribute connected route-map * is a conversion of set true no redistribute connected is a conversion of set false* redistribute static route-map * is a conversion of set true no redistribute static is a conversion of set false synchronization is a conversion of set true no synchronization is a conversion of set false"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-4","l":"CLI","p":["activate is a conversion of set true deactivate is a conversion of set false"]},{"i":"unit-4","l":"Unit","p":["Link to github : junos-unit"]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-5","l":"CLI","p":["auto-discovery is conversion of set \"frinx-bgp-extension:transport\" keepalive is conversion of set \"timer_mode\" 0-21845 is conversion of set \"time_before\" 3-65535 is conversion of set \"timer_after\" direct, static is conversions of set \"import_route\""]},{"i":"unit-5","l":"Unit","p":["Link to github : huawei-unit"]}],[{"i":"intermediate-system-to-intermediate-system-is-is","l":"Intermediate System to Intermediate System (IS-IS)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"l":"CLI","p":["point-to-point is a conversion of set POINT_TO_POINT","value frinx-openconfig-isis-types:IPV6 is to be converted to ipv6 value frinx-openconfig-isis-types:UNICAST is to be converted to unicast value LEVEL_1 is to be converted to level-1 value LEVEL_2 is to be converted to level-2 value LEVEL_1_2 is to be converted to level-1-2"]},{"i":"cisco-ios-xr-661cli","l":"Cisco IOS XR 6.6.1(CLI)"},{"i":"cli-1","l":"CLI","p":["value frinx-isis-extension:NOT_SET is to be converted to max-link-metric value frinx-isis-extension:LEVEL_1 is to be converted to max-link-metric level 1 value frinx-isis-extension:LEVEL_2 is to be converted to max-link-metric level 2","value frinx-openconfig-isis-types:IPV6 is to be converted to ipv6 value frinx-openconfig-isis-types:UNICAST is to be converted to unicast","is converted 
from .","if is LEVEL_1, then is set as level-1","if is LEVEL_2, then is set as level-2","if is LEVEL_1_2, then is set as level-1-2"]}],[{"i":"open-shortest-path-first-ospf","l":"Open Shortest Path First (OSPF)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands","p":["include-stub is a conversion of MAX_METRIC_INCLUDE_STUB in the include list of the max-metric-timer external-lsa is a conversion of MAX_METRIC_INCLUDE_TYPE2_EXTERNAL in the include list of the max-metric-timer summary-lsa is a conversion of MAX_METRIC_SUMMARY_LSA in the include list of the max-metric-timer"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["bfd fast-detect is a conversion of set true bfd fast-detect disable is a conversion of set false mpls ldp sync is a conversion of set true mpls ldp sync disabled is a conversion of set false passive enable is a conversion of set true passive disabled is a conversion of set false","** value MAX_METRIC_ON_SYSTEM_BOOT is to be converted to on-startup** value MAX_METRIC_ON_SWITCHOVER is to be converted to on-switchover"]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-2","l":"CLI","p":["delete protocols ospf area interface disable is a conversion of set true set protocols ospf area interface disable is a conversion of set false"]},{"i":"unit-2","l":"Unit","p":["Link to github : junos-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-3","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to github : junos-unit"]}],[{"i":"open-shortest-path-first-v3-ospfv3","l":"Open Shortest Path First v3 (OSPFv3)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI","p":["value STUB_ROUTER_MAX_METRIC is to be converted to max-metric value STUB_ROUTER_R_BIT is to be converted to r-bit value STUB_ROUTER_V6_BIT is to be converted to v6-bit"]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"Static Route"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS 12"},{"l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST is parsed from is parsed from"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"i":"cli-1","l":"CLI","p":["ipv4 unicast is a conversion of set IPV4_UNICAST ipv6 unicast is a conversion of set IPV6_UNICAST is parsed from is parsed from"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"VLAN"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["no shutdown is a conversion of set ACTIVE shutdown is a conversion of set SUSPENDED"]},{"i":"dasan-nos-sfurr56p5","l":"Dasan NOS SFU.RR.5.6p5"},{"i":"cli-1","l":"CLI","p":["if is true","if is false"]},{"l":"Ciena SAOS 614"},{"i":"cli-2","l":"CLI","p":["should be pure numeric, converted from oc-vlan-types:TPID_TYPES from openconfig enable is a conversion of to true disable is a conversion of to false is an enumeration trust-mode - options are client-trusted, server-trusted, dualrole-trusted and untrusted"]}],[{"i":"configure-network-instance-vrf","l":"Configure network 
instance (VRF)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-unit"]},{"l":"Configure default network instance"},{"i":"url-1","l":"URL"},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG","p":["YANG models","vlans definition - vlans policy-forwarding definition - policy-forwarding protocols definition - protocols interface-name is a conversion of each interface name for this network-instance cisco-ipv6-config is a conversion of global ipv6 configuration for device and consist of: unicast-routing and cef whose values can only be true or false"]},{"i":"os-commands-1","l":"OS Commands"},{"i":"cisco-ios-classic-1524s5","l":"Cisco IOS Classic (15.2(4)S5)"},{"i":"cli-1","l":"CLI"}],[{"l":"Routing Policy"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-12-ios-15-ios-xe-15-ios-xe-16-ios-xe-17","l":"Cisco IOS 12, IOS 15, IOS XE 15, IOS XE 16, IOS XE 17","p":["permit is a conversion of set to frinx-cisco-routing-policy-extension:PERMIT deny is a conversion of set to frinx-cisco-routing-policy-extension:DENY","permit is a conversion of set to community-member deny is a conversion of set to frinx-openconfig-bgp-policy-extension:community-member-deny","permit is a conversion of set to frinx-cisco-routing-policy-extension:PERMIT deny is a conversion of set to frinx-cisco-routing-policy-extension:DENY set community (no-export) is a conversion of set to frinx-openconfig-bgp-types:NO_EXPORT set community (no-advertise) is a conversion of set to frinx-openconfig-bgp-types:NO_ADVERTISE* match tag * is a tag element in match clause set to frinx-cisco-routing-policy-extension:tags"]},{"i":"cisco-ios-xr-534-ios-xr-662","l":"Cisco IOS XR 5.3.4, IOS XR 6.6.2"},{"l":"CLI","p":["is parsed from .","If is \"exact\", then is not set.","If matches to pattern of .., then is set as \"le ge \".","* destination in * is a conversion of set to ANY* not destination in * is a conversion of set to INVERT","as-path length le is a conversion of set to frinx-openconfig-policy-types:ATTRIBUTE_LE as-path length ge is a conversion of set to frinx-openconfig-policy-types:ATTRIBUTE_GE as-path length eq is a conversion of set to frinx-openconfig-policy-types:ATTRIBUTE_EQ","community match-any is a conversion of set to ANY community match-every is a conversion of set to ALL","drop is a conversion of set to REJECT_ROUTE done is a conversion of set to ACCEPT_ROUTE pass is a conversion of set to PASS_ROUTE","set community (no-export) is a conversion of set to frinx-openconfig-bgp-types:NO_EXPORT set community (no-advertise) is a conversion of set to frinx-openconfig-bgp-types:NO_ADVERTISE set community (local-as) is a conversion of set to frinx-openconfig-bgp-types:NO_EXPORT_SUBCONFED","* as-path in * is a conversion of set to ANY* not as-path in * is a conversion of set to INVERT"]},{"l":"Examples"},{"i":"junos-141x53-d408","l":"Junos 14.1X53-D40.8"},{"i":"cli-1","l":"CLI"},{"i":"examples-1","l":"Examples"}],[{"i":"aaa---authentication-authorization-accounting","l":"AAA - Authentication Authorization Accounting"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"l":"CLI","p":["local, radius is conversions of set to \"authentication-method\" local, radius is conversions of set to \"accounting-method\" 
start-fail is conversion of set to \"fail-policy\" online is conversion of set to \"fail-policy-mode\" telnet, terminal, ssh, ftp is conversions of set to \"frinx-huawei-aaa-extension:service-type\" 1-15 is conversion of set to \"frinx-huawei-aaa-extension:privilege-level\""]},{"l":"Unit","p":["Link to GitHub : huawei-unit"]},{"l":"SAOS 6"},{"i":"cli-1","l":"CLI","p":["limited, admin, super, diag is conversion of set to \"frinx-ciena-aaa-extension:access-level\""]},{"i":"unit-1","l":"Unit","p":["Link to GitHub : saos6-unit"]}],[{"i":"broadcast-containment-broadcast-containment-filters","l":"Broadcast-Containment (Broadcast-containment filters)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["enable is conversion of set true disable is conversion of set false"]},{"l":"Unit"}],[{"l":"Configure CDP interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["cdp enable is conversion of \"enabled\": true no cdp enable is conversion of \"enabled\": false"]},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-xrv-513-and-xrv-612-tested","l":"Cisco IOS XR (XRv 5.1.3 and XRv 6.1.2 tested)"},{"i":"cli-1","l":"CLI","p":["cdp is conversion of \"enabled\": true no cdp is conversion of \"enabled\": false"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"i":"cli-2","l":"CLI","p":["cdp enable is conversion of \"enabled\": true no cdp enable is conversion of \"enabled\": false"]},{"i":"unit-2","l":"Unit","p":["Link to github : brocade-unit"]}],[{"l":"Configure FDP interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"l":"CLI","p":["fdp enable is conversion of \"enabled\": true no fdp enable is conversion of \"enabled\": false"]},{"l":"Unit","p":["NOT IMPLEMENTED"]}],[{"l":"Configure STP interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"brocade-v560ft163","l":"Brocade (V5.6.0fT163)"},{"l":"CLI","p":["If /stp/interfaces/interface/ exists","If /interfaces/interface/ exists and /stp/interfaces/interface/ does not exist"]},{"l":"Unit","p":["NOT IMPLEMENTED"]}],[{"i":"ethernet-oam--ethernet-cfm","l":"Ethernet OAM / Ethernet CFM"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-662","l":"Cisco IOS XR 6.6.2"},{"l":"CLI","p":["ethernet cfm is a conversion of set to true no ethernet cfm is a conversion of set to false efd is a conversion of set to true no efd is a conversion of set to false"]},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"ethernet-virtual-circuit-evc","l":"Ethernet Virtual Circuit (EVC)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xe-16","l":"Cisco IOS XE 16.*"},{"l":"CLI","p":["* ethernet evc * is creating evc configuration with name* no ethernet evc * is deleting evc configuration with name"]},{"l":"Unit","p":["Link to github : ios-xe-evc-unit"]}],[{"i":"ethernet-virtual-private-network-evpn","l":"Ethernet Virtual Private Network (EVPN)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-661-via-netconf","l":"Cisco IOS XR 6.6.1 (via NetConf)"},{"l":"CLI","p":["evpn is a conversion of set to 
true no evpn is a conversion of set to false is parsed from example is Bundle-Ether100 -> is 100 is parsed from Bundle-Ether is a conversion of set to \"iana-if-type:ieee8023adLag\" mode port-active is a conversion of set to \"frinx-es-lb-mode:PORT-ACTIVE\" mode single-active is a conversion of set to \"frinx-es-lb-mode:SINGLE-ACTIVE\""]},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661-no-netconf","l":"Cisco IOS XR 6.6.1 (no NetConf)"},{"i":"cli-1","l":"CLI","p":["evpn is a conversion of set true no evpn is a conversion of set false cost-out is a conversion of set true no cost-out is a conversion of set false or null"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"hot-standby-router-protocol-hsrp","l":"Hot Standby Router Protocol (HSRP)"},{"l":"URL"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]}],[{"i":"l2-cft-layer-2-control-frame-forwarding","l":"L2-Cft (Layer 2 Control Frame Forwarding)"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"l":"CLI","p":["can be can be <802.1x | all-bridges-block | cisco-cdp | cisco-dtp | cisco-pagp | cisco-pvst | cisco-stp-uplink-fast | cisco-udld | cisco-vtp | elmi | esmc | garp-block | gmrp | gvrp | lacp | lacp-marker | lldp | oam | ptp-peer-delay | vlan-bridge | xstp> if == mef-ce1 -> can be also bridge-block if == mef-ce2 -> can be also can be "]},{"l":"Unit"}],[{"i":"logging-syslog","l":"Logging (syslog)"},{"l":"URL"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"l":"CLI"},{"l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-623","l":"Cisco IOS XR 6.2.3"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"cisco-ios-xr-661","l":"Cisco IOS XR 6.6.1"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : xr-unit"]}],[{"l":"Privilege"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"cisco-ios-12-15-16--ios-xe-15-16-17","l":"Cisco IOS 12, 15, 16 / IOS XE 15, 16, 17"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-privilege-unit"]}],[{"l":"Probes"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"i":"junos-182r1-s21","l":"Junos 18.2R1-S2.1"},{"l":"CLI","p":["set services rpm probe delegate-probes is a conversion of < delegate-probes> set true set services rpm probe test target address is a conversion of < target-type> set address"]},{"l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"Quality of Service"},{"l":"URL"},{"l":"OPENCONFIG YANG"},{"i":"url-1","l":"URL"},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG"},{"i":"url-2","l":"URL"},{"i":"openconfig-yang-2","l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-12-15-16--ios-xe-15-16-17","l":"Cisco IOS 12, 15, 16 / IOS XE 15, 16, 17"},{"l":"CLI"},{"l":"Usage","p":["A term marks one or more conditions depending on the class-map type.","When class-map type: match-all, there is just one term, that MUST be called 'all'.","When class-map type: match-any, the terms are numbered from 1 ... number_of_conditions. 
In this case, the {{term_id}} marks the line, where the conditions specified in conditions is written."]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-1","l":"CLI"},{"i":"usage-1","l":"Usage","p":["A term marks one or more conditions depending on the class-map type.","When class-map type: match-all, there is just one term, that MUST be called 'all'.","When class-map type: match-any, the terms are numbered from 1 ... number_of_conditions. In this case, the {{term_id}} marks the line, where the conditions specified in conditions is written.","Example:","will create 5 terms numbered from 1 to 5, where term 1 contains condition for qos-group, term 2 contains condition for mpls, etc.","Writing will occur in ascending order. Reading is the same, first condition is put into first term, etc."]},{"i":"huawei-ne5000e-v800r009c10spc310","l":"Huawei NE5000E (V800R009C10SPC310)"},{"i":"cli-2","l":"CLI"},{"l":"Unit","p":["Link to github : huawei-unit"]},{"i":"ciena-saos-614","l":"Ciena SAOS 6.14"},{"i":"cli-3","l":"CLI","p":["traffic-profiling enable is a conversion of {{qos_enabled}} set to true traffic-profiling disable is a conversion of {{qos_enabled}} set to false","{{scheduler_type}} can be port_policy- this issues traffic-profiling commands. The {{scheduler_seq}} will be always 0, there can be just one scheduler of this type.{{scheduler_type}} can be queue_group_policy- this issues traffic-services command. The {{scheduler_seq}} is represented by queue number."]}],[{"l":"Relay Agent"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"SAOS 6"},{"l":"CLI","p":["true or false is conversion of set to \"enable\" rid-string, device-hostname, device-mac is conversion of set to \"remote_id_type\" true or false is conversion of set to \"replace-option82\""]},{"l":"Unit","p":["Link to GitHub : saos6-unit"]},{"l":"SAOS 8"},{"i":"cli-1","l":"CLI","p":["true or false is conversion of set to \"enable\" rid-string, device-hostname, device-mac is conversion of set to \"remote_id_type\" true or false is conversion of set to \"replace-option82\""]},{"i":"unit-1","l":"Unit","p":["Link to GitHub : saos8-unit"]}],[{"i":"simple-network-management-protocol-snmp","l":"Simple Network Management Protocol (SNMP)"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"i":"url-1","l":"URL"},{"i":"openconfig-yang-1","l":"OPENCONFIG YANG"},{"i":"url-2","l":"URL"},{"i":"openconfig-yang-2","l":"OPENCONFIG YANG"},{"l":"OS Configuration Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Link to github : ios-unit"]},{"i":"cisco-ios-xr-534","l":"Cisco IOS XR 5.3.4"},{"i":"cli-1","l":"CLI","p":["By default enabled on all interfaces. 
To disable, use:","To enable disabled interfaces use:","enabled:true is a conversion of snmp set enabled","enabled:false is a conversion of snmp set disabled"]},{"i":"unit-1","l":"Unit","p":["Link to github : xr-unit"]},{"i":"junos-173r110","l":"Junos 17.3R1.10"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to github : junos-unit"]}],[{"l":"System-wide services and functions"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"IOS XE ASR920"},{"l":"CLI"},{"l":"Unit","p":["Link to github : [ios-xe-unit]"]}],[{"i":"#","p":["Network Instances","Protocols","BGP summary","BGP RIB","OSPF summary","Discovery protocols","CDP","LLDP"]},{"l":"Network Instances"},{"l":"Protocols"},{"l":"BGP summary"},{"l":"BGP RIB"},{"l":"OSPF summary"},{"l":"Discovery protocols"},{"l":"CDP"},{"l":"LLDP"}],[{"i":"bgp-global--neighbors","l":"BGP global + neighbors"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"BGP RIB"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["'*' (valid route) translates to \"valid-route\" : true'i' (internal) translates to \"origin\": \"i\""]},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]}],[{"i":"show-router-ospf-type-id-interfaces","l":"Show router ospf type, ID, interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI","p":["Supporting command to determine OPSF - VRF relationships:","Supporting command to show interfaces"]},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"Interfaces"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"CER Arris devices"},{"l":"CLI"},{"l":"Unit","p":["Link to GitHub : cer-unit"]}],[{"l":"Platform"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Cisco IOS Classic"},{"l":"CLI"},{"l":"Unit","p":["Link to GitHub : ios-unit"]},{"i":"cisco-ios-xe-15-16-17","l":"Cisco IOS XE 15, 16, 17"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to GitHub : ios-xe-unit"]},{"l":"Ciena SAOS 6"},{"i":"cli-2","l":"CLI"},{"i":"unit-2","l":"Unit","p":["Link to GitHub : saos6-unit"]},{"l":"Ciena SAOS 8"},{"i":"cli-3","l":"CLI"},{"i":"unit-3","l":"Unit","p":["Link to GitHub : saos8-unit"]},{"l":"CER Arris devices"},{"i":"cli-4","l":"CLI"},{"i":"unit-4","l":"Unit","p":["Link to GitHub : cer-unit"]}],[{"l":"Show CDP interfaces and neighbors"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE 
(15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"Show LLDP interfaces and neighbors"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Commands"},{"i":"cisco-ios-classic-1524s5--xe-1533s2","l":"Cisco IOS Classic (15.2(4)S5) / XE (15.3(3)S2)"},{"l":"CLI"},{"l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : ios-unit"]},{"i":"cisco-xr-612","l":"Cisco XR 6.1.2"},{"l":"Netconf"},{"l":"Device YANG","p":["Link to github : xml-sample"]},{"i":"unit-1","l":"Unit","p":["Unit version range: 3.1.1.rc1-frinx","Link to github : xr-unit"]}],[{"l":"System"},{"l":"URL"},{"l":"OPENCONFIG YANG","p":["YANG models"]},{"l":"OS Configuration Commands"},{"l":"Ciena SAOS 6"},{"l":"CLI"},{"l":"Unit","p":["Link to GitHub : saos6-unit"]},{"l":"Ciena SAOS 8"},{"i":"cli-1","l":"CLI"},{"i":"unit-1","l":"Unit","p":["Link to GitHub : saos8-unit"]}],[{"l":"Table of Contents","p":["=================","Base Handlers","Base readers","Base writers","Best practices for handlers (readers/writers)","Chunk templates","CLI Init Translation Unit","CLI Translation Unit","Device registration","Documentation","Finding mapping between device and the model","Handlers","Mandatory interfaces to implement","Module structure","NETCONF Unified Translation Unit","OpenConfig to device config mapping","Plaintext parsing hints","Readers","TranslateUnit","Translation Framework","Translation units for different device versions","Translation Units in general","Util classes","Writers"]},{"l":"Translation Framework","p":["The translation framework allows translation units to:","Add YANG model into the system","Register Handlers for all or a subset of nodes defined in the YANG model","Register the entire unit into the system, which is then able to perform"]},{"l":"OpenConfig to device config mapping"},{"l":"Finding mapping between device and the model","p":["Preferred YANG models for device config and operational data are OpenConfig models.","These models usually represents configuration part in container config and operational part in container state. Operational data is config data + operational data.","This site http://ops.openconfig.net/branches/master/ may be used for better browsing in OpenConfig YANG models. Another option is to generate YANG tree representation by using generate_html.sh in https://github.com/FRINXio/openconfig.","YANG models used in UniConfig framework need to be located in https://github.com/FRINXio/openconfig. In case the desired functionality is not modeled yet, you can create new YANG with its own structure or it can augment existing OpenConfig models. Guideline, how to write OpenConfig models can be found at http://www.openconfig.net/docs/style-guide/."]},{"l":"Documentation","p":["There is translation-units-docs page as a single point of truth for mapping. Use __ notation for variables in the templates. This notation is postman compatible."]},{"l":"Translation Units in general"},{"l":"Module structure","p":["Translation unit is a self contained project which implements a mapping between OpenConfig based YANG models and device specific configuration. It is used by the FRINX ODL to perform translation between device specific configuration model and standard (OpenConfig) models. 
A unit usually consists of:","Handlers","Readers","Writers","TranslateUnit implementation","RPCs"]},{"l":"Handlers","p":["Each complex node in YANG (container, list, augment...) should have a dedicated handler (Reader, Writer)","This enables extensibility, readability and the framework can easily filter and process the data this way","Unless there is a need to also handle child nodes, in which case register the handler using subtreeAdd method from the registries","There are 2 types of handlers: Readers (Read operation) and Writers (Create, Update, Delete operation)","One can implement just the readers or both readers and writers for YANG models. Writers must have counterpart readers because of reconciliation.","Readers and Writers should use the InstanceIdentifier parameter they receive in readCurrentAttributes or writeCurrentAttributes methods to find information about keys for their parent nodes. E.g. Reader registered under ID: /interfaces/interface/config will always receive keyed version of that ID: /interface/interface[Loopback0]/config. So it can use method firstKeyOf on InstanceIdentifier to get the keys.","RWUtils class contains methods for InstanceIdentifier manipulation.","Readers and writers can be easily tested and it is necessary to provide unit tests for all of them. It's important to cover readCurrentAttributes and writeCurrentAttributes with all possible scenarios (all data there, no data there, partial data there...)","Writers may use Preconditions.checkArgument() before accessing the device. Fail of the precondition check does not invoke default rollback (opposite operation) on the writer where precondition is located."]},{"l":"Base Handlers","p":["When a handler for the same YANG node is implemented to conform various devices, it tends to lead to a lot of boilerplate and duplicate code. Therefore, we should implement a base handler for such handlers. How does it work:","create a base-project (if there isn't any) to group base handlers (eg. for an interface handler, choose interface-base project)","each base handler needs to be abstract and implement same interfaces as the original handler","extract common functionality in the base handler. Common functionality means that it will conform the majority of the original handlers. If a handler does not share the extracted functionality, it needs to override original interface methods, to hide the extracted functionality.","let original handlers extend base abstract handler"]},{"l":"CLI Translation Unit","p":["CLI Translation units are located in https://github.com/FRINXio/cli-units repository. JAVA is used in CLI translation units."]},{"l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or augmentation nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds","Readers should always use overloaded blockingRead method which takes in the ReadContext since that method performs caching internally","Use full version of commands e.g. 
show running-config interface instead of sh run int"]},{"l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG. These interfaces also contain util methods which may be used for better manipulation with data. For more information about methods please read javadocs.","CliConfigListReader- implement this interface if target composite node in YANG is list and represents config data.","CliConfigReader- implement this interface if target composite node in YANG is container or augmentation and represents config data.","CliOperListReader- implement this interface if target composite node in YANG is list and represents operational data.","CliOperReader- implement this interface if target composite node in YANG is container or augmentation and represents operational data.","In cases where you want to invoke multiple readers on reading one YANG node, extend following abstract classes:","CompositeListReader- extend this abstract class if multiple list readers need to be invoked when reading specific list in YANG.","CompositeReader- extend this abstract class if multiple readers need to be invoked when reading specific node in YANG.","A practical example of their usage is reading network instance based on it's type. All child readers need to implement a check when the particular reader should be invoked or the parent reader should move on to the next reader.","For example child reader for bgp (located under protocol) needs to check if identifier in protocol has value BGP. Otherwise reader for bgp will be invoked even if protocol identifier is OSPF."]},{"l":"Util classes","p":["ParsingUtils- use methods of this util class if you want to parse plaintext to java object builder"]},{"l":"Plaintext parsing hints","p":["Use as specific regular expressions when parsing CLI output as possible","For Cisco CLI devices avoid using section and other advanced formatting parameters. Only | include | exclude and | begin are allowed.","Use CONFIG data as the source of truth when parsing information from device. Except when parsing state containers (or containers explicitly marked as config false).","I.e. use sh run| include router ospf instead of sh ospf when retrieving ospf routers list.","In some cases, it is not possible to just use config data e.g. sh run interface does not show any data for interfaces that have no configuration. In this case it is necessary to use operational information from e.g. sh ip int brief","Use following pattern when parsing multiline output from CLI, where it is difficult to extract lines and their relationships","I.e. when parsing configured BGP neighbors per address family following command can be used: ** sh run | include router bgp| address-family|^ neighbor which results in:","This output can then be parsed by:","Remove newlines to get a single line of string","Replace \"router\" with \"\\nrouter\" to separate bgp routers per line","Find the line that matches required router bgp","Take that line and replace \"address-family\" with \"\\naddress-family\" to get address-family neighbors per line"]},{"l":"Base Readers","p":["Each base reader should contain abstract methods:","String getReadCommand()- each child reader should fill in the read command used to get information needed for this reader. Arguments may vary and they are used to be more specific in the read command (eg. 
when creating a command to gather information about a specific interface, you may want to pass interface name as argument).","Pattern getLine(\\args>)- there may be more such methods and they are used to get the regular expression needed to parse output of the command (eg. in case of interface reader, you will create methods getDescriptionLine, getShutdownLine etc.)","Note: naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node","A writer should check whether the command it executed was handled by the device properly (by checking the output) and if not throw one of the Write/Update/Delete FailedException","Chunk templating framework is preferred to use in writers it gives us:","Null safety","if/loop etc. inside templates","Default values and many more","Use full version of commands e.g. configure terminal instead of conf t"]},{"i":"mandatory-interfaces-to-implement-1","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). These interfaces also contain util methods which may be used for better manipulation with data. For more information about methods please read javadocs.","All writers override updateCurrentAttributes method and avoid delete/write combination, unless specified in a comment.","CliListWriter- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","CliWriter- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter.","CompositeWriter- extend this abstract class when multiple writers need to be invoked on one YANG node. The writers need to implement a check whether or not should they be invoked."]},{"l":"Base Writers","p":["Each base writer should contain abstract methods:","String updateTemplate(Config before, Config after)- this method returns Chunk template used for writing and updating data on the device.","String deleteTemplate(Config data)- this method returns Chunk template used for deleting data from device.","Note: if updating data is done differently than writing new data, method String writeTemplate(Config data) might be used as well."]},{"l":"Chunk Templates","p":["Each original writer transformed to use a base writer should have all it's templates written in Chunk. We extended Chunk to achieve easier manipulation with data. There is now a new filter called update. 
It's usage is following:","\"{$data|update(mtu,mtu `$data.mtu`\\n,no mtu\\n)}\"","$data represents the data structure on which we check if it was updated from the previous state.","mtu first argument represents the name of the field that should be checked within the $data","mtu `$data.mtu`\\n second argument represents the actual string that will be sent to the device if the value of the field named in first argument was changed or didn't exist before","no mtu\\n third argument represents the actual string that will be sent to the device if the value of the field named in first argument was deleted","optional true fourth argument, if present, lets the filter know it should send both outputs to the device, first the delete string (third argument) then the update string (second argument)","Update filter does not send any of the strings to the device, if the value did not change.","When using this filter in updateTemplate method, you must use fT() method (format template) with one pair of the arguments being \"before\", before to let the template know what data represents the previous state.","Note: unfortunately, Opendaylight generates boolean fields instead of Boolean and Chunk does not work with boolean fields in the same way as any other object fields. Therefore for boolean values (eg. shutdown), you cannot use update filter and checking for changes needs to be done in a traditional way."]},{"l":"TranslateUnit","p":["Blueprint example of injecting TranslationUnitCollector to IosXRInterfaceUnit:","Handlers(readers/writers) need to be registered in this method. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - should be passed to readers/writers.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must specify device type and device version during registration. Snippet below shows registration of IosXRInterfaceUnit for device type \"ios xr\" all versions starting with \"5\".","Implementation of TranslateUnit must implement these methods:","Instance-identifier in generic reader/writer must be without keys pointing to the target composite node used in implemented reader/writer.","Instance-identifiers for YANG container and list (not for augmentations and nodes behind augmentations) are automatically generated to IIDs class (used in examples bellow) during build of openconfig project.","Ordering of writers- writers are stored in a linear structure and are invoked in order of registration. When registering a writer a relationship with another writer or set of writers can be expressed using addBefore, addAfter, subtreeAddBefore, subtreeAddAfter methods. E.g. InterfaceWriter and VRFInterfaceWriter should have a relationship: InterfaceWriter -> VRFInterfaceWriter so that first an interface is created and only then assigned to VRF. Note: VRF writer should be between them. If the order is not expressed during registration, commands might be executed on device in an unpredictable/invalid order.","Return RPC services implemented in the translation unit. Parameter context.getTransport() returns Cli object containing methods for communication with a device via CLI - may need to be passed to RPC implementations.","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (Openconfig) translate unit\")","Return YANG models containing composite nodes handled by handlers(readers/writers). 
Default implementation returns empty Set if no handlers are implemented.","rRegistry.add","rRegistry.addNoop","rRegistry.subtreeAdd","Set getYangSchemas()","Set getRpcs(@Nonnull Context context)","String toString()","This method should also register for general Openconfig checks:","Translate unit class must implement interface TranslateUnit. Naming convention for translate unit class is device-type+openconfig-domain+Unit (e.g. IosXrInterfaceUnit). Translate unit class is usually instantiated, initialized and closed from Blueprint.","Use for writers handling data of whole composite node subtrees. This ensures that if only a child node is updated, the writer gets triggered. Method subtreeAdd requires a set of IIDs for all handled children, the IIDs must start from the reader itself, not from root.","Use to register noop writers","Use when a reader implementation also fills composite child nodes of target composite node. Method subtreeAdd requires a set of IIDs for all handled children, the IIDs must start from the reader itself, not from root.","Use when common GenericConfigListReader, GenericConfigReader, GenericOperListReader or GenericOperReader need to be registered.","Use when common GenericListWriter or GenericWriter are registered.","void provideHandlers(@Nonnull ModifiableReaderRegistryBuilder rRegistry, @Nonnull ModifiableWriterRegistryBuilder wRegistry, @Nonnull Context context)","wRegistry.add","wRegistry.subtreeAdd"]},{"l":"CLI Init Translation Unit","p":["Init translation unit does not contain readers and writers but it only contains implementation of TranslateUnit. There should be only one init translation unit per device type. Purpose of the init TU is to setup CLI prompt and define rollback strategy.","The implementation of TranslateUnit needs to override methods:","SessionInitializationStrategy getInitializer(@Nonnull final RemoteDeviceId id, @Nonnull final CliNode cliNodeConfiguration)","Implement and return device specific SessionInitializationStrategy where:","Setup device CLI terminal with attributes like width and length allowing to display infinite output.","Enter desired CLI mode which will be used as default - every reader and writer gets CLI prompt in this state (e.g. EXEC mode for IOS, config mode for IOS-XR, cli mode for Junos)","String toString()","Return unique string among all translation units which will be used as ID for the registration of the translation unit (e.g. \"Junos cli init (FRINX) translate unit\").","These methods may be overridden if necessary:","getPreCommitHook()- method that is invoked before actual commit is written into device. For example this method can enter configuration mode.","getCommitHook()- method that invokes actual commit and should catch any error on commit. Also it should handle any post-commit actions when the commit was successful.","getPostFailedHook()- method that is invoked when commit fails. 
Should implement aborts or revert strategies.","Methods like getYangSchemas, getRpcs should return empty sets and method provideHandlers should return nothing, just use the read registry and write registry to register handlers.."]},{"l":"NETCONF Unified Translation Unit","p":["Unified translation units are located in https://github.com/FRINXio/unitopo-units repository.","Kotlin is used as prefered programming language in NETCONF translation units because it provides type aliases and better null-safety."]},{"i":"readers-1","l":"Readers","p":["Readers are handlers responsible for reading and parsing the data coming from a device","There are 2 types of readers: Reader and ListReader. Reader can be used to handle container or argument nodes and ListReader should handle list nodes from YANG.","Both types need to implement readCurrentAttributes to fill the builder with appropriate values","ListReader needs to also implement getAllIds() where it retrieves a key for each item to be present in current list. After the list is received, framework will invoke readCurrentAttributes for each item from getAllIds"]},{"i":"mandatory-interfaces-to-implement-2","l":"Mandatory interfaces to implement","p":["Each reader needs to implement one of these interfaces based on type of target node in YANG.For more information about methods please read javadocs.","ConfigListReaderCustomizer- implement this interface if target composite node in YANG is list and represents config data.","ConfigReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents config data.","OperListReaderCustomizer- implement this interface if target composite node in YANG is list and represents operational data.","OperReaderCustomizer- implement this interface if target composite node in YANG is container or augmentation and represents operational data."]},{"i":"base-readers-1","l":"Base Readers","p":["Each base reader for netconf readers should be generic. The generic marks the data element within device YANG that is being parsed into. The base reader should contain abstract methods:","fun readIid(): InstanceIdentifier- each child reader should fill in the device specific InstanceIdentifier that points to the information needed for this reader. Arguments may vary and they are used to be more specific IID (eg. when creating an IID to gather information about a specific interface, you may want to pass interface name as argument).","fun readData(data: T?, configBuilder: ConfigBuilder, )- this method is used to transform Openconfig data (contained in ConfigBuilder) into device data (T) using .","Note: naming of the methods should be unified in order to be easily parsed by auto-generated documentation."]},{"i":"writers-1","l":"Writers","p":["A writer needs to implement all 3 methods: Write, Update, Delete in order to fully support default rollback mechanism of the framework","Time showed that update like 1. delete, 2. write is anti-pattern and should not be used. There is just one case where it is necessary: when re-writing list entry, you must first delete the previous entry, then write the new one, otherwise the previous entry would still be present and the new entry will be added to the list.","A writer can properly work only if there is a reader for the same composite node","The framework provides safe methods to use when handling data on device:","safePut deletes or adds managed data. 
Does not touch data that was previously on the device and is not handled by the writer.","safeMerge stores just the changed data into device. Does not touch data that was previously on the device and is not handled by the writer.","safeDelete removes data from the device only if the managed node does not contain any other information (even one not handled by the writer).","This test demonstrates the usage of safe methods."]},{"i":"mandatory-interfaces-to-implement-3","l":"Mandatory interfaces to implement","p":["Each writer needs to implement one of these interfaces based on type of target node in YANG. Unlike mandatory interfaces for reading, only interfaces for writing config data are available (because it is not possible to write operational data). For more information about methods please read javadocs.","ListWriterCustomizer- implement this interface if target composite node in YANG is list. An implementation needs to be registered as GenericListWriter.","WriterCustomizer- implement this interface if target composite node in YANG is container or augmentation. An implementation needs to be registered as GenericWriter."]},{"i":"base-writers-1","l":"Base Writers","p":["Each base writer should be generic and contain abstract methods:","fun getIid(id: InstanceIdentifier): InstanceIdentifier- this method returns InstanceIdentifier that points to a node where data should be written","fun getData(data: Config): T- this method transforms Openconfig data into device specific data (T)"]},{"i":"translateunit-1","l":"TranslateUnit","p":["Translate unit class must implement interface TranslateUnit. Naming convention for translate unit class is just name Unit. Translate unit class is usually instantiated, initialized and closed from Blueprint.","Implementation of TranslateUnit must be registered into TranslationUnitCollector and must provide set of supported underlay YANG models. Snippet below shows registration of Unit for junos device version 17.3.","Blueprint example of injecting TranslationUnitCollector to Juniper173InterfaceUnit:","Implementation of TranslateUnit must implement these methods:","toString(): String","Return unique string among all translation units which will be used as ID for the translation unit (e.g. \"IOS XR Interface (Openconfig) translate unit\")","getYangSchemas(): Set","Return YANG models containing composite nodes handled by handlers(readers/writers). It must return empty Set if no handlers are implemented.","getUnderlayYangSchemas(): Set","Return YANG module informations about underlay models used in the translation unit. These YANG modules describes configuration of NETCONF capable device.","getRpcs(underlayAccess: UnderlayAccess): Set>","Return RPC services implemented in the translation unit. Default implementation returns an emptySet. Parameter underlayAccess represents object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","provideHandlers(rRegistry: ModifiableReaderRegistryBuilder, wRegistry: ModifiableWriterRegistryBuilder, underlayAccess: UnderlayAccess): Unit","Handlers(readers/writers) need to be registered in this method. 
underlayAccess represents object containing methods for communication with a device via NETCONF and should be passed to readers/writers.","How to register readers/writers is described in CLI TranslateUnit"]},{"l":"Translation units for different device versions","p":["In case of needing to implement a new CLI Translation Unit for specific version of device we create a new TranslateUnit(e.g. located in iosxr/mpls).","In this case we use IOSXR4.* implementation as an example."]},{"l":"Device registration","p":["In TranslateUnit we had just created, e.g. MplsUnitXR4.java, we have to register device as a constant located ../iosxr/utils/IosXrDevices.java containing device type and version as described in TranslateUnit documentation.","This unit can reuse all writers/readers from existing ones, except the writer (or other handler) we want to alter or create (in our example writer for tunnel configuration). We have to create a new writer with desired behaviour and add it into provideWriters method."]},{"i":"handlers-1","l":"Handlers","p":["In our example, the newly created writer have to implement CliWriter interface as well as all the methods mentioned in Writers. With other handlers we proceed with same logic.","Similar process apply on every new implementation of different device version."]},{"l":"How to write extensions for OpenConfig"},{"i":"best-practices-for-handlers-readerswriters","l":"Best practices for handlers (readers/writers)","p":["All comments are in English","All defined exceptions can be thrown from the code","All new dependencies and imports are actually used","All variables/methods are actually used","Before pushing the code make sure:","Chunk","Code has correct spacing","Commented out code","Comments are appropriate to the code behavior","Constants","Do not push code that contains following:","Double blank lines","java regexes","New classes/interfaces have the correct license header","New classes/interfaces/yang model have correct date","Reflection","Show commands","Static imports","Trailing whitespaces or tabs"]}],[{"l":"FAQ"},{"i":"what-is-the-datastore-used-in-frinx-uniconfig-","l":"What is the datastore used in FRINX UniConfig ?","p":["Uniconfig uses a custom in memory database which is part of MD-SAL and it is a very fast storage for YANG modeled data. UniConfig uses datastore only for caching data in the scope of a single transaction. For persistence purposes, UniConfig uses PostgreSQL database."]},{"i":"are-service-instances-stored-in-the-uniconfig-layer-of-frinx-","l":"Are service instances stored in the UniConfig layer of FRINX ?","p":["Only the „outputs“ of a service are stored and managed by UniConfig(e.g. service generates bgp config for 10 devices, which is pushed into UniConfig). The services themselves are responsible for managing their configuration/operational state and rely on the same database to store configuration or operational data."]},{"i":"how-does-frinx-deal-with-model-changes-","l":"How does FRINX deal with model changes ?","p":["OpenConfig models are compiled as part of the UniConfig and because of this reason it is possible to change these models only before compilation. 
On the other hand, NETCONF models can be dynamically loaded from the device and also manually updated using a dedicated RPC:","https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/network-management-protocols/uniconfig_netconf/netconf-intro.html#registration-or-refreshing-of-netconf-cache-repository-using-rpc"]},{"i":"does-frinx-provide-auto-rollback-on-all-affected-devices-when-a-transaction-fails-on-one-or-more-devices-","l":"Does FRINX provide auto rollback on all affected devices, when a transaction fails on one or more devices ?","p":["Yes, all onboarded devices have full rollback implemented. But it is also possible to disable auto-rollback in UniConfig, so that successfully configured devices will keep their configuration. This can be done by setting the 'do-rollback' flag to False in the input of the Commit RPC."]},{"i":"is-it-possible-to-show-the-differences-between-the-actual-device-configuration-and-the-operational-datastore-while-synchronizing-configuration-into-frinx-","l":"Is it possible to show the differences between the actual device configuration and the operational datastore while synchronizing configuration into FRINX ?","p":["sync (update operational)","show diff","drop the changes from device by replacing operational with config"]},{"i":"is-any-netconf-device-fully-supported-or-must-openconfig-be-mapped-to-netconf-as-well-","l":"Is any NETCONF device fully supported, or must OpenConfig be mapped to netconf as well ?","p":["You can either use the native device models (via UniConfig native) or use the existing translation units between OpenConfig and vendor models."]},{"i":"are-the-libraries-that-are-used-to-access-the-config-data-store-model-driven-","l":"Are the libraries that are used to access the Config Data Store model driven ?","p":["UniConfig has a DataBroker interface and a concept of InstanceIdentifier. Those are the model-driven APIs for data access. More info:","https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts"]},{"i":"what-would-an-access-to-the-configuration-data-store-look-like-in-code-","l":"What would an access to the configuration data store look like in code ?","p":["A: Just to demonstrate the API, in this example InterfaceConfigurations is read from CONF DS and put back to CONF DS.","B: In this example InterfaceConfigurations is read from OPER DS."]},{"i":"is-it-possible-in-frinx-to-run-transaction-on-two-disjunct-sets-of-devices-simultaneously-","l":"Is it possible in FRINX to run transaction on two disjunct sets of devices simultaneously ?","p":["UniConfig supports a build-and-commit model with which it is possible to configure devices in isolated transactions and commit them in parallel. If there are conflicts between the configured sets of devices, then the second transaction to be committed will fail (however, this cannot happen on disjunct sets of devices)."]},{"i":"what-access-control-measures-does-frinx-offer-","l":"What access control measures does FRINX offer ?","p":["FRINX UniConfig supports local authentication, password authentication, public key authentication, token authentication, RADIUS-based authentication and subtree-based authentication via the AAA Shiro project."]},{"i":"how-does-frinx-report-problems-with-device-interaction-","l":"How does FRINX report problems with device interaction ?","p":["If a device cannot be reached during a UniConfig transaction (after attempting to re-establish the connection), a timeout will occur and the cause of the transaction failure will be reported. 
UniConfig also uses keepalive messages for continuous verification of connection to devices(both using NETCONF and CLI management protocols)."]},{"i":"is-it-possible-to-backup-configuration-","l":"Is it possible to backup configuration ?","p":["UniConfig stores all committed configuration of devices, templates, and snapshots in the PostgreSQL database. We suggest to use existing techniques for backup that are also provided by PostgreSQL."]},{"i":"is-it-possible-to-enforce-policies-over-configuration-changes-","l":"Is it possible to enforce policies over configuration changes ?","p":["All customer specific validations and policy enforcements can be implemented in layers above UniConfig"]},{"i":"in-which-languages-are-the-libraries-to-access-frinx-written-","l":"In which languages are the libraries to access FRINX written ?","p":["UniConfig is written in JAVA and Kotlin which can use data objects generated from YANG. RESTful API (RESTCONF) can be used with language that implements REST client (for example, Python)."]},{"i":"does-frinx-detect-if-a-cluster-node-is-down-on-its-own-or-does-it-rely-on-a-high-availability-framework-","l":"Does FRINX detect if a cluster node is down on its own or does it rely on a high availability framework ?","p":["UniConfig instance is stateless - it doesn’t persist any configuration in its datastore (PostgreSQL is used for persistence) and it doesn’t keep permanent connections (connections to devices are created on-demand in the transaction). Because of the stateless architecture, UniConfig instances in the ‘cluster’ don’t have to communicate with each other and they don’t require any coordination. You must only keep in mind that requests that belong to the same transaction must be forwarded to the same UniConfig backend - for this purpose you can use any HA component that supports sticky sessions based on cookies (such as HA-proxy or Traefik)."]},{"i":"is-it-possible-for-frinx-to-report-problems-to-a-network-monitoring-system-","l":"Is it possible for FRINX to report problems to a network monitoring system ?","p":["FRINX UniConfig can propagate NETCONF notifications and internal UniConfig notifications or data-change-events from web sockets on Northbound API."]},{"i":"is-it-possible-to-do-additional-logging-on-the-logging-provided-by-uniconfig-","l":"Is it possible to do additional logging on the logging provided by UniConfig ?","p":["Yes it is. Each component writes logs at different verbosity levels of logging (ERROR, WARN, INFO, DEBUG, TRACE). We are using the logback framework for logging of messages - logging can be adjusted by modification of config/logback.xml file in the standard way. This file can be updated also on runtime. The second approach for adjusting of logging of some specific components is using logging controller: https://docs.frinx.io/frinx-uniconfig/UniConfig/user-guide/operational-procedures/logging/logging.html"]},{"i":"where-do-i-find-the-status-of-the-device-and-where-do-i-find-error-messages-when-installing-does-not-work-","l":"Where do I find the status of the device and where do I find error messages, when installing does not work ?","p":["installing/uninstalling process is done automatically - device is installed when UniConfig must read/write some data from/to device and device is automatically uninstalled at the end of the transaction if no other transaction is using the same installpoint. Users should not care about the installing process since it is transparent - it is useful only for debugging purposes. 
To get status of the installing process for all devices in the system, issue following request (it will show status as well as last connect attempt cause):","CLI devices:","NETCONF devices:"]},{"i":"what-does-installation-and-installing-exactly-do-","l":"What does installation and installing exactly do ?","p":["Opening IO session to device (TCP session with SSH and/or NETCONF on top of SSH session).","Exposing installpoint that can be used from internal API and RESTCONF API for interaction with device.","Opening internal transaction","installing of device with input parameters (CLI / NETCONF)","Syncing configuration from device","Writing configuration and install information into database","Uninstalling device","Committing transaction"]},{"i":"why-i-can-not-install-junos-device-on-uniconfig-","l":"Why I can not install Junos device on UniConfig ?","p":["If installing Junos devices is not possible and UniConfig gives response :","It is necessary to set up on Junos device netconf session compliant to RFC and Yang schemas (rfc-compliant, yang-compliant)"]}],[{"l":"Glossary of Terms","p":["MD-SAL https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:FAQ- Model driven service application layer","OPENFLOW https://en.wikipedia.org/wiki/OpenFlow- OpenFlow communications protocol that exposes the forwarding plane of a network switch or router over the network OPENDAYLIGHT https://www.opendaylight.org/","RESTCONF https://tools.ietf.org/html/draft-ietf-netconf-restconf-1- draft-ietf-netconf-restconf-12 SDN Software defined networking– management of network services through abstraction of higher-level functionality.","NETCONF https://tools.ietf.org/html/rfc6242","Using the NETCONF Protocol over Secure Shell (SSH) https://tools.ietf.org/html/rfc6241","Network Configuration Protocol (NETCONF) https://tools.ietf.org/html/rfc5277","NETCONF Event Notifications https://tools.ietf.org/html/rfc6243","With-defaults Capability for NETCONF YANG https://tools.ietf.org/html/rfc6020 a modelling language for NETCONF"]}],[{"l":"List of Supported Devices","p":[".*","(mounted as .*)","(mounted as ios xr .*)","(mounted as Junos 14.*)","(mounted as sros .*)","1.*","12.*","13*/14*","14.*","15.*","16.*","16.*(and later)","17.*","18.*","2.*","3.*","4.*","5.*","6.*","6.6.1 (and later)","8.*","Arista","Brocade","Calix","Casa","Ciena","Cisco","CLI access via REST","CLI to OC translation","Cumulus","Cumulus Linux","Dasan","Device OS Type","Device Version","eos","For details of translation units see our Github: cli_units and unitopo_units.","Here you can find list of all the devices and features supported by Frinx UniConfig:","Huawei","ios classic","ios xe","ios xr","IP Infusion","ironware","Juniper","junos","Microsoft","Mikrotik","NETCONF access via REST","NETCONF to OC translation","nexus","Nokia","nos","OC = OpenConfig","OcNOS","SAOS","SonicOS","sros","Ubiquity","ubnt es","Vendor","vrp"]}],[{"l":"FRINX Workflow Manager introduction","p":["FRINX Workflow Manager allows customers to create automated, repeatable, digital processes to build, grow and operate their digital communication infrastructure. FRINX Workflow Manager is based on open-source components and enables infrastructure and network engineers to create and operate workflows to implement configuration changes and obtain operational data from their heterogeneous networks and clouds. 
Typical examples are the automation of services that span resources in the cloud and physical assets, the automation of slices and capacity increases in mobile networks, the interaction with CRM and inventory systems, the management of Internet and Infrastructure services and the automation of core network functions. Workflow Manager can be deployed standalone or as part of FRINX Machine.","FRINX Workflow Manager uses Netflix's Conductor for task/workflow orchestration. We recommend taking a look at their Documentation as an introduction to Tasks, Workflows, Definitions and an overall prerequisite to working with FRINX Workflow Manager."]}],[{"l":"Create and Modify Workflows and Workers"},{"l":"Prepare Your Work Environment","p":["After you have installed and started the FRINX Machine (see https://github.com/FRINXio/FRINX-machine) you will want to modify existing workflows or add new workflows and workers to meet your needs. We will be referring to the machine that is running the FRINX Machine containers as host. Typically that host is a VM running on your laptop, in your private cloud or in a public/virtual private cloud. Here is how to get started."]},{"l":"Creating a worker","p":["Now that we have our environment prepared, we can move on to the first step of creating a workflow. First we will create a worker that defines the tasks utilized in our workflow. The goal is to have the task in our workflow receive two input parameters (id_1 and id_2). The purpose of our task is to add the two input variables and return the result. The execution logic of our task will be implemented in a small Python function called worker.","For full documentation of tasks, workflows and the capabilities of Netflix Conductor, please go to https://netflix.github.io/conductor/","Create a worker in the correct repository (the name of the worker is up to you):","This is what we put in the file in our case:","The core of the worker is a task that contains a simple method which adds the two inputs the user provides in the GUI, as you will see later. Workers can contain multiple tasks; in our case, one is enough as an example.","After this, you must register your worker in the main Python file \"main.py\" in the same directory where you just created your worker. All workers you want to use in Frinx Machine must be included in this file. The file might look similar to this:","Notice lines 22 and 53: you must both import the worker file and include it in the \"register_workers(cc)\" method.","That is all in terms of worker creation. There are, however, a few more things to do in your environment. After doing all the above, we will want to build our Frinx Machine based on our local changes. For that, we must edit the file \"swarm-fm-workflow.yml\".","Find the \"demo-workflows\" block in this file. Change the image to use an image called \"local\" (2):","Now we can build our fm-workflows image with the added task. Use:","While it is not necessary to use the \"--no-cache\" flag, we recommend it to make sure you rebuild the image with the newly edited code and not one stored in the cache.","Now just start fm-workflows and you're good to go:","If you did everything correctly, you will now see your new task in Frinx Machine. Go to Workflow Manager -> Tasks -> Search:","Search integers","Now you can create a workflow that uses this task. 
Workflow Manager-> \"+ New\":"]},{"i":"after-being-prompted-for-inputs-you-should-see-that-addition-ran-successfully","l":"After being prompted for inputs, you should see that addition ran successfully:","p":["Search integers"]}],[{"l":"Device Blueprints","p":["Blueprints allow you to create a template that can be used for quickly adding devices. They are created with JSON snippets."]},{"l":"Creating new blueprint","p":["To create a new blueprint, click on the Explore button in the Explore and configure device tab and then click the Blueprints tab in the top bar. Here you can Add blueprint.","Create blueprint"]},{"l":"Using a blueprint","p":["To use a blueprint when adding a new device, toggle the \"Blueprints\" switch in the form and choose the blueprint that you want to use.","Use Blueprint"]},{"l":"Blueprint examples"},{"i":"cisco-classic-ios-cli","l":"Cisco classic IOS (cli)"},{"i":"cisco-ios-xr-netconf","l":"Cisco IOS XR (netconf)"},{"i":"junos-cli","l":"JUNOS (cli)"},{"i":"calix-netconf","l":"CALIX (netconf)"},{"i":"nokia-netconf","l":"Nokia (netconf)"},{"i":"ciena-cli","l":"Ciena (cli)"}],[{"l":"Device Inventory","p":["Devices are stored in a Device Inventory. From here they can be dynamically installed and uninstalled."]},{"l":"Adding device to inventory","p":["To add a new device to the inventory, click on the Add device button in the Device inventory tab.","FM Install"]},{"l":"JSON examples","p":["To add a new device, toggle the \"Blueprints\" switch in the form and choose the blueprint that you want to use.","New devices are added by JSON code snippets. They are similar to Blueprints with one addition: device_id must be specified in the snippet."]},{"i":"cisco-classic-ios-cli","l":"Cisco classic IOS (cli)"},{"i":"cisco-ios-xr-netconf","l":"Cisco IOS XR (netconf)"},{"i":"junos-cli","l":"JUNOS (cli)"},{"i":"calix-netconf","l":"CALIX (netconf)"},{"i":"nokia-netconf","l":"Nokia (netconf)"},{"i":"ciena-cli","l":"Ciena (cli)"}],[{"l":"Workflow Builder","p":["Workflow Builder is the graphical interface for Workflow Manager and is used to create, modify and manage workflows."]},{"l":"Creating new workflow","p":["To create a new workflow, click on the Create button in the Create workflow tab and fill in the general workflow parameters. Then you can proceed with adding tasks.","Parameter Name is required and must be unique. Keep in mind that the name cannot be changed later. Other parameters are optional and can be changed anytime.","Create new workflow"]},{"l":"Editing existing workflow","p":["To edit an already existing workflow, find the workflow in the Definitions tab, click on it and then click on the Edit button. A diagram of the workflow will be rendered on the canvas. Now you can restructure the workflow, add new tasks, remove tasks or edit the workflow information and parameters.","Workflow edit"]},{"l":"Adding tasks","p":["To add a new task to the canvas, find the task in the left menu and click the + icon.","Add task"]},{"l":"Removing tasks","p":["To remove a task, click on the three dots next to a task and press the Remove task button.","Delete task"]},{"l":"Task parameters","p":["To edit or add task parameters, double-click on the task that is placed on the canvas. 
Input parameters can be declared as:","Input provided by the user, e.g.:","Variable provided by another task, e.g.:","Statically defined, e.g.:","For full documentation of tasks see: https://netflix.github.io/conductor/configuration/taskdef/."]},{"l":"System tasks"},{"i":"fork--join","l":"Fork & Join","p":["The 'Fork' function is used to schedule a parallel set of tasks.","A Join task MUST follow a Fork task.","Fork and Join"]},{"l":"Decision","p":["A decision task is similar to an if...else statement in a programming language. The task takes 2 parameters:","name of the parameter in the task input whose value will be evaluated (default is param)","value that will be compared with param (or another specified input variable)","If param and is equal to are evaluated as equal, the workflow will continue to the If branch; otherwise, the workflow will continue in the Else branch.","The Else branch is optional and can be empty."]},{"l":"Lambda","p":["The Lambda task helps execute ad hoc logic at workflow run-time, using the javax & Nashorn JavaScript evaluator engine. This is particularly helpful for running simple evaluations in the Conductor server instead of creating workers.","The task output can then be referenced in downstream tasks like:"]},{"l":"HTTP","p":["An HTTP system task is used to make calls to another microservice over HTTP. You can use the GET, PUT, POST and DELETE methods, and you can also set custom headers."]},{"l":"TERMINATE","p":["A task that can terminate a workflow with a given status and modify the workflow's output with a given parameter. It can act as a \"return\" statement for conditions where you simply want to terminate your workflow. For example, if you have a decision where the first condition is met, you want to execute some tasks; otherwise, you want to finish your workflow.","name","description","notes","terminationStatus","can only accept “COMPLETED” or “FAILED”","task cannot be optional","workflowOutput","Expected workflow output"]},{"l":"EVENT","p":["The Event task provides the ability to publish an event (message) to either Conductor or an external eventing system like SQS. Event tasks are useful for creating event-based dependencies for workflows and tasks.","When producing an event with Conductor as sink, the event name follows the structure:"]},{"l":"WAIT","p":["A Wait task is implemented as a gate that remains in the IN_PROGRESS state unless marked as COMPLETED or FAILED by an external trigger. To use a Wait task, set the task type as WAIT."]},{"l":"jsonJQ","p":["jsonJQ is like sed for JSON data - it is especially useful for filtering JSON data.","An example of a jsonJQ query expression could be:","It searches through the whole config and, under the \"Cisco-IOS-XR-ifmgr-cfg:interface-configurations\" model, finds the interface whose description matches the user input. The task returns the name of the interface with the matching description."]},{"l":"Kafka publish","p":["Kafka is a distributed publish-subscribe messaging system and a robust queue that can handle a high volume of data and enables you to pass messages from one endpoint to another.","Kafka"]},{"l":"Subworkflows","p":["Subworkflows act as regular tasks inside a parent workflow. Subworkflows can be expanded to view the tasks they contain (or other nested subworkflows) by clicking the three dots next to the subworkflow and then clicking the Expand button. 
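To make the Lambda description above concrete, a task entry in the workflow JSON could look roughly like the sketch below, following the Conductor LAMBDA task format documented at the link referenced earlier; the task and reference names are placeholders.

```jsonc
// Illustrative sketch only -- names and the expression are placeholders.
{
  "name": "lambda_check",
  "taskReferenceName": "lambda_check_ref",
  "type": "LAMBDA",
  "inputParameters": {
    "input_number": "${workflow.input.number}",
    "scriptExpression": "if ($.input_number > 10) { return { over: true }; } else { return { over: false }; }"
  }
}
```

A downstream task can then reference the evaluated result, for example as ${lambda_check_ref.output.result.over}.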
Expanded subworkflows can then be edited the same way as the parent workflow.","Simple tasks differ in color shade from Subworkflow tasks and cannot be expanded.","Expand"]},{"l":"Linking tasks","p":["To connect tasks or subworkflows into the execution flow, drag and drop the respective Out and In endpoints on the nodes, like this: Out -> In"]},{"l":"Unlinking tasks","p":["To remove a link, double-click on the link."]},{"l":"Adding workflow information","p":["To provide additional workflow information, click on Actions in the upper right-hand corner and then click Edit workflow."]},{"l":"Output parameters","p":["We can specify custom output parameters of a workflow by using JSON templates to generate the output of the workflow. If not specified, the output is defined as the output of the last executed task.","Let's say we have a task with taskReferenceName: task1 which returns summary, and we want the output of the workflow to be the output of this specific task only. The outputParameter value named e.g. finalResult will be:","For full documentation of workflow parameters and definitions, read https://netflix.github.io/conductor/configuration/workflowdef/."]},{"i":"defaults--description","l":"Defaults & Description","p":["Here, we can define default values and descriptions for workflow inputs. Each input value declared as ${workflow.input...} will appear in a dropdown list of available input parameters."]},{"l":"Save and execute workflow","p":["To Save a workflow, click on the Actions button in the upper right corner and select Save workflow. Then you can find the workflow in the Explore workflows section under the Definitions tab.","To Execute a workflow directly from the builder, click on the Save and execute button in the upper right corner. You will be prompted to provide input parameters.","Executing a workflow will also save the workflow."]},{"l":"Import and export of workflows","p":["To import a workflow, click the settings icon and then select the Import button. Only a valid JSON definition of the workflow will be imported.","An imported workflow will not be saved until you Save or Execute it.","Import/Export workflow","To export and save the workflow in JSON format to your filesystem, click on the Export button.","To choose the location to which you want to export the workflow, you have to have this option enabled in your browser settings. The default location is the Downloads folder."]}],[{"l":"FRINX Resource Manager introduction","p":["FRINX Resource Manager was developed for network operators and infrastructure engineers to manage their physical and logical assets and resources. Examples of assets are locations, equipment, ports and services. Examples of resources are IP addresses, VLAN IDs and other consumables required for operating data services. Resource Manager was developed specifically to address the needs of network and infrastructure engineers working with communication networks. FRINX Resource Manager provides a GUI and a GraphQL-based API to create, read, update and delete assets. Resource Manager can be deployed standalone or as part of FRINX Machine."]},{"l":"Features","p":["The following list contains features inherent to Resource Manager."]},{"l":"Resource type management","p":["Example resource types:","Location","Name: Latitude","Name: Longitude","Name: name of the property","Name: RD","Name: vlan","Property type","Resource Manager is flexible enough to enable user-defined resource types without requiring code compilation or any other non-runtime task. 
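Conceptually, a user-defined resource type (as discussed here and continued below) is just a named set of property types, each with a name and a type. The sketch below illustrates that idea for a vlan type; it is a conceptual illustration only, not a literal Resource Manager API payload, so consult the GraphQL schema for the actual mutation and field names.

```jsonc
// Conceptual illustration only -- not the exact Resource Manager API shape.
{
  "resourceTypeName": "vlan",
  "propertyTypes": [
    { "name": "vlan", "type": "int" }
  ]
}
```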
With regard to resource types, this requires keeping the schema flexible enough so that users can define their own types and properties and thus create their own model.","A resource type is a blueprint for how to represent a resource instance. A resource type is essentially a set of property types, where each property type defines:","Route distinguisher","Type: float","Type: int","Type: int, string, float etc.","Type: String","VLAN"]},{"l":"Resource management","p":["A resource is an instance of a resource type consisting of a number of properties.","Example resources based on the resource types from the previous section:","VLAN_1","Property","Name: vlan","Value: 44","Route distinguisher_1","Name: RD","Value: 0:64222:172.16.1.0","Location_1","Name: Latitude","Value: 0.0","Name: Longitude","Resource types"]},{"l":"Flexible design","p":["One of the main non-functional goals of Resource Manager is flexibility. We are designing Resource Manager to support an array of use cases without the need for modifications. To achieve flexibility we allow:","Custom resource type definition without changes to the DB schema","Custom allocation logic without the need to modify the backend code","Custom pool grouping to represent logical network parts (subnet, region, datacenter etc.)"]},{"l":"Multitenancy and RBAC","p":["Multitenancy and Role Based Access Control are supported by Resource Manager.","A simple RBAC model is implemented where only super-users (based on their role and user groups) can manipulate resource types, resource pools and labels. Regular users will only be able to read the above entities and allocate and free resources.","Resource Manager does not manage the list of tenants/users/roles/groups and relies on an external ID provider. The following headers are expected by the Resource Manager graphQL server:","Resource Manager does not store any information about users or tenants in the database, except the name or ID of a tenant provided in the x-tenant-id header."]}],[{"l":"User Guide"},{"l":"API","p":["See examples in api_tests or the VRF IP management sample use case in the postman collection."]},{"l":"UI","p":["See the Resource Manager frontend project on GitHub."]}],[{"l":"Pools","p":["A resource pool is an entity that allocates and deallocates resources for a single specific resource type. Resource pools are completely isolated from each other, and there can be multiple resource pools for the same resource type, even providing the same resource instances. Resource pools encapsulate the allocation logic and keep track of allocated resources. A pool instance should manage resources within the same network or logical network part (e.g. subnet, datacenter, region or the entire, global network).","Example pools:","IPv4 address pool allocating IP addresses from a range / subnet","VLAN pool allocating all available VLAN numbers 0 - 4095","Route distinguisher pool allocating route distinguishers from a specific, per-customer input","Depending on the resource type and the user’s requirements, pools need to be capable of allocating resources based on various criteria / algorithms. Currently, the following pool types are supported by Resource Manager:"]},{"l":"SetPool","p":["A pool with statically allocated resources. Users have to define all the resources to be served in advance. 
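To illustrate the tenant header described in the Multitenancy and RBAC section above, a request against the GraphQL server could look like the sketch below. Only the x-tenant-id header is taken from the text; the endpoint URL, port and query name are assumptions for illustration, so adjust them to your deployment and schema.

```bash
# Illustrative only: endpoint, port and query name are assumptions.
curl -s -X POST "http://localhost:8884/graphql/query" \
  -H "Content-Type: application/json" \
  -H "x-tenant-id: frinx" \
  -d '{"query": "query { QueryResourceTypes { Name } }"}'
```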
The pool simply provides them one after another until every resource is in use.","This type of pool is suitable for cases where a set of resources to be served already exists.","Properties of SetPool","Config","Set of unique resources to provide","Name of the pool","Resource recycling - whether deallocated resources should be used again","Operational","Utilisation - % of pool capacity used"]},{"l":"SingletonPool","p":["SingletonPool serves just a single resource for every request.","This type of pool can be utilized in special use cases such as serving the globally unique AS number of an ISP. Instead of hardcoding the AS number as a constant in e.g. workflows, it can be “managed” and stored in the Resource Manager.","Properties of SingletonPool","Config","A single unique resource to provide","Name of the pool"]},{"l":"AllocatingPool","p":["a predefined set of resources cannot be used","AllocatingPool is a type of pool that enables algorithmic resource allocation. Instead of using a pre-allocated set of resources to simply distribute, it can create resources whenever asked for a new resource. This type of pool allows users to define custom allocation logic, attach it to the pool and have use-case-specific resource allocations available. An important feature of this pool type is the ability to accept new allocation logic from users in the form of a script without having to rebuild the Resource Manager in any way.","Allocation strategy - a script defining the allocation logic","Config","Example AllocatingPools:","In general, anything that a user might need","Limit - hard limit on the total number of resources that can be produced","Name of the pool","Operational","or in general whenever using an allocation script makes more sense than using a predefined set of resources","Pool providing all available VLAN numbers","Pool providing IPv4-mapped IPv6 addresses from a specific range / subnet","Pool providing just odd VLAN numbers","Pool providing random VLAN numbers","Pool providing Route Distinguishers that include customer-specific information (which is passed as “additional input” as part of the resource claim request)","Properties of AllocatingPool","resource creation requires additional inputs","Resource recycling - whether deallocated resources should be used again","This type of pool can be used when","Utilisation - % of pool limit used"]},{"l":"Nested pool","p":["Resource Manager allows you to create nested pools. Nested pools provide the possibility to create subgroups from already existing pools. These subgroups make it easier to reason about the topology."]},{"l":"How to create nested pool","p":["Process (in UI):","Create a pool or open an existing one","Allocate a resource in the newly created or existing pool","Open the create pool page","Select the parent from which the nested pool should be created","Select the allocated resource of the parent from which the nested pool will be taking resources","Fill in the other mandatory inputs","Push the button to create the nested pool","After a successful submit, the newly created nested pool should be visible in the pools list or in the nested pools list on its parent's detail page. It is also possible to create a nested pool from the detail page of a pool."]},{"l":"Allocation strategy overview","p":["An allocation strategy encapsulates the allocation logic and is always tied to an instance (or instances) of AllocatingPool. 
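As a conceptual illustration of the SetPool properties listed above (pool name, the predefined set of unique resources, and the recycling flag), a SetPool configuration could be summarised as in the sketch below; this is not a literal API payload, so refer to the GraphQL schema or UI form for the real fields.

```jsonc
// Conceptual illustration of a SetPool configuration -- not the exact API shape.
{
  "poolName": "dc1-vlan-set-pool",
  "resourceType": "vlan",
  "poolValues": [ { "vlan": 100 }, { "vlan": 101 }, { "vlan": 102 } ],
  "resourceRecycling": true
}
```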
The strategy is defined in form of a script using Javascript (or similar) language and its responsibility is:","To produce a new (unique) resource instance based on a set of previously allocated resources and any additional, user submitted input.","Apart from a resource being unique, there are no other requirements on what the strategy needs to do. It gives users the freedom to implement any logic.","Allocation strategy can take any input provided in a structure named userInput. This input is provided by the user every time they claim a new resource.","Allocation strategy also gets access to a list of already allocated resources and any properties associated with the pool being utilized."]},{"l":"Pool hierarchies","p":["Resource Manager allows pools to be organized into hierarchies e.g."]},{"l":"Labels","p":["Labels enhance resource management by allowing a pool to be marked with a custom string. Multiple pools can have the same label forming a logical group of pools.","A group of pools under the same label can be dedicated to some logical part of a network (e.g. datacenter, subnet, region etc.).","A single pool should typically have only one label i.e. it should not be re-used across unrelated networks.","The following diagrams represent some of the configurations that can be achieved using Labels:"]},{"i":"configuration-pool-instance-per-label","l":"Configuration: Pool instance per Label","p":["Enables: Resource reuse in multiple networks","Instance per label"]},{"i":"configuration-pool-instance-under-multiple-labels","l":"Configuration: Pool instance under multiple labels","p":["Enables: Unique resources across different networks","Instance multiple labels"]},{"i":"configuration-pool-grouping","l":"Configuration: Pool grouping","p":["Enables: Dividing resource pools into groups based on network regions. Enables users to simply ask for a resource based on label name + resource type (removing the need to know specific pools)","Pool grouping"]},{"i":"configuration-multiple-pool-instances-under-the-same-label","l":"Configuration: Multiple pool instances under the same Label","p":["Enables: Resource pool expansion in case an existing pool runs out of resources. Serves as an alternative to existing pool reconfiguration. If multiple pools of the same type are grouped under the same label, the pools are drained of resources in the order they have been added to this group/label.","Multiple pool instances"]}],[{"l":"Resource Manager architecture","p":["Following diagram outlines the high level architecture of Resource Manager.","Architecture","User authentication and authorization as well as user and tenant management is outside of Resource Manager. Resource Manager is typically deployed behind an api-gateway that handles authentication and authorization relying on an external Identity Managmenet system.","The only aspect of tenancy management that needs to be handled by Resource Manager is: per tenant database creation and removal. 
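A minimal allocation-strategy sketch is shown below: it hands out the next unused VLAN ID, optionally honouring a value requested by the user. Only userInput is named in the text above; the other variable names (currentResources, resourcePoolProperties) and the invoke() entry point are assumptions, so mirror the built-in strategies shipped with Resource Manager for the exact conventions.

```javascript
// Illustrative allocation strategy -- variable names other than userInput are assumptions.
function invoke() {
  let from = resourcePoolProperties["from"];   // e.g. 1
  let to = resourcePoolProperties["to"];       // e.g. 4094

  // Collect VLAN IDs that are already allocated in this pool.
  let used = new Set(currentResources.map(function (r) { return r.Properties["vlan"]; }));

  // Honour a desired VLAN if the user asked for one and it is still free.
  if (userInput && userInput["desiredVlan"] && !used.has(userInput["desiredVlan"])) {
    return { vlan: userInput["desiredVlan"] };
  }

  // Otherwise scan the range for the first unused value.
  for (let candidate = from; candidate <= to; candidate++) {
    if (!used.has(candidate)) {
      return { vlan: candidate };   // a new, unique resource instance
    }
  }
  return null;  // pool exhausted
}

invoke();
```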
Each tenant has its own database on the database server."]},{"l":"Technology stack","p":["AAA","Also handles schema migration: creates or updates tables in the DB according to the ent schema","Backend server","Database","Ent is an ORM framework for Go","Entgo.io","Gqlgen","Gqlgen is a GraphQL framework for Go","GraphQL","Isolated and limited for safety and performance","Postgres","The primary API of Resource Manager will be exposed over GraphQL (over HTTP)","PSQL is the DB of choice, but thanks to the ent framework hiding the interactions with the database, another SQL DB could be used in the future","RBAC rules can be defined as part of the schema","Resource Manager will rely on technologies used by the Inventory project currently residing at: https://github.com/facebookincubator/magma since both projects are similar and have similar requirements.","Separate process","Tenant and user management is out of scope of Resource Manager and will be handled by an external identity management system.","This section provides details on the technologies intended for developing Resource Manager.","WASM","WebAssembly runs any user-defined code executing allocation logic for user-defined resource pools","Works well on top of the entgo.io ORM"]},{"l":"Entity model","p":["The following diagram outlines the core entity model for Resource Manager:","Entities"]}],[{"l":"Developer Guide"},{"l":"Dependency on symphony","p":["Resource Manager currently depends on a project called symphony.","This project is not publicly accessible; without access to it, Resource Manager cannot be built. In that case, use the pre-built docker images from dockerhub."]},{"l":"Folder structure","p":["api-tests","core codebase for pools and resource allocation","ent - ORM schema and generated code for the DB","ent/schema","graph/graphhttp","graph/graphql","graphQL schema and generated code for the graphQL server","graphQL server","integration tests","logging","logging framework","multitenancy, RBAC and DB connection management","ORM schema","pkg - helm chart for Resource Manager","pools","psql","psql DB connection provider","viewer"]},{"l":"Build","p":["It is advised to build Resource Manager as a docker image using the Dockerfile and run it as a docker container.","The reason is that Resource Manager uses wasmer and pre-built js and python engines for wasm. These are not part of the codebase, and thus simply running Resource Manager would fail unless you provide these resources, e.g. by copying them out of a built Resource Manager docker image.","Resource Manager utilizes wire to generate wiring code between major components. Regenerating the wiring is not part of the standard build process! After modifying any of the wire.go files, perform:"]},{"l":"GraphQL schema","p":["Resource Manager exposes a graphQL API and this is the schema."]},{"l":"Built in strategies","p":["Resource Manager provides a number of built-in strategies for built-in resource types; they are loaded into Resource Manager at startup.","Built in strategies code base","Built in strategies unit tests","These strategies need to be tested/built and packaged for Resource Manager. This test/build process is in the scripts section of package.json, while the packaging part can be found in generate.go.","Resource types associated with these strategies can be found in load_builtin_resources.go."]},{"l":"Unit tests"},{"l":"Integration tests"},{"l":"API tests","p":["There are a number of api tests available; they can be executed using integration-test.sh. 
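For orientation, the build and wiring steps described above could look roughly like the sketch below. The image name and tag, the exposed port and the exact wire invocation are assumptions; check the project's Dockerfile, Makefile and the wire documentation for the canonical commands.

```bash
# Illustrative only -- image name, tag, port and the wire command are assumptions.
docker build -t frinx/resource-manager:local .
docker run --rm -p 8884:8884 frinx/resource-manager:local

# After modifying any wire.go file, regenerate the wiring code, e.g. with:
go run github.com/google/wire/cmd/wire ./...
```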
These tests need to be executed against Resource Manager running as a black box (ideally as a container)."]},{"l":"Wasmer","p":["There are a number of tests for core components that require the wasmer, quickjs and python packages to be available. It is recommended to run these tests in a docker container.","Example execution:"]},{"l":"Additional info"},{"l":"Telemetry","p":["Support for tracing (distributed tracing). Streams data into a collector such as Jaeger. The default is Nop. See the main parameters or telementry/config.go for further details on enabling jaeger tracing."]},{"l":"Health","p":["Basic health info of the app (also checks whether the database connection is healthy)"]},{"l":"Metrics","p":["Prometheus-style metrics are exposed at:"]}]] \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 9f354c11d..6e9004acd 100644 Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ
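As a quick smoke test for the Health and Metrics endpoints mentioned above, the service can be probed with curl. The port and paths below are assumptions (Prometheus-style exporters conventionally expose /metrics); adjust them to your deployment.

```bash
# Illustrative only -- port and paths are assumptions.
curl -s http://localhost:8884/healthz
curl -s http://localhost:8884/metrics | head -n 20
```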