diff --git a/index.html b/index.html
index eb3f20606..b9f97f77c 100644
--- a/index.html
+++ b/index.html
@@ -6,7 +6,7 @@
diff --git a/resources/js/config.js b/resources/js/config.js
index 380f8dde7..4e65bb025 100644
--- a/resources/js/config.js
+++ b/resources/js/config.js
@@ -1 +1 @@
-var __DOCS_CONFIG__ = {"id":"jK55ngCu2np2m4iqGm0zPH1jVTocoGVpEx","key":"GDcQ2XYGXpuDabexpdUk6YwRcc4r0RxRRTyrRk6zhAM.JUAy7kIuYXMpzXDDLkzMQzXulWcZfRiHJvswqt15ZlL9u+Fgh3GxagpDkcUVEeyMQvMeRsWMVgBOr3a2wLmPmg.108286","base":"/","host":"docs.frinx.io","version":"1.0.0","useRelativePaths":true,"documentName":"index.html","appendDocumentName":false,"trailingSlash":true,"preloadSearch":false,"cacheBustingToken":"2.4.0.752073333926","cacheBustingStrategy":"query","sidebarFilterPlaceholder":"Filter","toolbarFilterPlaceholder":"Filter","showSidebarFilter":true,"filterNotFoundMsg":"No member names found containing the query \"{query}\"","maxHistoryItems":15,"homeIcon":"
","access":[{"value":"public","label":"Public"},{"value":"protected","label":"Protected"}],"toolbarLinks":[{"id":"fields","label":"Fields"},{"id":"properties","label":"Properties","shortLabel":"Props"},{"id":"methods","label":"Methods"},{"id":"events","label":"Events"}],"sidebar":[{"n":"/","l":"Welcome","s":"
"},{"n":"frinx-machine","l":"Frinx Machine","c":false,"i":[{"n":"getting-started","l":"FRINX Machine introduction","s":"
"},{"n":"azure-ad","l":"Frinx Machine with Azure AD","s":"
"},{"n":"monitoring","l":"Monitoring with Grafana","s":"
"},{"n":"use-cases","l":"Demo Use Cases","i":[{"n":"add-to-inventory-and-install","l":"Add a device to inventory and install it"},{"n":"create-l2-vpn-p2p","l":"Creating a Layer 2 VPN Point-to-Point Connection"},{"n":"frinx-machine-demo-manual","l":"FRINX Machine Demo Manual"},{"n":"install-all-devices-from-inventory","l":"Install all devices from inventory"},{"n":"policy-filter-xr","l":"Policy filter XR"}],"s":"
"}],"s":"
"},{"n":"frinx-uniconfig","l":"Frinx UniConfig","c":false,"i":[{"n":"getting-started","l":"Getting started","s":"
"},{"n":"user-guide","l":"User Guide","i":[{"n":"basic-concepts","l":"Basic Concepts"},{"n":"network-management-protocols","l":"Device Installation","i":[{"n":"uniconfig-installing","l":"Device Installation"},{"n":"uniconfig_cli","l":"UniConfig CLI"},{"n":"uniconfig_netconf","l":"UniConfig NETCONF","i":[{"n":"calix","l":"Calix devices"},{"n":"iosxr","l":"Cisco IOS XR devices"},{"n":"ocnos","l":"IP Infusion OcNOS Devices"},{"n":"junos","l":"Juniper Junos devices"},{"n":"sros","l":"Nokia SROS devices"}]},{"n":"uniconfig_snmp","l":"UniConfig SNMP"},{"n":"updating-installation-parameters","l":"Updating installation parameters"},{"n":"uniconfig-native_cli","l":"UniConfig-native CLI"}]},{"n":"uniconfig-operations","l":"UniConfig Operations","i":[{"n":"jsonb-filtering","l":"JSONB Filtering","i":[{"n":"application-jsonb-filtering","l":"Application JSONB Filtering"},{"n":"database-jsonb-filtering","l":"Database JSONB Filtering"}]},{"n":"snapshot-manager","l":"Snapshot Manager","i":[{"n":"obtain_snapshot_metadata","l":"Obtaining snapshots-metadata"},{"n":"rpc_create-snapshot","l":"RPC create-snapshot"},{"n":"rpc_delete-snapshot","l":"RPC delete-snapshot"},{"n":"rpc_replace-config-with-snapshot","l":"RPC replace-config-with-snapshot"}]},{"n":"subtree-manager","l":"Subtree Manager","i":[{"n":"rpc_bulk-edit","l":"RPC bulk-edit"},{"n":"rpc_calculate-subtree-diff","l":"RPC calculate-subtree-diff"},{"n":"rpc_calculate-subtree-git-like-diff","l":"RPC calculate-subtree-git-like-diff"},{"n":"rpc_copy-many-to-one","l":"RPC copy-many-to-one"},{"n":"rpc_copy-one-to-many","l":"RPC copy-one-to-many"},{"n":"rpc_copy-one-to-one","l":"RPC copy-one-to-one"}]},{"n":"transaction-log","l":"Transaction Log","i":[{"n":"rpc_revert-changes","l":"RPC revert-changes"},{"n":"transaction-tracker","l":"Transaction tracker"}]},{"n":"uniconfig-node-manager","l":"UniConfig Node Manager","i":[{"n":"rpc_calculate-diff","l":"RPC calculate-diff"},{"n":"rpc_calculate-git-like-diff","l":"RPC calculate-git-like-diff"},{"n":"uniconfig_check_installed_devices","l":"RPC check-installed-nodes"},{"n":"rpc_checked-commit","l":"RPC checked-commit"},{"n":"rpc_commit","l":"RPC commit"},{"n":"rpc_compare-config","l":"RPC compare-config"},{"n":"uniconfig_get_installed_devices","l":"RPC get-installed-nodes"},{"n":"rpc_health","l":"RPC health"},{"n":"uniconfig_install_multiple_nodes","l":"RPC install-multiple-nodes"},{"n":"rpc_is-in-sync","l":"RPC is-in-sync"},{"n":"rpc_replace-config-with-oper","l":"RPC replace-config-with-operational"},{"n":"rpc_sync-from-network","l":"RPC sync-from-network"},{"n":"rpc_sync-to-network","l":"RPC sync-to-network"},{"n":"uniconfig_uninstall_multiple_nodes","l":"RPC uninstall-multiple-nodes"},{"n":"rpc_validate","l":"RPC validate"}]},{"n":"uniconfig-properties","l":"UniConfig properties","i":[{"n":"rpc_read-properties","l":"RPC read-properties"},{"n":"rpc_update-properties","l":"RPC update-properties"}]},{"n":"utilities","l":"Utilities","i":[{"n":"openapi-diff","l":"Difference between OpenAPI specifications"},{"n":"yang-packager","l":"YANG packager"}]},{"n":"admin-state","l":"Admin State"},{"n":"build-and-commit-model","l":"Build-and-Commit Model"},{"n":"device-discovery","l":"Device Discovery"},{"n":"dryrun-manager","l":"Dry-run manager"},{"n":"immediate-commit-model","l":"Immediate Commit Model"},{"n":"kafka-notifications","l":"Kafka Notifications"},{"n":"operational-data-about-transactions","l":"Operational data about transactions"},{"n":"templates-manager","l":"Templates Manager"},{"n":"restconf","l":"UniConfig - 
Sending and receiving data (RESTCONF)"},{"n":"uniconfig-queries","l":"UniConfig Queries"},{"n":"uniconfig-shell","l":"UniConfig Shell"},{"n":"unistore-api","l":"UniStore API"},{"n":"yang-patch","l":"YANG Patch Operations"}]},{"n":"operational-procedures","l":"Operational Procedures","i":[{"n":"data-flows","l":"Data flows and transformations"},{"n":"data-security-models","l":"Data Security Models"},{"n":"logging","l":"Logging Framework"},{"n":"openapi","l":"OpenAPI"},{"n":"thread-pools","l":"Thread pools"},{"n":"postgres-tls","l":"TLS encryption for Postgres database"},{"n":"tls","l":"TLS-based Authentication"},{"n":"uniconfig-clustering","l":"UniConfig Clustering"},{"n":"uniconfig-properties","l":"Uniconfig properties"}]},{"n":"performance-and-scale","l":"Performance and scale","c":false,"i":[{"n":"performance_characteristics","l":"Performance characteristics"}]},{"n":"monitoring","l":"Monitoring"},{"n":"sdk","l":"UniConfig Client (SDK)"}]},{"n":"developer-guide","l":"Developer Guide","i":[{"n":"architecture","l":"Architecture"},{"n":"translation-units-in-general","l":"Translation Units in general"},{"n":"translation-units-docs","l":"Translation Units Documentation for FRINX Uniconfig"},{"n":"open-config-to-device-config-mapping","l":"OpenConfig to device config mapping"},{"n":"translation-unit-general-implementation","l":"Developing a new translation unit"},{"n":"cli-translation-unit","l":"Implementing CLI Translation Unit"},{"n":"netconf-translation-unit","l":"NETCONF Unified Translation Unit"},{"n":"native-cli-units","l":"Native-CLI translation units"},{"n":"metrics","l":"Metrics"}]},{"n":"release-notes","l":"Release notes","i":[{"n":"uniconfig-5.0.7","l":"Uniconfig 5.0.7 Release Notes"},{"n":"uniconfig-5.0.6","l":"UniConfig 5.0.6"},{"n":"uniconfig-5.0.5","l":"UniConfig 5.0.5"},{"n":"uniconfig-5.0.4","l":"UniConfig 5.0.4"},{"n":"uniconfig-5.0.3","l":"UniConfig 5.0.3"},{"n":"uniconfig-5.0.2","l":"UniConfig 5.0.2"},{"n":"uniconfig-5.0.1","l":"UniConfig 5.0.1"},{"n":"uniconfig-4.2.10","l":"UniConfig 4.2.10"},{"n":"uniconfig-4.2.9","l":"UniConfig 4.2.9"},{"n":"uniconfig-4.2.8","l":"UniConfig 4.2.8"},{"n":"uniconfig-4.2.7","l":"UniConfig 4.2.7"},{"n":"uniconfig-4.2.6","l":"UniConfig 4.2.6"},{"n":"uniconfig-4.2.5","l":"UniConfig 4.2.5"},{"n":"uniconfig-4.2.4","l":"UniConfig 4.2.4"},{"n":"uniconfig-4.2.3","l":"UniConfig 4.2.3"},{"n":"uniconfig-5.0.8","l":"Uniconfig 5.0.8 Release Notes"},{"n":"uniconfig-5.0.9","l":"Uniconfig 5.0.9 Release Notes"},{"n":"uniconfig-5.0.10","l":"Uniconfig 5.0.10 Release Notes"},{"n":"uniconfig-5.0.11","l":"Uniconfig 5.0.11 Release Notes"},{"n":"uniconfig-5.0.12","l":"Uniconfig 5.0.12 Release Notes"},{"n":"uniconfig-5.0.13","l":"Uniconfig 5.0.13 Release Notes"},{"n":"uniconfig-5.0.14","l":"Uniconfig 5.0.14 Release Notes"},{"n":"uniconfig-5.0.15","l":"Uniconfig 5.0.15 Release Notes"},{"n":"uniconfig-5.0.16","l":"Uniconfig 5.0.16 Release Notes"},{"n":"uniconfig-5.0.17","l":"Uniconfig 5.0.17 Release Notes"},{"n":"uniconfig-5.0.18","l":"Uniconfig 5.0.18 Release Notes"},{"n":"uniconfig-5.0.19","l":"Uniconfig 5.0.19 Release Notes"},{"n":"uniconfig-5.0.20","l":"Uniconfig 5.0.20 Release Notes"},{"n":"uniconfig-5.0.21","l":"Uniconfig 5.0.21 Release Notes"},{"n":"uniconfig-5.0.22","l":"Uniconfig 5.0.22 Release Notes"},{"n":"uniconfig-5.0.23","l":"Uniconfig 5.0.23 Release Notes"},{"n":"uniconfig-5.0.24","l":"Uniconfig 5.0.24 Release Notes"},{"n":"uniconfig-5.0.25","l":"Uniconfig 5.0.25 Release Notes"},{"n":"uniconfig-5.1.0","l":"Uniconfig 5.1.0 Release 
Notes"},{"n":"uniconfig-5.1.1","l":"Uniconfig 5.1.1 Release Notes"},{"n":"uniconfig-5.1.2","l":"Uniconfig 5.1.2 Release Notes"},{"n":"uniconfig-5.1.3","l":"Uniconfig 5.1.3 Release Notes"},{"n":"uniconfig-5.1.4","l":"Uniconfig 5.1.4 Release Notes"},{"n":"uniconfig-5.1.5","l":"Uniconfig 5.1.5 Release Notes"},{"n":"uniconfig-5.1.6","l":"Uniconfig 5.1.6 Release Notes"},{"n":"uniconfig-5.1.7","l":"Uniconfig 5.1.7 Release Notes"},{"n":"uniconfig-5.1.8","l":"Uniconfig 5.1.8 Release Notes"},{"n":"uniconfig-5.1.9","l":"Uniconfig 5.1.9 Release Notes"},{"n":"uniconfig-5.1.10","l":"Uniconfig 5.1.10 Release Notes"},{"n":"uniconfig-5.1.11","l":"Uniconfig 5.1.11 Release Notes"},{"n":"uniconfig-5.1.12","l":"Uniconfig 5.1.12 Release Notes"},{"n":"uniconfig-5.1.13","l":"Uniconfig 5.1.13"},{"n":"uniconfig-5.2.0","l":"Uniconfig 5.2.0 Release Notes"},{"n":"uniconfig-5.2.1","l":"Uniconfig 5.2.1"},{"n":"uniconfig-5.2.2","l":"Uniconfig 5.2.2"}]},{"n":"translation-units-docs","l":"Translation Units","i":[{"n":"ietf-to-oc-mapping","l":"Ietf to oc mapping","c":false,"i":[{"n":"ietf_l2p2p_local_to_oc","l":"IETF L2VPN YANG"},{"n":"ietf_l2p2p_remote_to_oc","l":"IETF L2VPN YANG"},{"n":"ietf_l2vpn_to_oc","l":"IETF L2VPN YANG"},{"n":"ietf_l3vpn_to_oc","l":"IETF L3VPN YANG"}]},{"n":"configuration-datasets","l":"Interfaces","i":[{"n":"acl","l":"Acl","c":false,"i":[{"n":"acl_interfaces","l":"Access Control List"},{"n":"acl","l":"Access Control List"}]},{"n":"cable","l":"Cable","c":false,"i":[{"n":"cable_downstream_profile","l":"cable DOWNSTREAM CONTROLLER-PROFILE"},{"n":"cable_fiber_node","l":"cable FIBER-NODE"},{"n":"cable_rpd","l":"cable RPD"}]},{"n":"interfaces","l":"Interfaces","c":false,"i":[{"n":"bridge_interface","l":"BRIDGE interface"},{"n":"cable_interface","l":"CABLE interface"},{"n":"ethernet_interface","l":"Ethernet interface"},{"n":"l2vlan_interface","l":"L2VLAN interface"},{"n":"l3vlan_interface","l":"L3 VLAN interface"},{"n":"lag_interface","l":"Link Aggregation Group (bundle) interface"},{"n":"wideband_interface","l":"WIDEBAND interface"}]},{"n":"ipsec","l":"Ipsec","c":false,"i":[{"n":"ipsec","l":"Internet Protocol Security (IPsec)"}]},{"n":"netflow","l":"Netflow","c":false,"i":[{"n":"netflow_interfaces","l":"NetFlow"}]},{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"l2p2p","l":"L2p2p","c":false,"i":[{"n":"connection_point","l":"L2P2P configuration"}]},{"n":"l2vpn","l":"L2vpn","c":false,"i":[{"n":"connection_point_l2vpn","l":"L2VPN (VPLS with BGP autodiscovery) configuration"}]},{"n":"l2vsi","l":"L2vsi","c":false,"i":[{"n":"l2vsicp","l":"L2VSI (L2 virtual switch instance virtual circuit)"},{"n":"l2vsi","l":"L2VSI (L2 virtual switch instance)"}]},{"n":"l3vpn","l":"L3vpn","c":false,"i":[{"n":"network_instance_l3vpn_bgp","l":"L3VPN configuration (BGP as CE-PE protocol)"},{"n":"network_instance_l3vpn_ospf","l":"L3VPN configuration (OSPF as CE-PE protocol)"}]},{"n":"mpls","l":"Mpls","c":false,"i":[{"n":"mpls_ldp","l":"Multiprotocol Label Switching - Label Distribution Protocol (MPLS LDP)"},{"n":"mpls_rsvp","l":"Multiprotocol Label Switching - Resource Reservation Protocol (MPLS RSVP)"},{"n":"mpls_te","l":"Multiprotocol Label Switching - Traffic Engineering (MPLS-TE)"},{"n":"mpls_tunnel","l":"Multiprotocol Label Switching - Tunnel"}]},{"n":"policy-forwarding","l":"Policy forwarding","c":false,"i":[{"n":"pf_interfaces","l":"Interface policy configuration"}]},{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp","l":"Border Gateway Protocol (BGP)"},{"n":"isis","l":"Intermediate System to 
Intermediate System (IS-IS)"},{"n":"ospf","l":"Open Shortest Path First (OSPF)"},{"n":"ospfv3","l":"Open Shortest Path First v3 (OSPFv3)"},{"n":"static","l":"Static Route"}]},{"n":"vlans","l":"Vlans","c":false,"i":[{"n":"vlan","l":"VLAN"}]},{"n":"network_instance","l":"Configure network instance (VRF)"}]},{"n":"routing-policy","l":"Routing policy","c":false,"i":[{"n":"routing-policy","l":"Routing Policy"}]},{"n":"aaa","l":"AAA - Authentication Authorization Accounting"},{"n":"bcast-containment","l":"Broadcast-Containment (Broadcast-containment filters)"},{"n":"cdp","l":"Configure CDP interfaces"},{"n":"fdp","l":"Configure FDP interfaces"},{"n":"stp","l":"Configure STP interfaces"},{"n":"oam","l":"Ethernet OAM / Ethernet CFM"},{"n":"evc","l":"Ethernet Virtual Circuit (EVC)"},{"n":"evpn","l":"Ethernet Virtual Private Network (EVPN)"},{"n":"hsrp","l":"Hot Standby Router Protocol (HSRP)"},{"n":"l2-cft","l":"L2-Cft (Layer 2 Control Frame Forwarding)"},{"n":"logging","l":"Logging (syslog)"},{"n":"privilege","l":"Privilege"},{"n":"probes","l":"Probes"},{"n":"qos","l":"Quality of Service"},{"n":"relay-agent","l":"Relay Agent"},{"n":"snmp","l":"Simple Network Management Protocol (SNMP)"},{"n":"system","l":"System-wide services and functions"}]},{"n":"operational-datasets","l":"Network Instances","i":[{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp_summary","l":"BGP global + neighbors"},{"n":"bgp_rib","l":"BGP RIB"},{"n":"ospf_summary","l":"Show router ospf type, ID, interfaces"}]}]},{"n":"interfaces","l":"Interfaces"},{"n":"platform","l":"Platform"},{"n":"cdp","l":"Show CDP interfaces and neighbors"},{"n":"lldp","l":"Show LLDP interfaces and neighbors"},{"n":"system","l":"System"}]},{"n":"translation-framework-101","l":"Table of Contents"}]},{"n":"q_a","l":"FAQ","s":"
"},{"n":"glossary-of-terms","l":"Glossary of Terms"},{"n":"supported-devices","l":"List of Supported Devices"}],"s":"
"},{"n":"frinx-workflow-manager","l":"Frinx Workflow Manager","c":false,"i":[{"n":"introduction","l":"FRINX Workflow Manager introduction"},{"n":"create-and-modify-workflows","l":"Create and Modify Workflows and Workers"},{"n":"blueprints","l":"Device Blueprints"},{"n":"inventory","l":"Device Inventory"},{"n":"workflow-builder","l":"Workflow Builder"}],"s":"
"},{"n":"frinx-resource-manager","l":"Frinx Resource Manager","c":false,"i":[{"n":"introduction","l":"FRINX Resource Manager introduction"},{"n":"user-guide","l":"User Guide"},{"n":"pools","l":"Pools"},{"n":"architecture","l":"Resource Manager architecture"},{"n":"developer-guide","l":"Developer Guide"}],"s":"
"}],"search":{"mode":0,"minChars":2,"maxResults":20,"placeholder":"Search","hotkeys":["/"],"noResultsFoundMsg":"Sorry, no results found.","recognizeLanguages":true,"languages":[0],"preload":false}};
+var __DOCS_CONFIG__ = {"id":"IwP142d7/gzu2QiqTCd63VNRLJ6d5DMs2RH","key":"gdTzs/B6i9jTGE5mu1m4zH+6jjpBg7EfaeIRt/Hj74s.d78VjKLlzdXF9JXUkzbXxUplF9YFuK6gnCdw4VeleUy65vCy1A5t6P6VVWkcgObLzdrkFxaaDISNZJdDaa3naA.108198","base":"/","host":"docs.frinx.io","version":"1.0.0","useRelativePaths":true,"documentName":"index.html","appendDocumentName":false,"trailingSlash":true,"preloadSearch":false,"cacheBustingToken":"2.4.0.752073393482","cacheBustingStrategy":"query","sidebarFilterPlaceholder":"Filter","toolbarFilterPlaceholder":"Filter","showSidebarFilter":true,"filterNotFoundMsg":"No member names found containing the query \"{query}\"","maxHistoryItems":15,"homeIcon":"
","access":[{"value":"public","label":"Public"},{"value":"protected","label":"Protected"}],"toolbarLinks":[{"id":"fields","label":"Fields"},{"id":"properties","label":"Properties","shortLabel":"Props"},{"id":"methods","label":"Methods"},{"id":"events","label":"Events"}],"sidebar":[{"n":"/","l":"Welcome","s":"
"},{"n":"frinx-machine","l":"Frinx Machine","c":false,"i":[{"n":"getting-started","l":"FRINX Machine introduction","s":"
"},{"n":"azure-ad","l":"Frinx Machine with Azure AD","s":"
"},{"n":"monitoring","l":"Monitoring with Grafana","s":"
"},{"n":"use-cases","l":"Demo Use Cases","i":[{"n":"add-to-inventory-and-install","l":"Add a device to inventory and install it"},{"n":"create-l2-vpn-p2p","l":"Creating a Layer 2 VPN Point-to-Point Connection"},{"n":"frinx-machine-demo-manual","l":"FRINX Machine Demo Manual"},{"n":"install-all-devices-from-inventory","l":"Install all devices from inventory"},{"n":"policy-filter-xr","l":"Policy filter XR"}],"s":"
"}],"s":"
"},{"n":"frinx-uniconfig","l":"Frinx UniConfig","c":false,"i":[{"n":"getting-started","l":"Getting started","s":"
"},{"n":"user-guide","l":"User Guide","i":[{"n":"basic-concepts","l":"Basic Concepts"},{"n":"network-management-protocols","l":"Device Installation","i":[{"n":"uniconfig-installing","l":"Device Installation"},{"n":"uniconfig_cli","l":"UniConfig CLI"},{"n":"uniconfig_netconf","l":"UniConfig NETCONF","i":[{"n":"calix","l":"Calix devices"},{"n":"iosxr","l":"Cisco IOS XR devices"},{"n":"ocnos","l":"IP Infusion OcNOS Devices"},{"n":"junos","l":"Juniper Junos devices"},{"n":"sros","l":"Nokia SROS devices"}]},{"n":"uniconfig_snmp","l":"UniConfig SNMP"},{"n":"updating-installation-parameters","l":"Updating installation parameters"},{"n":"uniconfig-native_cli","l":"UniConfig-native CLI"}]},{"n":"uniconfig-operations","l":"UniConfig Operations","i":[{"n":"jsonb-filtering","l":"JSONB Filtering","i":[{"n":"application-jsonb-filtering","l":"Application JSONB Filtering"},{"n":"database-jsonb-filtering","l":"Database JSONB Filtering"}]},{"n":"snapshot-manager","l":"Snapshot Manager","i":[{"n":"obtain_snapshot_metadata","l":"Obtaining snapshots-metadata"},{"n":"rpc_create-snapshot","l":"RPC create-snapshot"},{"n":"rpc_delete-snapshot","l":"RPC delete-snapshot"},{"n":"rpc_replace-config-with-snapshot","l":"RPC replace-config-with-snapshot"}]},{"n":"subtree-manager","l":"Subtree Manager","i":[{"n":"rpc_bulk-edit","l":"RPC bulk-edit"},{"n":"rpc_calculate-subtree-diff","l":"RPC calculate-subtree-diff"},{"n":"rpc_calculate-subtree-git-like-diff","l":"RPC calculate-subtree-git-like-diff"},{"n":"rpc_copy-many-to-one","l":"RPC copy-many-to-one"},{"n":"rpc_copy-one-to-many","l":"RPC copy-one-to-many"},{"n":"rpc_copy-one-to-one","l":"RPC copy-one-to-one"}]},{"n":"transaction-log","l":"Transaction Log","i":[{"n":"rpc_revert-changes","l":"RPC revert-changes"},{"n":"transaction-tracker","l":"Transaction tracker"}]},{"n":"uniconfig-node-manager","l":"UniConfig Node Manager","i":[{"n":"rpc_calculate-diff","l":"RPC calculate-diff"},{"n":"rpc_calculate-git-like-diff","l":"RPC calculate-git-like-diff"},{"n":"uniconfig_check_installed_devices","l":"RPC check-installed-nodes"},{"n":"rpc_checked-commit","l":"RPC checked-commit"},{"n":"rpc_commit","l":"RPC commit"},{"n":"rpc_compare-config","l":"RPC compare-config"},{"n":"uniconfig_get_installed_devices","l":"RPC get-installed-nodes"},{"n":"rpc_health","l":"RPC health"},{"n":"uniconfig_install_multiple_nodes","l":"RPC install-multiple-nodes"},{"n":"rpc_is-in-sync","l":"RPC is-in-sync"},{"n":"rpc_replace-config-with-oper","l":"RPC replace-config-with-operational"},{"n":"rpc_sync-from-network","l":"RPC sync-from-network"},{"n":"rpc_sync-to-network","l":"RPC sync-to-network"},{"n":"uniconfig_uninstall_multiple_nodes","l":"RPC uninstall-multiple-nodes"},{"n":"rpc_validate","l":"RPC validate"}]},{"n":"uniconfig-properties","l":"UniConfig properties","i":[{"n":"rpc_read-properties","l":"RPC read-properties"},{"n":"rpc_update-properties","l":"RPC update-properties"}]},{"n":"utilities","l":"Utilities","i":[{"n":"openapi-diff","l":"Difference between OpenAPI specifications"},{"n":"yang-packager","l":"YANG packager"}]},{"n":"admin-state","l":"Admin State"},{"n":"build-and-commit-model","l":"Build-and-Commit Model"},{"n":"device-discovery","l":"Device Discovery"},{"n":"dryrun-manager","l":"Dry-run manager"},{"n":"immediate-commit-model","l":"Immediate Commit Model"},{"n":"kafka-notifications","l":"Kafka Notifications"},{"n":"operational-data-about-transactions","l":"Operational data about transactions"},{"n":"templates-manager","l":"Templates Manager"},{"n":"restconf","l":"UniConfig - 
Sending and receiving data (RESTCONF)"},{"n":"uniconfig-queries","l":"UniConfig Queries"},{"n":"uniconfig-shell","l":"UniConfig Shell"},{"n":"unistore-api","l":"UniStore API"},{"n":"yang-patch","l":"YANG Patch Operations"}]},{"n":"operational-procedures","l":"Operational Procedures","i":[{"n":"data-flows","l":"Data flows and transformations"},{"n":"data-security-models","l":"Data Security Models"},{"n":"logging","l":"Logging Framework"},{"n":"openapi","l":"OpenAPI"},{"n":"thread-pools","l":"Thread pools"},{"n":"postgres-tls","l":"TLS encryption for Postgres database"},{"n":"tls","l":"TLS-based Authentication"},{"n":"uniconfig-clustering","l":"UniConfig Clustering"},{"n":"uniconfig-properties","l":"Uniconfig properties"}]},{"n":"performance-and-scale","l":"Performance and scale","c":false,"i":[{"n":"performance_characteristics","l":"Performance characteristics"}]},{"n":"monitoring","l":"Monitoring"},{"n":"sdk","l":"UniConfig Client (SDK)"}]},{"n":"developer-guide","l":"Developer Guide","i":[{"n":"architecture","l":"Architecture"},{"n":"translation-units-in-general","l":"Translation Units in general"},{"n":"translation-units-docs","l":"Translation Units Documentation for FRINX Uniconfig"},{"n":"open-config-to-device-config-mapping","l":"OpenConfig to device config mapping"},{"n":"translation-unit-general-implementation","l":"Developing a new translation unit"},{"n":"cli-translation-unit","l":"Implementing CLI Translation Unit"},{"n":"netconf-translation-unit","l":"NETCONF Unified Translation Unit"},{"n":"native-cli-units","l":"Native-CLI translation units"},{"n":"metrics","l":"Metrics"}]},{"n":"release-notes","l":"Release notes","i":[{"n":"uniconfig-5.0.7","l":"Uniconfig 5.0.7 Release Notes"},{"n":"uniconfig-5.0.6","l":"UniConfig 5.0.6"},{"n":"uniconfig-5.0.5","l":"UniConfig 5.0.5"},{"n":"uniconfig-5.0.4","l":"UniConfig 5.0.4"},{"n":"uniconfig-5.0.3","l":"UniConfig 5.0.3"},{"n":"uniconfig-5.0.2","l":"UniConfig 5.0.2"},{"n":"uniconfig-5.0.1","l":"UniConfig 5.0.1"},{"n":"uniconfig-4.2.10","l":"UniConfig 4.2.10"},{"n":"uniconfig-4.2.9","l":"UniConfig 4.2.9"},{"n":"uniconfig-4.2.8","l":"UniConfig 4.2.8"},{"n":"uniconfig-4.2.7","l":"UniConfig 4.2.7"},{"n":"uniconfig-4.2.6","l":"UniConfig 4.2.6"},{"n":"uniconfig-4.2.5","l":"UniConfig 4.2.5"},{"n":"uniconfig-4.2.4","l":"UniConfig 4.2.4"},{"n":"uniconfig-4.2.3","l":"UniConfig 4.2.3"},{"n":"uniconfig-5.0.8","l":"Uniconfig 5.0.8 Release Notes"},{"n":"uniconfig-5.0.9","l":"Uniconfig 5.0.9 Release Notes"},{"n":"uniconfig-5.0.10","l":"Uniconfig 5.0.10 Release Notes"},{"n":"uniconfig-5.0.11","l":"Uniconfig 5.0.11 Release Notes"},{"n":"uniconfig-5.0.12","l":"Uniconfig 5.0.12 Release Notes"},{"n":"uniconfig-5.0.13","l":"Uniconfig 5.0.13 Release Notes"},{"n":"uniconfig-5.0.14","l":"Uniconfig 5.0.14 Release Notes"},{"n":"uniconfig-5.0.15","l":"Uniconfig 5.0.15 Release Notes"},{"n":"uniconfig-5.0.16","l":"Uniconfig 5.0.16 Release Notes"},{"n":"uniconfig-5.0.17","l":"Uniconfig 5.0.17 Release Notes"},{"n":"uniconfig-5.0.18","l":"Uniconfig 5.0.18 Release Notes"},{"n":"uniconfig-5.0.19","l":"Uniconfig 5.0.19 Release Notes"},{"n":"uniconfig-5.0.20","l":"Uniconfig 5.0.20 Release Notes"},{"n":"uniconfig-5.0.21","l":"Uniconfig 5.0.21 Release Notes"},{"n":"uniconfig-5.0.22","l":"Uniconfig 5.0.22 Release Notes"},{"n":"uniconfig-5.0.23","l":"Uniconfig 5.0.23 Release Notes"},{"n":"uniconfig-5.0.24","l":"Uniconfig 5.0.24 Release Notes"},{"n":"uniconfig-5.0.25","l":"Uniconfig 5.0.25 Release Notes"},{"n":"uniconfig-5.1.0","l":"Uniconfig 5.1.0 Release 
Notes"},{"n":"uniconfig-5.1.1","l":"Uniconfig 5.1.1 Release Notes"},{"n":"uniconfig-5.1.2","l":"Uniconfig 5.1.2 Release Notes"},{"n":"uniconfig-5.1.3","l":"Uniconfig 5.1.3 Release Notes"},{"n":"uniconfig-5.1.4","l":"Uniconfig 5.1.4 Release Notes"},{"n":"uniconfig-5.1.5","l":"Uniconfig 5.1.5 Release Notes"},{"n":"uniconfig-5.1.6","l":"Uniconfig 5.1.6 Release Notes"},{"n":"uniconfig-5.1.7","l":"Uniconfig 5.1.7 Release Notes"},{"n":"uniconfig-5.1.8","l":"Uniconfig 5.1.8 Release Notes"},{"n":"uniconfig-5.1.9","l":"Uniconfig 5.1.9 Release Notes"},{"n":"uniconfig-5.1.10","l":"Uniconfig 5.1.10 Release Notes"},{"n":"uniconfig-5.1.11","l":"Uniconfig 5.1.11 Release Notes"},{"n":"uniconfig-5.1.12","l":"Uniconfig 5.1.12 Release Notes"},{"n":"uniconfig-5.1.13","l":"Uniconfig 5.1.13"},{"n":"uniconfig-5.1.14","l":"Uniconfig 5.1.14"},{"n":"uniconfig-5.2.0","l":"Uniconfig 5.2.0 Release Notes"},{"n":"uniconfig-5.2.1","l":"Uniconfig 5.2.1"},{"n":"uniconfig-5.2.2","l":"Uniconfig 5.2.2"}]},{"n":"translation-units-docs","l":"Translation Units","i":[{"n":"ietf-to-oc-mapping","l":"Ietf to oc mapping","c":false,"i":[{"n":"ietf_l2p2p_local_to_oc","l":"IETF L2VPN YANG"},{"n":"ietf_l2p2p_remote_to_oc","l":"IETF L2VPN YANG"},{"n":"ietf_l2vpn_to_oc","l":"IETF L2VPN YANG"},{"n":"ietf_l3vpn_to_oc","l":"IETF L3VPN YANG"}]},{"n":"configuration-datasets","l":"Interfaces","i":[{"n":"acl","l":"Acl","c":false,"i":[{"n":"acl_interfaces","l":"Access Control List"},{"n":"acl","l":"Access Control List"}]},{"n":"cable","l":"Cable","c":false,"i":[{"n":"cable_downstream_profile","l":"cable DOWNSTREAM CONTROLLER-PROFILE"},{"n":"cable_fiber_node","l":"cable FIBER-NODE"},{"n":"cable_rpd","l":"cable RPD"}]},{"n":"interfaces","l":"Interfaces","c":false,"i":[{"n":"bridge_interface","l":"BRIDGE interface"},{"n":"cable_interface","l":"CABLE interface"},{"n":"ethernet_interface","l":"Ethernet interface"},{"n":"l2vlan_interface","l":"L2VLAN interface"},{"n":"l3vlan_interface","l":"L3 VLAN interface"},{"n":"lag_interface","l":"Link Aggregation Group (bundle) interface"},{"n":"wideband_interface","l":"WIDEBAND interface"}]},{"n":"ipsec","l":"Ipsec","c":false,"i":[{"n":"ipsec","l":"Internet Protocol Security (IPsec)"}]},{"n":"netflow","l":"Netflow","c":false,"i":[{"n":"netflow_interfaces","l":"NetFlow"}]},{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"l2p2p","l":"L2p2p","c":false,"i":[{"n":"connection_point","l":"L2P2P configuration"}]},{"n":"l2vpn","l":"L2vpn","c":false,"i":[{"n":"connection_point_l2vpn","l":"L2VPN (VPLS with BGP autodiscovery) configuration"}]},{"n":"l2vsi","l":"L2vsi","c":false,"i":[{"n":"l2vsicp","l":"L2VSI (L2 virtual switch instance virtual circuit)"},{"n":"l2vsi","l":"L2VSI (L2 virtual switch instance)"}]},{"n":"l3vpn","l":"L3vpn","c":false,"i":[{"n":"network_instance_l3vpn_bgp","l":"L3VPN configuration (BGP as CE-PE protocol)"},{"n":"network_instance_l3vpn_ospf","l":"L3VPN configuration (OSPF as CE-PE protocol)"}]},{"n":"mpls","l":"Mpls","c":false,"i":[{"n":"mpls_ldp","l":"Multiprotocol Label Switching - Label Distribution Protocol (MPLS LDP)"},{"n":"mpls_rsvp","l":"Multiprotocol Label Switching - Resource Reservation Protocol (MPLS RSVP)"},{"n":"mpls_te","l":"Multiprotocol Label Switching - Traffic Engineering (MPLS-TE)"},{"n":"mpls_tunnel","l":"Multiprotocol Label Switching - Tunnel"}]},{"n":"policy-forwarding","l":"Policy forwarding","c":false,"i":[{"n":"pf_interfaces","l":"Interface policy configuration"}]},{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp","l":"Border Gateway Protocol 
(BGP)"},{"n":"isis","l":"Intermediate System to Intermediate System (IS-IS)"},{"n":"ospf","l":"Open Shortest Path First (OSPF)"},{"n":"ospfv3","l":"Open Shortest Path First v3 (OSPFv3)"},{"n":"static","l":"Static Route"}]},{"n":"vlans","l":"Vlans","c":false,"i":[{"n":"vlan","l":"VLAN"}]},{"n":"network_instance","l":"Configure network instance (VRF)"}]},{"n":"routing-policy","l":"Routing policy","c":false,"i":[{"n":"routing-policy","l":"Routing Policy"}]},{"n":"aaa","l":"AAA - Authentication Authorization Accounting"},{"n":"bcast-containment","l":"Broadcast-Containment (Broadcast-containment filters)"},{"n":"cdp","l":"Configure CDP interfaces"},{"n":"fdp","l":"Configure FDP interfaces"},{"n":"stp","l":"Configure STP interfaces"},{"n":"oam","l":"Ethernet OAM / Ethernet CFM"},{"n":"evc","l":"Ethernet Virtual Circuit (EVC)"},{"n":"evpn","l":"Ethernet Virtual Private Network (EVPN)"},{"n":"hsrp","l":"Hot Standby Router Protocol (HSRP)"},{"n":"l2-cft","l":"L2-Cft (Layer 2 Control Frame Forwarding)"},{"n":"logging","l":"Logging (syslog)"},{"n":"privilege","l":"Privilege"},{"n":"probes","l":"Probes"},{"n":"qos","l":"Quality of Service"},{"n":"relay-agent","l":"Relay Agent"},{"n":"snmp","l":"Simple Network Management Protocol (SNMP)"},{"n":"system","l":"System-wide services and functions"}]},{"n":"operational-datasets","l":"Network Instances","i":[{"n":"network-instances","l":"Network instances","c":false,"i":[{"n":"protocols","l":"Protocols","c":false,"i":[{"n":"bgp_summary","l":"BGP global + neighbors"},{"n":"bgp_rib","l":"BGP RIB"},{"n":"ospf_summary","l":"Show router ospf type, ID, interfaces"}]}]},{"n":"interfaces","l":"Interfaces"},{"n":"platform","l":"Platform"},{"n":"cdp","l":"Show CDP interfaces and neighbors"},{"n":"lldp","l":"Show LLDP interfaces and neighbors"},{"n":"system","l":"System"}]},{"n":"translation-framework-101","l":"Table of Contents"}]},{"n":"q_a","l":"FAQ","s":"
"},{"n":"glossary-of-terms","l":"Glossary of Terms"},{"n":"supported-devices","l":"List of Supported Devices"}],"s":"
"},{"n":"frinx-workflow-manager","l":"Frinx Workflow Manager","c":false,"i":[{"n":"introduction","l":"FRINX Workflow Manager introduction"},{"n":"create-and-modify-workflows","l":"Create and Modify Workflows and Workers"},{"n":"blueprints","l":"Device Blueprints"},{"n":"inventory","l":"Device Inventory"},{"n":"workflow-builder","l":"Workflow Builder"}],"s":"
"},{"n":"frinx-resource-manager","l":"Frinx Resource Manager","c":false,"i":[{"n":"introduction","l":"FRINX Resource Manager introduction"},{"n":"user-guide","l":"User Guide"},{"n":"pools","l":"Pools"},{"n":"architecture","l":"Resource Manager architecture"},{"n":"developer-guide","l":"Developer Guide"}],"s":"
"}],"search":{"mode":0,"minChars":2,"maxResults":20,"placeholder":"Search","hotkeys":["/"],"noResultsFoundMsg":"Sorry, no results found.","recognizeLanguages":true,"languages":[0],"preload":false}};
diff --git a/resources/js/search.json b/resources/js/search.json
index 52c569e3f..307121ead 100644
--- a/resources/js/search.json
+++ b/resources/js/search.json
@@ -1 +1 @@
-[[{"i":"welcome-to-frinx-documentation","l":"Welcome to FRINX Documentation!","p":["The FRINX documentation site contains all FRINX projects, releases and documentation. Please use the search bar in the upper-left corner to find the specific information you need."]},{"l":"FRINX Machine","p":["FRINX Machine provides a platform allowing easy definition, execution and monitoring of complex workflows using FRINX UniConfig."]},{"l":"FRINX UniConfig","p":["FRINX UniConfig is a suite of applications aimed at network configuration management."]},{"l":"FRINX Workflow Manager","p":["FRINX Workflow Manager allows customers to create automated and repeatable digital processes to build, grow and operate their digital communication infrastructure."]},{"l":"FRINX Resource Manager","p":["FRINX Resource Manager helps network operators and infrastructure engineers manage their physical and logical assets and resources."]}],[{"l":"FRINX Machine introduction","p":["FRINX Machine is a dockerized deployment of multiple elements. The FRINX Machine enables large-scale automation of network devices, services and retrieval of operational state data from a network. User-specific workflows are designed through the use of OpenConfig NETCONF & YANG models, vendor native models, and the CLI. The FRINX Machine uses dockerized containers that are designed and tested to work together to create a user-specific solution.","For installation, please refer to: FRINX Machine repository","FRINX-machine can be installed in Kubernetes using the Helm chart"]},{"l":"FRINX Machine components"},{"l":"FRINX UniConfig","p":["Connects to the devices in the network","Retrieves and stores configuration from devices","Pushes configuration data to devices","Builds diffs between actual and intended config to execute atomic configuration changes","Retrieves operational data from devices","Manages transactions across one or multiple devices","Translates between CLI, vendor native, and industry standard data models (i.e. OpenConfig)","Reads and stores vendor native data models from mounted network devices (i.e. YANG models)","Ensures high availability, reducing network outages and downtime","Executes commands on multiple devices simultaneously"]},{"i":"netflix-conductor-workflow-engine","l":"Netflix Conductor (workflow engine)","p":["Atomic tasks are chained together into more complex workflows","Defines, executes and monitors workflows (via REST or UI)","We chose Netflix’s Conductor workflow engine since it has been proven to be a highly scalable open-source technology that integrates very well with FRINX UniConfig. Further information about Conductor can be found at:","Sources: https://github.com/Netflix/conductor","Docs: https://netflix.github.io/conductor/"]},{"i":"elasticsearch-inventory-and-logs","l":"Elasticsearch (inventory and logs)","p":["Stores inventory data in near real-time","Stores workflow execution and metadata","Stores UniConfig logs"]},{"i":"uniconfig-ui-user-interface","l":"UniConfig UI (user interface)","p":["This is the primary user interface for the FRINX Machine","Allows users to create, edit or run workflows and monitor any open tasks","Allows users to mount devices and view their status. The UI allows users to execute UniConfig operations such as read, edit, and commit. 
Configurations can be pushed to or synced from the network","Inventory, workflow execution, metadata and UniConfig log files are all accessible through the UI","View inventory, workflow execution, metadata and UniConfig log files"]},{"l":"High Level Architecture","p":["The following diagram outlines the main functional components of the FRINX Machine solution:","FM Architecture","FRINX Machine repository is available at https://github.com/FRINXio/FRINX-machine","Frinx-conductor repository is available at https://github.com/FRINXio/conductor"]},{"l":"Defining a workflow","p":["The workflows are defined using a JSON-based domain-specific language (DSL) by wiring a set of tasks together. The tasks are either control tasks (fork, conditional, etc.) or application tasks (e.g. encoding a file) that are executed on a remote device.","The FRINX Machine distribution comes pre-loaded with a number of standardized workflows","A detailed description of how to run workflows and tasks, along with examples, can be found in the official Netflix Conductor documentation"]},{"l":"Operating FRINX Machine","p":["To find out more about how to run the pre-packaged workflows, continue to Use cases"]}],[{"l":"Frinx Machine with Azure AD","p":["Frinx Machine supports authentication and authorization via Azure AD. The following sections describe how to set up Azure AD for Frinx Machine."]},{"l":"Client configuration","p":["Register the application in your Azure AD and configure the following settings."]},{"l":"Redirect URIs","p":["Cloud Postman","Cloud swagger","Frontend Login","Frontend login URI is passed to the installation script azure_ad.sh via the --redirect_url flag.","https://<IP/DNS>/, e.g. https://localhost/","https://<IP/DNS>/oauth2-redirect.html","https://editor.swagger.io/oauth2-redirect.html","https://getpostman.com/oauth2/callback","https://oauth.pstmn.io/v1/callback","Local Postman","Platform configuration","Redirect URI","Set platform redirect URIs on the Authentication page. The table below shows examples of configuration settings.","Single-page application","Syntax","Web","Workflow Manager docs (swagger)"]},{"i":"implicit-flow-and-singlemulti-tenancy-settings","l":"Implicit flow and single/multi-tenancy settings","p":["On the same page, choose single/multi-tenancy. Based on this setting, the parameter --tenant_name is defined in the installation script azure_ad.sh.","For a single tenant, use the Azure AD domain name from the AD overview. For multi-tenant, use the value common. Enabling implicit flow is optional, based on specific requirements.","Token config"]},{"l":"API permissions","p":["Client API permissions"]},{"l":"Client secrets","p":["Generate a secret and use it as an input parameter for the --client_secret flag in the installation script azure_ad.sh. This secret is used in the KrakenD Azure plugin for translating the group ID to the group name (human-readable format).","Azure client secrets"]},{"l":"Token claims configuration","p":["Example of an encoded JWT token with claims. These claims are transferred to the request header (see KrakenD Azure Plugin docs for more info)."]},{"l":"RBAC configuration","p":["The super user is defined in the .env file via the ADMIN_GROUP variable."]},{"l":"Workflow Manager","p":["The RBAC proxy adds 2 features on top of the tenant proxy:","Ensures user authorization to access certain endpoints","Filters workflow definitions and workflow executions based on the user's roles, groups and userID","RBAC support simply distinguishes 2 user types: an admin and everyone else. 
An admin has full access to the workflow API, while an ordinary user can only:","Read workflow definitions","Ordinary users can only view workflow definitions belonging to the same groups","A workflow definition (created by an admin) can have multiple labels assigned","A user can belong to multiple groups","User groups are identified in the HTTP request's header field x-auth-user-roles","If an ordinary user's group matches one of the workflow labels, the workflow becomes visible to the user","Execute visible workflow definitions","Monitor running executions","Only those executed by the user currently logged in","Define user roles in a workflow by adding a role or group name to the description label.","Example: User.ReadWrite, Role.ReadWrite and Group.ReadWrite labels added to the workflow description."]},{"l":"Uniconfig","p":["Super-users (based on their role and user groups) can use all REST APIs. Regular users will only be able to use GET REST API requests.","Role","READ (GET REQUEST)","WRITE (ALL REQUEST)","Admin (Superuser)","true","Regular user","false"]},{"l":"Resource Manager","p":["A simple RBAC model is implemented where only super-users (based on their role and user groups) can manipulate resource types, resource pools and labels. Regular users will only be able to read the above entities, allocate and free resources.","Role","READ","WRITE","Admin (Superuser)","true","Regular user","false"]}],[{"l":"Grafana","p":["Grafana is open-source visualization and analytics software. It allows you to query, visualize, alert on, and explore metrics, logs and traces no matter where they are stored.","Grafana Login page","By default, Grafana can be accessed at localhost:3000 or 127.0.0.1:3000","Default credentials are:","Username: frinx Password: frinx123!"]},{"l":"Monitoring","p":["Grafana in FRINX Machine monitors a multitude of metrics. At this time, these are:","Device monitoring","FRINX Machine logs","Node monitoring","Swarm monitoring","SSL monitoring","UniConfig-controller monitoring","Workflows monitoring"]},{"l":"Device Monitoring","p":["This dashboard displays data on a specific installed device/node."]},{"l":"FRINX Machine Logs","p":["This dashboard monitors all services running in FRINX Machine. You can filter by individual services, and also look for a specific value.","Logs Monitoring"]},{"l":"FRINX Machine Node Monitoring","p":["This dashboard monitors the state of the VM/system where FRINX Machine is running. It reports info like CPU utilisation, memory utilisation, disk usage, up-time, etc.","Node Monitoring"]},{"l":"FRINX Machine Swarm Monitoring","p":["This dashboard monitors metrics specifically tied to FM within the VM/system. Metrics like up-time, available/utilised memory, number of running/stopped containers, CPU usage per container, memory usage per container, incoming/outgoing network traffic, etc.","Swarm Monitoring"]},{"l":"SSL Monitoring","p":["This dashboard displays data about your SSL certificates. It displays the dates until which your certificates are valid."]},{"l":"UniConfig Controller Monitoring","p":["This dashboard keeps track of various UniConfig transactions. It displays the number of transactions at a given time."]},{"l":"Workflows Monitoring","p":["Collecting data on workflows is being worked on."]}],[{"l":"Demo Use Cases","p":["There are several ways of installing devices in FRINX Machine. You can either run a workflow to install a network device directly, or you can add devices to your Kibana inventory and install devices from there. 
From your Kibana inventory, you can install a single device, but you can also install every device in the inventory simultaneously.","To start installing devices, open the FRINX UniConfig UI."]},{"l":"Open FRINX UniConfig UI","p":["Open your browser and go to [host_ip]; if installed locally, go to https://localhost. This is the GUI (UniConfig UI) for managing all of your devices. You should see a screen like this:","FM 2.0 Dashboard","For Demo Use Cases, please download the repository fm-workflows","Make sure FRINX-machine is running, navigate to","and execute","Imported workflows and tasks will appear in the FRINX Machine UI immediately after the import finishes.","In the following articles, you'll learn how to install a device from UniConfig and how to install all devices from the inventory. This inventory is automatically set up for you when you start FRINX Machine. After that, we'll learn how to create a loopback address on the devices that we previously stored in the inventory and how to read the journals of these devices.","Then we'll take a look at how to obtain platform inventory data from the devices that you have in the network and how to store them in inventory. Next, you'll learn how to save commands to your inventory and execute them on the devices that are in your network.","Lastly, we'll take a look at how you can add devices to your inventory manually. This might be useful if you wanted to play around with the FRINX Machine a bit and try installing your own networking devices."]}],[{"l":"Add a device to inventory and install it"},{"l":"Adding device to inventory","p":["To add a new device to inventory, click on the Add device button in the Device inventory tab.","Add device to inventory"]},{"l":"JSON examples","p":["New devices are added to inventory using JSON code snippets. They are similar to Blueprints with one addition: device_id must be specified in the snippet.","To add a new device from a Blueprint, toggle the \"Blueprints\" switch in the form and choose the blueprint that you want to use."]},{"i":"cisco-classic-ios-cli","l":"Cisco classic IOS (cli)"},{"i":"cisco-ios-xr-netconf","l":"Cisco IOS XR (netconf)"},{"i":"huawei-cli","l":"Huawei (cli)"},{"i":"calix-netconf","l":"CALIX (netconf)"},{"i":"nokia-netconf","l":"Nokia (netconf)"},{"l":"Install the new device from Inventory","p":["Now that the device is added, we can install it. We used to need a dedicated workflow to install a device from inventory, but now it can be done purely via the UI. 
Click on Explore in the Explore & configure devices tab, under the Device Inventory section.","Install device from inventory","If you did everything correctly, your device is now in the inventory and installed, ready to be operated through FRINX Machine."]}],[{"l":"Creating a Layer 2 VPN Point-to-Point Connection","p":["This section details how to find and execute a prebuilt workflow that creates a Layer 2 VPN Point-to-Point connection within Workflow Manager."]},{"l":"Navigating through Workflow Manager","p":["From the FRINX Machine dashboard, you can either select Workflow Manager--> Explore Workflows--> Explore, or select the menu tab in the upper left-hand corner and select Workflow Manager.","You can then search for Create_L2VPN_P2P_OC_uniconfig or scroll down to find it within the inventory of prebuilt workflows.","Frinx Machine Dashboard","Workflows Dashboard","Once you have located the workflow, press the Play button to the right of it; this will navigate you to the workflow configuration window."]},{"l":"Configuring the Workflow","p":["Input is pre-filled with the following data:","L2 VPN Configuration","Once you have finished, press the Execute button; a numeric link will appear to the left of the Execute button. Click on this numeric link to see the output of the executed workflow.","Numeric Link"]},{"l":"Output of the Executed Workflow","p":["On the Workflows page, you will see your executed workflows.","Select the workflow Create_L2VPN_P2P_OC_uniconfig to see the output from all of the tasks completed within this workflow.","Executed Workflow Details","The following sections are available within the output window:","Task Details: This tab gives a detailed list of the individual tasks executed within the conductor, a log of each task's start and end time, and a status of 'Completed' or 'Failed'.","Input/Output: This is the input of the API call and the results from the API call.","JSON: This tab gives a detailed output in JSON format of all executed tasks from within the workflow. Select the Unescape button to make the output more user-friendly to read.","Edit Rerun: Allows you to make changes to your initial workflow, creating a new workflow without affecting the original.","Execution Flow: A structured map from the conductor lays out the path of tasks executed from start to finish; any forks in the path are also shown here.","If you click on any of the tasks, you will receive a pop-up window that gives:","The option to review a summary of input and output of the API call.","JSON output of the completed task that goes into greater detail about the task execution.","Log status."]},{"l":"Sub-Workflows","p":["Within the original Details of the Create_L2VPN_P2P_OC_uniconfig window, you will see a sub-workflow.","Sub-Workflow","This sub-workflow is an embedded task that makes a separate API call to Slack to notify a pre-defined user group that the workflow has been executed and whether it has succeeded or failed."]}],[{"l":"FRINX Machine Demo Manual","p":["Open the Frinx Demo at https://services.frinx.io/frinxui/. (Note that Mozilla Firefox is not supported.)","Select Login in the upper-right corner to log into the service. Please contact info@frinx.io for login credentials.","After logging in, you can see the FRINX Machine dashboard:","FRINX Machine dashboard"]},{"l":"Demo Config Manager UI","p":["Using the Demo Config Manager:","On the FRINX Machine main page, select Explore & configure devices.","Make sure that the device you want to configure is installed. 
If not, select Install first.","For this demo, we use the IOS01 device. Locate the device in the list and select the corresponding gear icon on the right. (If you see a message saying Transaction expired, select Refresh).","FRINX Machine dashboard","For the Loopback0 interface, change the enabled status to false.","Select Save to save your changes.","To review your changes, select Calculate diff.","To view the set of commands used for the change, select Dry run.","To apply changes to the device, select Commit to network. You can also see the changes in the Operational data store.","To revert changes made to the device configuration:","Select Transactions.","Select the Revert icon for your transaction.","Select Revert changes."]},{"l":"Demo workflow UI basics","p":["Workflow Builder is a graphical interface for Workflow Manager and is used to create, modify and manage workflows.","Workflows are groups of tasks and/or sub-workflows that can be used, for example, to install or delete devices, create loopback interfaces on devices, send messages and much more. You can create your own workflows or edit existing ones by adding or removing tasks or sub-workflows.","Every task and sub-workflow placed in a workflow has a unique reference alias, and no two workflows can share a name and version."]},{"l":"How to create a new custom workflow","p":["A translation of what is happening here: \"If the identified device is of the type saos, then extract the name from the output message of the previous task, change the letters to uppercase, extract the version from the output message of the previous task, glue them together and add _1 (because that is how devices are named in this demo topology)\".","Above every task or workflow there are two icons:","As above, if we enter the username and password directly, the workflow will not ask for credentials at startup.","decision task: Makes a different kind of decision from the lambda task discussed above. This task works like a switch on a track, sending the train one way or another. The data needed to make a decision is supplied by the lambda task.","Device_identification task:","Enter details for the new workflow. Under Name, enter a name for your workflow (note that this name cannot be changed later). The Description is for additional information about the workflow and can be left empty. A Label can help you to find your workflow later under Explore workflows, but can also be left empty. Select Save changes when ready.","Enter the following into the body:","Finding your new workflow and running it with multiple different inputs such as 10000, 10002, 10012, etc.","For different ports, you can see different devices with other run commands in memory.","FRINX Machine dashboard","FRINX Machine dashboard FRINX Machine dashboard","If the input value for decision is other, it directs the flow towards device_identification. If the input value is false, it directs the flow towards terminate. This corresponds to the way we connected the cells in the workflow builder.","In the Input parameters tab and the Lambda value field, enter: ${workflow.input.port}. This indicates that the task should work with what was entered in the port field in the input of this workflow. (We will cover this later, in section 7.)","In the Input parameters tab under management_ip, enter sample-topology. This is the name of the topology in this installation, whereas in production you would use a real name. For port, enter ${workflow.input.port}. 
If you enter a port number manually, the workflow will not ask for one when started (the same goes for management_ip and other fields). However, we want the user to be able to select a port they are interested in, as we did with the lambda task in section 4.","In the Input parameters tab, delete the default parameter foo. For the param parameter, enter ${lambda_IkSu.output.result.value}. (Note that IkSu is an automatically generated reference alias that you must edit to match the one generated for you.) What ${lambda_IkSu.output.result.value} means is to take the output of lambda_IkSu, find the result in that output and the value in it.","In the Input parameters tab, enter COMPLETED (or FAILED, at your discretion) in the Termination status field. You can enter whatever message you want in the Expected workflow output field (for example, Device not supported).","In the Script expression field, enter the small function we described above.","In this case, if the specified port is both greater than or equal to 10000 and less than 10005, the status chosen is keep working. Otherwise, the status is end. This status is the output of the lambda and the input for the next task or sub-workflow.","lambda task: Makes a decision on which status to choose based on the embedded port. In this example, we only consider ports 10000–10004; others are ignored. The lambda task lets you enter a small piece of code (a lambda is a function without a name) into the workflow builder.","As we mentioned above, in this demo workflow we will assume that login credentials are the same everywhere.","Next steps:","Now we can add more tasks. In the left column under System tasks, we can add another lambda. In the Workflows section, you can find Read_journal_cli_device. Let us place them next to each other after Device_identification and concatenate them:","Now we can create a new workflow from scratch:","password: ${workflow.input.password}","Read_journal_cli_device: In the Input parameters tab under device_id, enter ${lambda_ZW66.output.result}.","Remove/Expand:","Save and run your workflow.","Second lambda: Enter ${Device_identificationRef_f7I6.output} as the lambda value, meaning \"take the output from the previous Device_identification task and use that\".","Select Create on the main page of FRINX Machine.","Sub-workflows are similar to classic workflows, but inside another workflow. The workflow that we are creating can also be used as a building block for another workflow, becoming a sub-workflow itself. In this manner, we can layer and reuse previously created workflows.","terminate task:","The output from Read_journal_cli_device is concatenated with END, as is the output from terminate. Thus we have closed our custom workflow.","Under System tasks, click the + sign for the lambda, decision and terminate tasks. Under Workflows, click the + sign for Device_identification. Tasks and sub-workflows are added on top of each other on the canvas and can be dragged around. To connect all parts of the workflow, hover over IN and OUT where the + sign appears. Connect the parts as follows: START - lambda - decision - (other) to Device_identification and default to terminate. 
Each task and workflow has a reference alias after its name, which works as a unique identifier.","Update:","username and password: For this demo, we assume that the following login credentials are used on all devices: username: frinx and password: frinx","username: ${workflow.input.username}","When working with devices using different login credentials, you need to be able to change or enter them at startup. This can be achieved in the same way as with the port parameter:"]},{"i":"demo-creating-a-loopback-address-on-devices-stored-in-the-inventory","l":"Demo: Creating a loopback address on devices stored in the inventory","p":["This workflow creates a loopback interface on all devices installed in the inventory or on all devices filtered by labels. Labels are markers that serve as differentiators.","Check if all devices are installed. You can install them manually or by executing the Install_all_from_inventory / 1 workflow.","FRINX Machine dashboard","On the main page, select Explore workflows. In the Search by keyword column, enter loopback. The Create_loopback_all_in_uniconfig / 1 workflow will appear in the list. Under Actions, select the corresponding Run button for the workflow.","Under loopback_id, insert 77 and select Execute. Click on the link that appears.","All tasks were executed correctly and are completed.","On the results page, you will see five individual tasks:"]},{"l":"INVENTORY_get_all_devices_as_dynamic_fork_tasks","p":["This workflow displays a list of all devices in the inventory or devices filtered by label. It parses the output in the correct format for the dynamic fork, which creates a number of tasks depending on the number of devices in the inventory."]},{"l":"SUB_WORKFLOW","p":["This is the dynamic fork sub-workflow. In this case, it creates UNICONFIG_write_structured_device_data for every individual device in the inventory. You can then get detailed information on the progress and success of every device."]},{"l":"UNICONFIG_calculate_diff","p":["This remote procedure call calculates the difference between the actual UniConfig topology devices and the intended UniConfig topology nodes."]},{"l":"UNICONFIG_dryrun_commit","p":["This remote procedure call resolves the difference between actual and intended device configurations. After all changes are applied, the cli-dryrun journal is read and a remote procedure call output is created and returned."]},{"l":"UNICONFIG_commit","p":["This is the final task that actually commits the intended configuration to the devices."]},{"i":"demo-l3vpn","l":"Demo “L3VPN”","p":["On the FRINX Dashboard, open the menu in the top-left corner and select L3VPN Automation.","Select Services.","Select + Add service.","Fill in the information as shown below. Select the chain icon to automatically generate the VPN ID.","FRINX Machine dashboard","Select Save changes.","You are redirected to the previous page.","Select Commit changes.","Select Commit changes again.","After committing, you can see all executed tasks and sub-workflows. 
Select Go to detail to review individual processes."]},{"i":"step-1","l":"Step 1.","p":["Navigate back to the L3VPN Automation page.","Select Sites.","Locate the test_site_3b9UQL4i entry.","FRINX Machine dashboard","For test_site_3b9UQL4i, select Manage and Site network access.","Select Add network access."]},{"i":"step-2","l":"Step 2.","p":["BFD Profile: 500ms","Bgp Profiles: 300ms","BTM Circuit Reference: CES00000000-05","Devices: Select one of the CPE devices.","Enter the following settings:","FRINX Machine dashboard","General and Service","IP Connection","Maximum Routes: 2000","Routing Protocol:","Select + Create Static Protocol.","Select Save Changes.","Static Routing Lan Tag: 999","Static Routing LAN: 10.0.0.0/8","Static Routing Next Hop: 10.0.0.1","SVC Input Bandwith (Mbsp): 1000","SVC Output Bandwith (Mbps): 1000","To automatically generate a provider and customer address, select the chain icon:","VPN Attachment: GNS00001002"]},{"i":"step-3","l":"Step 3.","p":["Select Commit Changes.","FRINX Machine dashboard","Wait until all tasks are completed."]}],[{"l":"Install all devices from inventory","p":["When adding multiple devices to your inventory, it can be tedious to install them individually. To make things easier, we have built a workflow to install all devices present in the inventory.","Follow these instructions to use the workflow:","On the landing page, select Workflow Manager. Then select Explore and search for the workflow called Install_all_from_inventory.","Search for install_all_from_inventory","After searching, select the Execute button (blue play icon). A window appears where you can enter the input parameter. This workflow does not require any input if you want to install all uninstalled devices. If you specified a device label when adding devices, you can use this label to determine which devices should be bulk installed. Select \"Execute\" again.","Execute install_all_from_inventory","After you execute, a numeric link appears to the left of the Execute button. The link takes you to a page that shows individual tasks for this workflow, its inputs and outputs, and whether it was successful or unsuccessful. In the \"Input/Output\" tab, you can see both devices that were installed as a result of this workflow and those that were already installed.","Results of the workflow"]}],[{"l":"Policy filter XR","p":["This workflow uses UniConfig to showcase the filtering capabilities of some of our system tasks. It filters through the interfaces of the device, returns the name of the interface based on its description provided by the user and applies chosen policy on that interface.","Supported device: ios-xr -> IOSXR653_1, IOSXR653_2 & IOSXR663_1 not IOS01 & IOS02","Policy creation is not part of this workflow. The chosen policy must exist on the device before running this workflow."]},{"l":"Searching the workflow","p":["Search"]},{"i":"sync--replace","l":"Sync & Replace","p":["We consider it best practice for all workflows that interact with devices to start with the tasks \"Sync from network\" and \"Replace config with oper\". This ensures that the internal databases of the FRINX Machine are in sync with the latest configuration of the device. The input of these tasks is simply the name of the node(device)."]},{"l":"Read device data","p":["The next part is reading the device config. In the UNICONFIG_read_structured_device_data task, you can specify which part of the config you want to read with URI. 
In this case, we leave the\"URI\" input field empty."]},{"l":"jsonJQ filter","p":["jsonJQ is one of our system tasks that is very useful for filtering data. We use the following query expression:","We search through the whole config, and under the Cisco-IOS-XR-ifmgr-cfg:interface-configurations model we find the interface with a description given by the user. The task returns the name of that interface."]},{"l":"Lambda","p":["Lambda is a generic task that can process any JS code. In this case, we use it to parse the output of the jsonJQ task. jsonJQ returns the name of the interface in a standard decoded format, e.g: \"TenGigE0/0/0/0\". However, we will be using that interface in URI, which means it must be encoded. We can achieve that using a simple JS script:","As an example, we take the interface name TenGigE0/0/0/0 and encode it to TenGigE0%2F0%2F0%2F0."]},{"i":"write--commit","l":"Write & commit","p":["Lastly, we use the output of the lambda task for the configuration. We apply a policy to the interface filtered based on its description."]},{"l":"Example input","p":["Input"]},{"l":"Execution flow"},{"l":"Run of the workflow","p":["Running the workflow","IOSXR653_1 test_map_custom"]}],[{"l":"FRINX UniConfig introduction","p":["The purpose of UniConfig is to manage configuration state and to retrieve operational state of physical and virtual networking devices. UniConfig provides a single API for many different devices in the network. UniConfig can be run as an application on bare metal in a VM or in a container, standalone or as part of our automation solution FRINX Machine. UniConfig has a built-in data store that can be run in memory or with an external database.","UniConfig features"]},{"l":"UniConfig key feature overview","p":["A 'Lazy CLI' feature to suspend and resume connections without having to maintain keepalives","Allows for diffs to be built between actual and intended execution of atomic configuration changes","Can execute commands in parallel on multiple devices","Can read and store proprietary data models from network devices that follow the YANG data model","Choose between NETCONF or RESTCONF to connect to devices","Data export and import via blacklist and whitelist functions","High availability","Offers the ability to do a dry-commit to evaluate the functionality of a configuration before it is executed on devices","Provides snapshots of previous configurations if you need to rollback","Provides subtree filtering capabilities in NETCONF","Provides templates for device configuration","Pushes configuration data to devices via NETCONF or CLI","Python microservices are used to integrate with the FRINX machine","Retrieves and stores current startup and running configuration from mounted network devices","Retrieves operational data from devices via NETCONF or CLI","Subscription to NETCONF notifications via web sockets","Support for 3-phase commit by using NETCONF confirmed-commit","Support for YANG 1.1 and Tail-f actions","Supports PostgreSQL as an external database","The ability to log specific devices as needed","The UniConfig client allows for simple, full-service access to the UniConfig features","The UniConfig UI allows users to interact with the network controller through a web-based user interface","Transactions can be managed on one or multiple devices","Translates between CLI, native model and standard data models (i.e. 
OpenConfig) via our open-source device library( https://github.com/FRINXio/cli-units)"]},{"i":"uniconfig-enables-users-to-communicate-with-their-network-infrastructure-via-four-options","l":"UniConfig enables users to communicate with their network infrastructure via four options:","p":["Execute & Read API- Unstructured data via SSH and Telnet","OpenConfig API– Translation provided by our open source device library","UniConfig Native API– Direct access to vendor specific YANG data models that are native to the connected devices as well as UniConfig functions (i.e. diff, commit, snapshots, etc.)","UniConfig Native CLI API– Programmatic access to the CLI without the need for translation units (experimental)","Execute & Read capable API: Like Ansible, TCL Scripting or similar products strings can be passed and received through SSH or Telnet via REST API. UniConfig provides the authentication and transportation of data without interpreting it.","OpenConfig API: An API that is translated into device specific CLI or YANG data models. The installation of \"translation units\" on devices is required. FRINX provides an open source library of devices from a variety of network vendors. The open source framework allows anyone to contribute or consume the contents of the expanding list of supported network devices.","UniConfig Native API: A vendor specific YANG data models are absorbed by UniConfig to allow configuration of mounted devices. UniConfig maps vendor specific \"native\" models into it's data store to provide stateful configuration capabilities to applications and users.","UniConfig Native CLI API: Allows for interaction with a devices CLI is programmatic through the API without the use of 'translation units', only a schema file is needed. (This option is currently experimental, contact FRINX for more information.)","UniConfig solution"]},{"l":"UniConfig in a Docker container"},{"l":"Download and activate FRINX UniConfig","p":["Enter the following commands to download, activate and start UniConfig in a Docker container:"]},{"l":"Stop the container","p":["To stop the container type:"]},{"l":"UniConfig as a Java process in a VM or on a host"},{"l":"Download FRINX UniConfig","p":["Click on the link to download a zip archive of the latest FRINX UniConfig: uniconfig-5.0.7.zip By downloading the file you accept the FRINX software agreement: EULA"]},{"l":"Activate FRINX UniConfig","p":["To activate UniConfig, unzip the file, open the directory and run the following command:","For more information on the different arguments run the startup script with the -h flag"]},{"l":"OpenAPI","p":["UniConfig distributions contain '.yaml' file that generates list of all usable RPCs and their examples. You can view it locally or on our hosted version that always shows latest OpenAPI version.","File can be found here:"]},{"l":"Offline Activation","p":["Please contact support@frinx.io for offline activation of UniConfig."]}],[{"l":"User Guide"},{"l":"Basic Concepts","p":["Explanation of basic concepts, principles and mechanisms that exist within UniConfig."]},{"l":"Device Installation","p":["Section that explains device installation process. It covers basic mechanisms that take place when installing and explains parameters that are used in installation along with examples of install request examples. 
It then covers differences between CLI and NETCONF API."]},{"l":"UniConfig Operations","p":["This section lists various APIs used interact with UniConfig."]},{"l":"UniConfig Procedures","p":["UniConfig operations are actions that are usually inherent to UniConfig and work on their own when set up properly."]},{"l":"SDK","p":["Uniconfig provides a full blown Java based SDK. All Uniconfig operations available over RESTconf are also available when using the SDK."]}],[{"l":"Basic Concepts","p":["UniConfig is a network controller that enables network operators to automate simple and complex procedures in their heterogeneous networks. UniConfig uses CLI, NETCONF and gNMI to connect to network devices and provides a RESTCONF interface on its northbound to provide an API to applications. UniConfig users use clients in various programming languages to communicate from their applications with the controller. FRINX provides a Java client and python workers to integrate with its workflow automation in FRINX Machine. Other clients can be generated from the OpenAPI documentation of the UniConfig API.","UniConfig is stateless and stores all state information before and after transactions in a PostgreSQL database. UniConfig provides transaction capabilities on its northbound API, so that multiple clients can interact with UniConfig at the same time in a well-structured way. In addition, transactions are also supported towards all network devices independent of the capabilities of these devices. Transactions can be rolled back on error automatically and on user demand by specifying a transaction ID from the transaction log. Clients can use an “immediate commit” model (changes sent to the controller get applied to the devices immediately) or a “build and commit” model (changes are staged on the controller until a commit operation pushes all changes in a transaction to one or multiple devices).","To support N+1 redundancy and horizontal scale (meaning adding more controller instances allows the system to serve more network devices and more clients) UniConfig can be deployed together with a load balancer(E.g.: Traefik). The combination of a state-less load balancer and multiple UniConfig instances achieves high availability and supports many network devices and client applications to configure the network.","An open-source device library allows users to connect UniConfig to CLI devices that do not support SDN protocols like NETCONF and gNMI. This library is open to users, independent software vendors and any 3rd party to contribute to and use to achieve their automation goals.","Finally, the UniConfig shell, allows users to interact with all UniConfig operations and the connected devices in a model driven way through CLI.","UniConfig runs in containers, VMs or as application and can be deployed stand-alone or as part of the \"FRINX Machine\" network automation solution."]}],[{"l":"Device installation"},{"i":"device-installation-1","l":"Device installation","p":["Guide explaining installation mechanisms along with both CLI and NETCONF examples."]},{"l":"UniConfig CLI","p":["The CLI southbound plugin enables the Frinx UniConfig to communicate with CLI devices that do not speak NETCONF or any other programmatic API. The CLI service module uses YANG models and implements a translation logic to send and receive structured data to and from CLI devices."]},{"l":"UniConfig Netconf","p":["NETCONF is an Internet Engineering Task Force (IETF) protocol used for configuration and monitoring devices in the network. 
It can be used to“create, recover, update, and delete configurations of network devices”.NETCONF operations are overlaid on the Remote Procedure Call(RPC) layer and may be described in either XML or JSON."]},{"l":"UniConfig-native CLI","p":["UniConfig-native CLI allows user configuration of CLI-enabled devices using YANG models that describe configuration commands. In UniConfig-native CLI deployment translation units are defined only by YANG models and device-specific characteristics that are used for parsing and serialization of commands. Afterwards, readers and writers are automatically created and provided to translation registry - user doesn’t write them individually. YANG models can be constructed by following of well-defined rules that are explained in Developer Guide.","Network management protocols are used in southbound API of UniConfig Lighty distribution for device installation and communication. Currently, following protocols are supported:","NETCONF (Network Configuration Protocol)","SSH / TELNET"]}],[{"l":"Device installation","p":["Installing is the process of loading device information into UniConfig database. This information is saved in PostgreSQL database and used whenever transaction occurs. When the transaction is finished the connection to device is closed again, until next transaction.","These are the steps of installation process:","creation of UniConfig transaction","creation of mountpoint - connection to device","loading configuration and metadata from mountpoint","closing mountpoint and connection to device","storing synced configuration and metadata to database","closing UniConfig transaction","Node can be installed only once (you will receive error if node has already been installed).","You can specify if you would like to install node on the UniConfig layer. Default value is 'true':","Only 1 node with the same node-id can be installed on UniConfig layer.","It is synchronous: it succeeds only after node is successfully installed it fails in other cases – max-connection-attempts is automatically set to value '1', if different value is not provided in RPC input, database or config file.","Following sections provide deeper explanation of parameters needed for installation, along with example install requests.","Overview of our OpenAPI along with all parameters and expected returns can be found here."]},{"l":"Default parameters","p":["All install parameters (CLI/NETCONF) are set in database when Uniconfig is initializing. Values of these parameters are equal to specific yang model default values. These parameters are used when they are missing in RPC request.","Priority of using install parameters :","Parameter set in install RPC request","Parameter set in database","Default parameter from yang model","Priority of initial writing default parameters into database:","Database already contains default parameters","User defines default parameters into config file","Default values from yang schema file will be saved","Default parameters can be managed (put/read/delete) by user using RESTCONF/Uniconfig-shell.","Definition of default parameters can be also done using config file default-parameters.json. It is placed in config subdirectory together with lighty-uniconfig-config.json.","RPC request - CLI default parameters:","RPC request - NETCONF default parameters:"]},{"l":"Installing CLI device","p":["Install node RPC","List of basic connection parameters that are used for identification of remote device. 
All of these parameters are mandatory.","node-id- Name of node that represents device in the topology.","cli-topology:host- IP address or domain-name of target device that runs SSH or Telnet server.","cli-topology:port- TCP port on which the SSH or Telnet server on remote device is listening to incoming connections. Standard SSH port is '22', standard Telnet port is '23'.","cli-topology:transport-type- Application protocol used for communication with device - supported options are 'ssh' and 'telnet'.","cli-topology:device-type- Device type that is used for selection of translation units that maps device configuration to OpenConfig models. Supported devices can be found","cli-topology:device-version- Version of device. Use a specific version or * for a generic one. * enables only basic read and write management without the support of OpenConfig models. Here.","cli-topology:username- Username for accessing of CLI management line.","cli-topology:password- Password assigned to username.","uniconfig-config:install-uniconfig-node-enabled- Whether node should be installed to UniConfig and unified layers. By default, this flag is set to 'true'."]},{"l":"Authentication parameters","p":["List of authentication parameters used for identification of remote user utilized for configuration of the device. Username and password parameters are mandatory.","cli-topology:username- Username for accessing of CLI management line.","cli-topology:password- Password assigned to username.","List of parameters that can be used for adjusting of reconnection strategy. None of these parameters is mandatory - if they are not set, default values are set. There are two exclusive groups of parameters based on selected reconnection strategy - you can define only parameters from single group. By default, keepalive strategy is used."]},{"l":"Connection parameters","p":["Following parameters adjust maintaining of CLI session state. None of these parameters are mandatory (default values will be used).","cli-topology:max-connection-attempts- Maximum number of initial connection attempts(default value: 1). If there are unstable devices in the network it might be useful to provide max-connection-attempts higher than the default value. It would try to connect n times before throwing an ssh connection exception.","cli-topology:max-connection-attempts-install- Maximum number of initial connection attempts during install process (default value: 1). If there are unstable devices in the network it might be useful to provide max-connection-attempts-install higher than the default value. It would try to connect n times before throwing an ssh connection exception.","cli-topology:max-reconnection-attempts- Maximum number of reconnection attempts(default value: 1). max-reconnection-attempts is not that necessary to set. Uniconfig does not keep idle sessions open longer than it is necessary."]},{"l":"Storing failed installations","p":["The following parameter allows the user to store the installation in case the device is in some way unreachable.","uniconfig-config:store-failed-installation- If enabled, it will ensure that even if the device is unreachable, it will be stored in the node table in the database. If not set, the default value is false.","When the user sets the flag to true, an additional column called installation-status will be populated with a boolean flag (either SUCCESSFUL for a successful installation, or FAILED for a failed one). 
This lets the user know that there has been some problem and that the device was not installed correctly. The mount-point information of that node will be stored (unlike with the default value). With this info already stored, the user does not need to reinstall the device, as all the connection information is present in the UniConfig database. Syncing the device or calling a GET Request will try to reconnect to the device and if it is successful, the configuration data will be saved in the datastore and the request will then finish. The installation-status will then change to SUCCESSFUL. The installed device will then behave normally as if the installation was successful in the first place. If the device is still unreachable, the flag will stay FAILED.","This is useful when many devices are being installed in batches and the user doesn't know if they are up or not."]},{"l":"Keepalive strategies","p":["1. Keepalive reconnection strategy","cli-topology:keepalive-delay- Delay between sending of keepalive messages over CLI session. The value should not be set higher than the execution of the longest operation. Default value: 60 seconds.","cli-topology:keepalive-timeout- This parameter defines how much time the CLI layer should wait for a response to keepalive message before the session is closed. Default value: 60 seconds.","cli-topology:keepalive-initial-delay- This parameter defines how much time CLI layer waits for establishment of new CLI session before the first reconnection attempt is launched. Default value: 120 seconds.","The keepalive parameters have two main functions:","keep the idle session open","timeout commands which would block the session forever"]},{"l":"Example of using the connection and keepalive parameters together","p":["For this example let us assume that we are dealing with a prod-like device, which would mean that some devices might have a large config. We would set these parameters:","Connection attempts would give us more flexibility if we work with unstable devices. It would try to ssh 3 times instead of 1 (default value). We should also keep in mind that the process of connecting to a device would take longer because of extra ssh attempts.","Keepalive commands can be set less than time of the installation, because keepalive commands can fit in between of the installation process. An important thing to keep in mind is to set sum of keepalive-delay and keepalive-timeout parameters higher than time of execution of the configuration show command. Otherwise, it could time out during writing out of the configuration to the console. For each type of device it is a different command ( configuration show brief for Ciena devices, show run for Cisco devices, etc.). Assumption is that it should not take more than 240 seconds (sum of keepalive params) to show the whole configuration. This can be appropriately adjusted to our circumstances.","2. Lazy reconnection strategy","command-timeout- Maximal time (in seconds) for command execution. If a command cannot be executed on a device in this time, the execution is considered a failure. Default value: 60 seconds.","connection-establish-timeout- Maximal time (in seconds) for connection establishment. If a connection attempt fails in this time, the attempt is considered a failure. Default value: 60 seconds.","connection-lazy-timeout- Maximal time (in seconds) for connection to keep alive. If no activity was detected in the session and the timeout has been reached, connection will be stopped. 
Default value: 60 seconds."]},{"l":"Journaling parameters","p":["The following parameters relate with tracing of executed commands. It is not required to set these parameters.","cli-topology:journal-size- Size of the cli mount-point journal. Journal keeps track of executed commands and makes them available for users/apps for debugging purposes. Value 0 disables journaling(it is default value).","cli-topology:dry-run-journal-size- Creates dry-run mount-point and defines number of commands in command history for dry-run mount-point. Value 0 disables dry-run functionality (it is default value).","cli-topology:journal-level- Sets how much information should be stored in the journal. Option 'command-only' stores only the actual commands executed on device. Option 'extended' records additional information such as: transaction life-cycle, which handlers were invoked etc."]},{"l":"Parsing parameters","p":["Parsing strategies are used for:","Recognizing of structure in cached device configuration that is represented in textual format.","Extraction of target sections from structured format of device configuration.","Parsing engine can be configured on creation of mountpoint by specification of parsing-engine leaf value. Currently, there are three supported CLI parsing strategies: tree-parser(default strategy), batch-parser and one-line-parser.","Both batch-parser and tree-parser depend on current implementation of'CliFlavour' which defines device-specific CLI patterns. For example, if 'CliFlavour' doesn't correctly specify format of 'show configuration' command, then neither batch-parser or tree-parser is applied and commands are sent directly to device."]},{"l":"Tree-parser","p":["It is set as default parsing engine in case you choose to not use'parsing-engine' parameter.","Running-configuration is mapped into the tree structure before the first command lookup is executed from translation unit. Afterwards, this tree can be reused in the same transaction for faster lookup process (for example, one 'sync-from-network' task is executed in one transaction).","Tree-parser is faster than batch-parser in most cases because device configuration must be traversed only once and searching for target section in parsed tree structure has only logarithmic time complexity. The longer the device configuration is, the better performance improvement is achieved using this parsing strategy.","Both batch-parser and tree-parser should be capable to parse the same device configurations (in other words, tree-parser doesn't have any functional restrictions in comparison to batch-parser)."]},{"l":"Batch-parser","p":["Running-configuration must be traversed from the beginning each time when new target section is extracted from the configuration (such lookup process is launched from CLI translation units).","Internally, this parser uses regular expressions to recognize structure of configuration and find target section. 
From this reason, if configuration is long, this batch-parser becomes ineffective to extract sections that are placed near the end of device configuration.","Batch-parser should be used only as fallback strategy in the case when tree-parser fails."]},{"l":"One-line-parser","p":["CLI parsing engine that stores configuration in the cache in the form of blocks and then uses grep function for parsing running-configuration"]},{"l":"Cisco IOX XR Example request"},{"l":"Junos Example request"},{"l":"Uninstalling CLI device","p":["Uninstall node RPC"]},{"l":"Example request"},{"l":"Installing Netconf device"},{"l":"Identification of remote device","p":["List of basic connection parameters that are used for identification of remote device. Only tcp-only parameter must not be specified in input of the request.","node-id- Name of node that represents device / mount-point in the topology.","netconf-node-topology:host- IP address or domain-name of target device that runs NETCONF server.","netconf-node-topology:port- TCP port on which NETCONF server is listening to incoming connections.","netconf-node-topology:tcp-only- If it is set to 'true', NETCONF session is created directly on top of TCP connection. Otherwise,'SSH' is used as carriage protocol. By default, this parameter is set to 'false'."]},{"i":"authentication-parameters-1","l":"Authentication parameters","p":["Parameters used for configuration of the basic authentication method against NETCONF server. These parameters must be specified in the input request.","network-topology:username- Name of the user that has permission to access device using NETCONF management line.","network-topology:password- Password to the user in non-encrypted format.","There are also other authentication parameters if different authentication method is used - for example, key-based authentication requires specification of key-id. All available authentication parameters can be found in netconf-node-topology.yang under netconf-node-credentials grouping."]},{"l":"Session timers","p":["The following parameters adjust timers that are related with maintaining of NETCONF session state. None of these parameters are mandatory(default values will be used).","netconf-node-topology:initial-connection-timeout- Specifies timeout in seconds after which initial connection to the NETCONF server must be established (default value: 20 s).","netconf-node-topology:request-transaction-timeout- Timeout for blocking RPC operations within transactions (default value: 60 s).","netconf-node-topology:max-connection-attempts- Maximum number of connection attempts (default value: 1).","netconf-node-topology:max-reconnection-attempts- Maximum number of reconnection attempts (default value: 0 - disabled).","netconf-node-topology:between-attempts-timeout- Initial timeout between reconnection attempts (default value: 2 s).","netconf-node-topology:reconnenction-attempts-multiplier- Multiplier between subsequent delays of reconnection attempts (default value: 1.5).","netconf-node-topology:keepalive-delay- Delay between sending of keepalive RPC messages (default value: 120 sec).","netconf-node-topology:confirm-commit-timeout- The timeout for confirming the configuration by \"confirming-commit\" that was configured by \"confirmed-commit\". Configuration will be automatically reverted by device if the \"confirming-commit\" is not issued within the timeout period. This parameter has effect only on NETCONF nodes. 
(default value: 600 sec)."]},{"l":"Capabilities","p":["Parameters related to capabilities are often used when NETCONF device doesn't provide list of YANGs. Both parameters are optional.","netconf-node-topology:yang-module-capabilities- Set a list of capabilities to override capabilities provided in device's hello message. It can be used for devices that do not report any yang modules in their hello message.","netconf-node-topology:non-module-capabilities- Set a list of non-module based capabilities to override or merge non-module capabilities provided in device's hello message. It can be used for devices that do not report or incorrectly report non-module-based capabilities in their hello message.","Instead of defining netconf-node-topology:yang-module-capabilities, we can just define folder with yang schemas netconf-node-topology:schema-cache-directory: folder-name. For more information about using the netconf-node-topology:schema-cache-directory parameter, see RST Other parameters."]},{"l":"UniConfig-native","p":["Parameters related to installation of NETCONF or CLI nodes with uniconfig-native support.","uniconfig-config:uniconfig-native-enabled- Whether uniconfig-native should be used for installation of NETCONF or CLI node. By default, this flag is set to 'false'.","uniconfig-config:install-uniconfig-node-enabled- Whether node should be installed to UniConfig and unified layers. By default, this flag is set to 'true'.","uniconfig-config:sequence-read-active- Forces reading of data sequentially when mounting device. By default, this flag is set to'false'. This parameter has effect only on NETCONF nodes.","uniconfig-config:whitelist- List of root YANG entities that should be read. This parameter has effect only on NETCONF nodes.","uniconfig-config:blacklist- List of root YANG entities that should not be read from NETCONF device due to incompatibility with uniconfig-native or other malfunctions in YANG schemas. This parameter has effect only on NETCONF nodes.","uniconfig-config:validation-enabled- Whether validation RPC should be used before submitting configuration of node. By default, this flag is set to 'true'. This parameter has effect only on NETCONF nodes.","uniconfig-config:confirmed-commit-enabled- Whether confirmed-commit RPC should be used before submitting configuration of node. By default, this flag is set to 'true'. This parameter has effect only on NETCONF nodes.","uniconfig-config:store-failed-installation- Whether the installation should be stored in the database if it fails (e.g. is unreachable). The node will be 'installed' even though it failed and the user has 2 options:","uninstall the device and reinstall it.","call sync-from-network to sync the data from the device."]},{"l":"Flags","p":["Non-mandatory flag parameters that can be added to mount-request.","netconf-node-topology:enabled-strict-parsing- Default value of enabled-strict-parsing parameter is set to 'true'. This may inflicts in throwing exception during parsing of received NETCONF messages in case of unknown elements. If this parameter is set to 'false', then parser should ignore unknown elements and not throw exception during parsing.","netconf-node-topology:enabled-notifications- Default value of enabled-notifications is set to 'true'. If it is set to 'true' and NETCONF device supports notifications, NETCONF mountpoint will expose NETCONF notification and subscription services.","netconf-node-topology:reconnect-on-changed-schema- Default value of reconnect-on-changed-schema is set to 'false'. 
If it is set to 'true', NETCONF notifications are supported by device, and NETCONF notifications are enabled ('enabled-notifications' flag), the connector would auto disconnect/reconnect when schemas are changed in the remote device. The connector subscribes (right after connect) to base netconf notifications and listens for netconf-capability-change notification","netconf-node-topology:streaming-session- Default value of streaming-session parameter is set to 'false'. NETCONF session is created and optimized for receiving of NETCONF notifications from remote server."]},{"l":"Other parameters","p":["Other non-mandatory parameters that can be added to mount-request.","netconf-node-topology:schema-cache-directory- This parameter can be used for two cases:","Explicitly set name of NETCONF cache directory. If it is not set, the name of the schema cache directory is derived from device capabilities during mounting process.","Direct usage of the 'custom' NETCONF cache directory stored in the UniConfig 'cache' directory by name. This 'custom' directory must exist, must not be empty and also can not use the 'netconf-node-topology:yang-module-capabilities' parameter, because capability names will be generated from yang schemas stored in the 'custom' directory.","netconf-node-topology:dry-run-journal-size- Creates dry-run mount-point and defines number of NETCONF RPCs in history for dry-run mount-point. Value 0 disables dry-run functionality (it is default value).","netconf-node-topology:custom-connector-factory- Specification of the custom NETCONF connector factory. For example, if device doesn't support candidate data-store, this parameter should be set to 'netconf-customization-alu-ignore-candidate' string (default value is \"default\").","netconf-node-topology:edit-config-test-option- Specification of the test-option parameter in the netconf edit-config message. Possible values are 'set', 'test-then-set' or 'test-only'. If the edit-config-test-option is not explicitly specified in the mount request, then the default value will be used ('test-then-set'). See RFC-6241 for more information about this feature.","netconf-node-topology:concurrent-rpc-limit- Defines maximum number of concurrent RPCs, where 0 indicates no limit (it is default value).","There are additional install parameters in our OpenAPI, they can all be found here."]},{"l":"Example netconf request"},{"l":"Uninstalling Netconf device"},{"i":"example-request-1","l":"Example request"},{"l":"Installing SNMP agent"},{"l":"Identification of remote agent","p":["List of basic connection parameters that are used for identification of remote agent.","node-id- Name of node that represents device / mount-point in the topology.","snmp-topology:host- IP address or domain-name of target device where SNMP agent is running.","snmp-topology:port- SNMP port on which SNMP agent is listening to incoming connections."]},{"l":"SNMP parameters","p":["snmp-topology:transport-type- UniConfig currently supports UDP for SNMP communication, with plans to add TCP support in the future.","snmp-topology:snmp-version- UniConfig currently supports V1 and V2c version of the SNMP, with plans to add V3 support in the future.","snmp-topology:connection-retries- Sets the number of retries to be performed before a request is timed out. Default value is 0.","snmp-topology:request-timeout- Timeout in milliseconds before a confirmed request is resent or timed out. 
Default value is 3000.","snmp-topology:get-bulk-size- The maximum number of values that can be returned in a single response to the get-bulk operation. Default value is 50."]},{"i":"authentication-parameters-2","l":"Authentication parameters","p":["snmp-topology:community-string- UniConfig currently supports only security string as authentication method that is used with V1 and V2c."]},{"l":"Others","p":["snmp-topology:mib-repository- Name of the MIB repository that contains MIB files."]},{"i":"example-request-2","l":"Example request"},{"l":"Uninstalling SNMP agent"},{"i":"example-request-3","l":"Example request"}],[{"l":"UniConfig CLI"},{"l":"Introduction","p":["The CLI southbound plugin enables the Frinx UniConfig to communicate with CLI devices that do not speak NETCONF or any other programmatic API. The CLI service module uses YANG models and implements a translation logic to send and receive structured data to and from CLI devices. This allows applications to use a service model or unified device model to communicate with a broad range of network platforms and SW revisions from different vendors.","Much like the NETCONF southbound plugin, the CLI southbound plugin enables fully model-driven, transactional device management for internal and external OpenDaylight applications. In fact, the applications are completely unaware of underlying transport and can manage devices over the CLI plugin in the same exact way as over NETCONF.","Once we have installed the device, we can present an abstract, model-based network device and service interface to applications and users. For example, we can parse the output of an IOS command and return structured data.","CLI southbound plugin"]},{"l":"Architecture","p":["This section provides an architectural overview of the plugin, focusing on the main components."]},{"l":"CLI topology","p":["The CLI topology is a dedicated topology instance where users and applications can:","install a CLI device,","uninstall a device,","check the state of connection,","read/write data from/to a device,","execute RPCs on a device.","This topology can be seen as an equivalent of topology-netconf, providing the same features for netconf devices. The topology APIs are YANG APIs based on the ietf-topology model. Similarly to netconf topology, CLI topology augments the model with some basic configuration data and also some state to monitor mountpoints."]},{"l":"CLI mountpoint","p":["The plugin relies on MD-SAL and its concept of mountpoints to expose management of a CLI device. By exposing a mountpoint into MD-SAL, it enables the CLI topology to actually access the device's data in a structured/YANG manner. Components of such a mountpoint can be divided into 3 distinct layers:","Service layer - implementation of MD-SAL APIs delegating execution to transport layer.","Translation layer - a generic and extensible translation layer. The actual translation between YANG and CLI takes place in the extensions. The resulting CLI commands are then delegated to transport layer.","Transport layer - implementation of various transport protocols used for actual communication with network devices.","The following diagram shows the layers of a CLI mountpoint:"]},{"l":"Translation layer","p":["The CLI southbound plugin is as generic as possible. However, the device-specific translation code (from YANG data -\\ CLI commands and vice versa), needs to be encapsulated in a device-specific translation plugin. E.g. 
Cisco IOS specific translation code needs to be implemented by Cisco IOS translation plugin before FRINX UniConfig can manage IOS devices. These translation plugins in conjunction with the generic translation layer allow for a CLI mountpoint to be created."]},{"l":"Device specific translation plugin","p":["Device specific translation plugin is a set of:","YANG models","Data handlers","RPC implementations","that actually","defines the model/structure of the data in FRINX UniConfig","implements the translation between YANG data and device CLI in a set of handlers","(optionally) implements the translation between YANG RPCs and device CLI","The plugin itself is responsible for defining the mapping between YANG and CLI. However, the translation layer into which it plugs in is what handles the heavy lifting for it e.g. transactions, rollback, config data storage etc. Additionally, the SPIs of the translation layer are very simple to implement because the translation plugin only needs to focus on the translations between YANG <-\\ CLI."]},{"l":"Units","p":["In order to enable better extensibility of the translation plugin and also to allow the separation of various aspects of a device's configuration, a plugin can be split into multiple units. Where a unit is actually just a subset of a plugin's models, handlers and RPCs.","A single unit will usually cover a particular aspect of device management e.g. the interface management unit.","Units can be completely independent or they can build on each other, but in the end (in the moment where a device is being installed) they form a single translation plugin.","Each unit has to be registered under a specific device type(s) e.g. an interface management unit could be registered for various versions of the IOS device type. When installing an IOS device, the CLI southbound plugin collects all the units registered for the IOS device type and merges them into a single plugin enabling full management.","The following diagram shows an IOS device translation plugin split into multiple units:","IOS translation plugin"]},{"l":"Transport layer","p":["For now, two transport protocols are supported:","SSH","Telnet","They implement the same APIs, which enables the translation layer of the CLI plugin to be completely independent of the underlying protocol in use. Deciding which transport will be used to manage a particular device is simply a matter of install-request configuration.","The transport layer can be specified using install-request'cli-topology:transport-type' parameter."]},{"l":"Data processing","p":["There are 2 types of data depending on data-store in which data is stored:","Config","Operational","This section details how these data types map to CLI commands.","Just as there are 2 types of data, there are 2 streams of data in the CLI southbound plugin:","It represents user/application intended configuration for the device.","Translation plugins/units need to handle this configuration in data handlers as C(reate), U(pdate) and D(elete) operations. 
R(ead) pulls this config data from the device and updates the cache on its way back.","Config data","It represents actual configuration on the device, optionally statistics from the device.","Translation plugins/units need to pull these data out of the device when R(ead) operation is requested.","Operational data","RPCs stand on their own and can encapsulate any command(s) on the device."]},{"l":"RPCs provided by CLI layer","p":["There are multiple RPCs that can be used to send commands to a CLI session and optionally wait for command output. The CLI layer also provides one additional RPC for computing configuration coverage by cli-units. To use all of these RPCs, it is required to have an installed CLI device in the 'Connected' state."]},{"i":"rpc-execute-and-read","l":"RPC: Execute-and-read"},{"l":"Description","p":["Execution of the sequence of commands specified in the input. These commands must be separated by the new line - then, each of the command is executed separately.","After all commands are executed, it is assumed, that the original command prompt (prompt that was set before execution of this RPC) appears on the remote terminal.","If the input contains only single command, output of this RPC will contain only output of this command. If input contains multiple commands separated by newline, output of this RPC will be built from command prompts (except the prompt of the first command), input commands and outputs returned from remote terminal."]},{"l":"Example","p":["Following RPC demonstrates listing of all interfaces with configured IP addresses plus listing of available routing protocols that can be enabled from global configuration mode. Since the last entered command is placed in configuration mode (for example, starting with'Router(config)#'), it is required to return back to Privileged EXEC mode (for example, starting with 'Router#') using 'end' command and'no' confirmation to not save changes. Also, 'wait-for-output-timer' is configured to 2 seconds - CLI layer waits for command output returned from device up to 2 seconds.","Remember that the last command prompt must equal to original prompt otherwise CLI session fails on timeout and CLI mountpoint must be recreated.","RPC reply with unescaped output string (output can be easily unescaped with 'printf' linux application):","Description of RPC-request input body fields:","command(mandatory) - The list of commands that are sent to device. Commands must be separated by newline character. Every command-line is executed separately.","wait-for-output-timer(optional) - By default (if this parameter is not set or set to 0), outputs from entered commands are collected after caught echo of the next typed command in CLI session (or command prompt, if the command is the last one from input sequence). Then, the collected output contains output of the previous command + echo of the current command that hasn't been executed by sending newline character yet. This process is simplified by setting'wait-for-output-timer' value. In this case,'waiting-for-command-echo' procedure is not applied, rather next command is executed only after specified number of seconds after which the reply from CLI session should already be available (if it won't be available, then command output will be read after execution of the next command - outputs can be messed up).","error-check(optional) - By default, UC does not check for errors in commands. 
If error-handling is enabled and an error occurs, RPC will fail."]},{"l":"Wait-for-echo behaviour","p":["The comparison between described wait-for-echo approaches can be demonstrated in the steps of processing 2 command-lines:","'wait-for-output-timer' is not set or it set to value 0","write command 1","wait for command 1 echo","hit enter","write command 2","wait for command 2 echo","read until command prompt appears","'wait-for-output-timer' is specified in request","read output until timeout expires","Even if the 'wait-for-output-timer' is configured, the last output must equal to original command-prompt."]},{"i":"rpc-execute-and-expect","l":"RPC: Execute-and-expect"},{"i":"description-1","l":"Description","p":["It is a form of the 'execute-and-read' RPC that additionally may contain 'expect(..)' patterns used for waiting for specific outputs/prompts. It can be used for execution of interactive commands that require multiple subsequent inputs with different preceding prompts.","The body of 'expect(..)' pattern must be specified by Java-based regular expression typed between the brackets (see https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html","documentation about regular expressions used in Java language).","'expect(..)' pattern can only be used for testing of previous command line output including next command prompt. From this reason, it is also a suitable tool for testing of specific command prompts.","'expect(..)' pattern must be specified on the distinct line. If multiple 'expect(..)' patterns are chained on neighboring lines, then all of them must match previous output (patterns are joined using logical AND operation).","Output of this RPC reflects the whole dialogue between Frinx UniConfig client and remote terminal except the initial command-prompt.","'wait-for-output-timer' parameter can also be specified in this RPC","but in this case, it applies only for non-interactive commands - commands that are not followed by 'expect(..)' pattern. It is possible to mix interactive and non-interactive commands in input command snippet.","If 'expect' pattern doesn't match previous output, Execute-and-expect RPC will fail on timeout (fixed 3 seconds) for reading next input and CLI session will drop immediately."]},{"i":"example-1","l":"Example","p":["The following RPC requests shows execution of interactive command for copying of file from TFTP server. The CLI prompt subsequently ask for source filename and destination filename. These prompts are asserted by'expect(..) pattern. The last 'expect(..) pattern just waits for confirmation about number of copied bytes.","RPC reply with unescaped output string (output can be easily unescaped with 'printf' linux application):","Backslash is a special character that must be escaped in JSON body. 
From this reason, in the previous example, there are two backslashes proceeding regular-expression constructs.","If 'execute-and-expect' command field doesn't contain any 'expect(..)' patterns, it will be evaluated in the same way like 'execute-and-read' RPC."]},{"i":"rpc-execute-and-read-until","l":"RPC: Execute-and-read-until"},{"i":"description-2","l":"Description","p":["It is form of the 'execute-and-read' RPC that allows to explicitly specify 'last-output' that CLI expect at the end of commands executions (after the last command has been sent to device).","If explicitly specified 'last' output is not found at the end of the output, again, the session will be dropped and recreated similarly to behaviour of 'execute-and-read' RPC."]},{"i":"example-2","l":"Example","p":["The following request shows sending of the configuration snippet for disabling of automatic network summary (RIP routing protocol). After executing of these commands, command prompt is switched to'RP/0/0/CPU0:XR5(config-rip)#' - it is not the same like initial command prompt 'RP/0/0/CPU0:XR5#'. From this reason it is required to return back to initial command prompt by sending of additional commands or specification of 'last-output' as it is demonstrated in this example.","RPC reply with unescaped output string (output can be easily unescaped with 'printf' linux application):","Set 'last-output' is saved within current CLI session - if you send next 'execute-and-read' RPC, it is assumed that the initial and last output is newly configured 'last-output'."]},{"i":"rpc-execute","l":"RPC: Execute"},{"i":"description-3","l":"Description","p":["Simple execution of single or multiple commands on remote terminal. Multiple commands must be separated by newline in the input. The outputs from commands are not collected - output of this RPC contains only status message.","This RPC can be used in cases where it is not necessary to obtain outputs of entered commands.","After all commands are executed, the last output is not checked against expected output."]},{"i":"example-3","l":"Example","p":["The following example demonstrates 'execute' RPC on creation of simple static route and committing of made change.","RPC reply - output contains just status message:"]},{"i":"rpc-config-coverage","l":"RPC: config-coverage"},{"i":"description-4","l":"Description","p":["RPC reads the entire device configuration, determines the coverage of the configuration by translation units and returns simple or complex output. The user can define a preferred output in RPC input. The default is simple output.","Simple output contains one string that consists of all lines of the device configuration. Each line starts with '+' if it is covered or'-' if not and ends with a '\\n' marker.","Complex output contains a list of commands. Each entry in the list includes the following fields:","'covered', which indicates whether the entire command is covered or not. Can be either 'true' or 'false'.","'non-parsable-parts', which is visible only if the entire command is not covered. Contains a list of those command parts that are not covered. If no parts of the command are covered, only contains the word 'ALL'.","'command', which includes the entire command."]},{"l":"Simple output example","p":["RPC reply:"]},{"l":"Complex output example","p":["RPC reply:"]}],[{"l":"UniConfig NETCONF"},{"l":"Overview","p":["NETCONF is an Internet Engineering Task Force (IETF) protocol used for configuration and monitoring of devices in a network. 
It can be used to“create, recover, update, and delete configurations of network devices”. The base NETCONF protocol is described in RFC-6241.","NETCONF operations are overlaid on the Remote Procedure Call (RPC) layer and may be described in either XML or JSON."]},{"l":"NETCONF southbound plugin"},{"l":"Introduction to southbound plugin and netconf-connectors","p":["The NETCONF southbound plugin is capable of connecting to remote NETCONF devices and exposing their configuration/operational datastores, RPCs and notifications as MD-SAL mount points. These mount points allow applications and remote users (over RESTCONF) to interact with the mounted devices.","In terms of RFCs, the southbound plugin supports:","Network Configuration Protocol (NETCONF) - RFC-6241","NETCONF Event Notifications - RFC-5277","YANG Module for NETCONF Monitoring - RFC-6022","YANG Module Library - draft-ietf-netconf-yang-library-06","NETCONF is fully model-driven (utilizing the YANG modelling language) so in addition to the above RFCs, it supports any data/RPC/notifications described by a YANG model that is implemented by the device.","By mounting of NETCONF device a new netconf-connector is created. This connector is responsible for:","keeping state of NETCONF session between NETCONF client that resides on FRINX UniConfig distribution and NETCONF server (remote network device)","sending / receiving of NETCONF RPCs that are used for reading / configuration of network device","interpreting of NETCONF RPCs by mapping of their content using loaded device-specific YANG schemas","There are 2 ways for configuring a new netconf-connector: NETCONF or RESTCONF. This guide focuses on using RESTCONF."]},{"l":"Spawning of netconf-connectors while the controller is running","p":["To configure a new netconf-connector (NETCONF mount-point) you need to create a node in configuration data-store under 'topology-netconf'. Adding of new node under NETCONF topology automatically triggers data-change-event that at the end triggers mounting process of the NETCONF device. The following example shows how to mount device with node name 'example' (make sure that the same node name is specified in URI and request body under 'node-id' leaf).","This spawns a new netconf-connector with name 'example' which tries to connect to the NETCONF device at '192.168.1.100' and port '22'. Both username and password are set to 'test' and SSH is used as channel for transporting of NETCONF RPCs (if 'tcp-only' leaf is set to 'true', NETCONF application protocol is running directly on top of the TCP protocol).","Right after the new netconf-connector is created, NETCONF layer writes some useful metadata into the operational data-store of MD-SAL under the network-topology subtree. This metadata can be found at:","Information about connection status, device capabilities, etc. can be found there.","You can check the configuration of device by accessing of'yang-ext:mount' container that is created under every mounted NETCONF node. The new netconf-connector will now be present there. Just invoke:","The response will contain the whole configuration of NETCONF device. 
,{"i":"authentification-with-privatepublic-key","l":"Authentication with private/public key","p":["This type of authentication is used when you want to connect to the NETCONF device with a private/public key pair: the public key must be saved on the device, the private key is then placed into UniConfig, and the NETCONF mount-point is configured to connect with the SSH key instead of a password.","To accomplish that, follow these steps:","1. Generate a private/public key pair on your local machine","2. Change the .pub format into the .bin format","3. Copy the public key into the device directory. The device password will be required.","4. (Optional) Check that the public key is on the device","5. Import the public key on the device","6. Log in to the device NETCONF subsystem with the private key. The key passphrase will be required.","7. Start UniConfig and insert a keystore with the private key into it.","RPC request:","8. Create a mount-point with the key-id","Delete public key","Log in to the device, remove the RSA public key, and then, if desired, delete the key from the device directory."]},{"l":"PKI Data persistence in NETCONF","p":["PKI data is used for the authentication of NETCONF sessions with the provided RSA private key. The corresponding public key must be stored on the device side.","Keys are identified by a unique 'key-id'. This key identifier can be specified in the NETCONF installation request.","Keys can be managed using the 'remove-keystore-entry' and 'add-keystore-entry' operations. These RPC calls are part of the UniConfig transaction. Changes are not applied until they are committed by the user, or until the immediate commit model is used to invoke the operation.","Keys are stored in the UniConfig database. In a clustered environment, all nodes share the same set of keys."]},{"l":"Registration of the new key","p":["The following request demonstrates how to register a new RSA private key with a key-id of 'key1'. The private key must be specified in the PKCS#8 format. The passphrase is optional and must be specified only if the private key is encrypted.","Multiple keys can be registered at once if the user provides a list of 'key-credential' entries in the input."]},{"l":"Removing of the existing key","p":["The following example shows how to remove the existing key 'key1' from UniConfig. It is possible to remove multiple keys at once."]},{"l":"Reading list of the existing keys","p":["The following example shows how to read the list of existing keys from UniConfig.","Note: Both 'passphrase' and 'private-key' are additionally encrypted by the UniConfig encryption system to protect confidential data."]}
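A minimal sketch of the 'add-keystore-entry' call described above - the 'crypto' module prefix in the path is an assumption and the key material is abbreviated:

```
POST /rests/operations/crypto:add-keystore-entry
Content-Type: application/json

{
  "input": {
    "key-credential": [
      {
        "key-id": "key1",
        "private-key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----",
        "passphrase": "key-passphrase"
      }
    ]
  }
}
```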
,{"l":"Keepalive settings","p":["If the NETCONF session has not been created yet, the attempt to establish the session is only made within the maximum connection timeout. If this timeout expires before the NETCONF session is established, the underlying NETCONF channel is closed (the reconnection strategy will not be started). After the NETCONF session has been successfully created, there are two techniques by which the connection state is kept alive:","TCP acknowledgements - NETCONF runs on top of the TCP protocol, which can handle dropped packets by decreasing the window size and resending lost TCP segments. A working TCP connection does not imply a working state of the application layer (the NETCONF session) - keepalive messages are required too.","Explicit NETCONF keepalive messages - Keepalive messages test whether the NETCONF server is alive - the server must respond to keepalive messages within the NETCONF RPC timeout.","If the TCP connection is dropped or the NETCONF server does not respond within the keepalive timeout, NETCONF launches the reconnection strategy. To summarize, there are three configurable parameters that can be set in the mount request:","Initial connection timeout [seconds] - Specifies the timeout after which the initial connection to the NETCONF server must be established. By default, the value is set to 20 seconds.","Keepalive delay [seconds] - Delay between keepalive RPC messages sent to the NETCONF server. Keepalive messages test the state of the NETCONF session (application layer) - whether the remote side is able to respond to RPC messages. The default keepalive delay is 120 seconds.","Request transaction timeout [seconds] - Timeout for blocking RPC operations within transactions. The southbound plugin stops waiting for the RPC reply after this timeout expires. By default, it is set to 60 seconds.","Example with keepalive parameters set at creation of the NETCONF mount-point (connection timeout, keepalive delay and request timeout):"]},{"l":"Reconnection strategy","p":["Reconnection strategies are used to recover a lost connection to the NETCONF server. The reconnection behaviour can be described by four configurable mount-request parameters:","Maximum number of connection attempts [count] - Maximum number of initial connection retries; when it is reached, NETCONF no longer tries to connect to the device. By default, this value is set to 1.","Maximum number of reconnection attempts [count] - Maximum number of reconnection retries; when it is reached, NETCONF no longer tries to reconnect to the device. By default, this value is set to 1.","Initial timeout between attempts [seconds] - The first delay between reconnection attempts. By default, it is set to 2 seconds.","Reconnection attempts multiplier [factor] - After each reconnection attempt, the delay between reconnection attempts is multiplied by this factor. By default, it is set to 1.5. This means that the next delay between attempts will be 3 s, then 4.5 s, and so on.","Example with reconnection parameters set at creation of the NETCONF mount-point - maximum connection attempts, initial delay between attempts and sleep factor:"]}
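A sketch combining the keepalive and reconnection parameters in one mount request; the leaf names below follow the common 'netconf-node-topology' model and are assumptions here:

```
PUT /rests/data/network-topology:network-topology/topology=topology-netconf/node=example
Content-Type: application/json

{
  "node": [
    {
      "node-id": "example",
      "netconf-node-topology:host": "192.168.1.100",
      "netconf-node-topology:port": 830,
      "netconf-node-topology:connection-timeout-millis": 20000,
      "netconf-node-topology:keepalive-delay": 120,
      "netconf-node-topology:max-connection-attempts": 3,
      "netconf-node-topology:between-attempts-timeout-millis": 2000,
      "netconf-node-topology:sleep-factor": 1.5
    }
  ]
}
```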
,{"l":"Local NETCONF cache repositories","p":["The netconf-connector in OpenDaylight relies on 'ietf-netconf-monitoring' support when connecting to a remote NETCONF device. The 'ietf-netconf-monitoring' feature allows the netconf-connector to list and download all YANG schemas that are used by the device. These YANG schemas are afterwards used by the NETCONF southbound plugin to interpret RPCs. The following rules apply to the maintenance of local NETCONF cache repositories:","By default, a separate local repository is prepared for each device type.","All NETCONF repositories are backed by a separate sub-directory under the 'cache' directory of the UniConfig distribution.","NETCONF device types are distinguished by a unique set of YANG source identifiers - module names and revision numbers. For example, if two NETCONF devices differ only in the revision of one YANG schema, these NETCONF devices are recognized as having different device types.","The name of a NETCONF cache directory generated at runtime has the format 'schema_id', where 'id' represents a unique integer computed from the hash of all source identifiers. Generation of the cache directory name is launched only when mounting a new NETCONF device, and only if another directory with the same set of source identifiers has not been registered yet.","You can still manually provide NETCONF cache directories in another format before starting the UniConfig distribution or at runtime - such directories don't have to follow the 'schema_id' format.","A NETCONF repository can be registered in three ways:","Implicitly, by mounting a NETCONF device that has the NETCONF monitoring capability, provided that another device of the same type has not already been mounted.","At boot of the FRINX UniConfig distribution, when all existing sub-directories of the 'cache' root directory are registered as separate NETCONF repositories.","At runtime, by invocation of the 'schema-resources:register-repository' RPC.","Already registered schema repositories can be listed using the following request:","It returns the list of ODL nodes in the cluster together with all loaded repositories. Each repository has an associated list of source identifiers. See the following example of the GET request output:"]},{"l":"Local Netconf default cache repository","p":["Before booting FRINX UniConfig, the user can put a 'default' repository in the 'cache' directory. This directory should contain the most frequently missing sources. As mentioned above, if the device supports 'ietf-netconf-monitoring' and there is no directory in the 'cache' with all the sources that the device requires, NETCONF generates a directory with the name 'schema_id', where 'id' represents a unique integer. The generated repository may not contain all required schemas, because the device may not provide them. In that case, the missing sources are searched for in the 'default' repository, and if they are located there, the generated repository is supplemented with the missing sources. In general, there are two situations that can occur:","Missing imports","The device requires and provides a resource which in turn requires additional resources that are not covered by the provided resources.","Source that is not covered by provided sources","The device requires but does not provide a specific source.","note Using the 'default' directory in the 'cache' directory is optional."]}
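A sketch of listing the registered repositories - the exact URI is not given on this page, so the path below is an assumption; the response shape follows the description above:

```
GET /rests/data/schema-resources:schema-repositories?content=nonconfig

# Illustrative response shape:
{
  "schema-repositories": {
    "repository": [
      {
        "repository-name": "schema_1913198725",
        "source-identifier": ["ietf-inet-types@2010-09-24", "ietf-yang-types@2013-07-15"]
      }
    ]
  }
}
```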
,{"l":"Connecting to a device not supporting NETCONF monitoring","p":["The NETCONF connector can only communicate with a device if it knows the set of used schemas (or at least a subset). However, some devices use YANG models internally but do not support NETCONF monitoring. The netconf-connector can also communicate with these devices, but you must load the required YANG models manually. In general, there are two situations you might encounter:","NETCONF device does not support 'ietf-netconf-monitoring' but it does list all its YANG models as capabilities in the HELLO message","This could be a device that internally uses, for example, the 'ietf-inet-types' YANG model with revision '2010-09-24'. In the HELLO message sent from this device, this capability is reported as the following string (other YANG schemas can be reported as capabilities in a similar format):","urn:ietf:params:xml:ns:yang:ietf-inet-types?module=ietf-inet-types&revision=2010-09-24","The format of the capability string is '[NAMESPACE]?module=[MODULE_NAME]&revision=[REVISION]', where:","[NAMESPACE] - Namespace that is specified in the YANG schema.","[MODULE_NAME] - Name of the YANG module.","[REVISION] - The newest revision that is specified in the YANG schema (it should be specified as the first one in the file). note The revision number is not mandatory (a YANG model doesn't have to contain a revision number) - in that case, the capability is specified without the '&' and the revision. For such devices you have to side-load all device YANG models into a separate sub-directory under the 'cache' directory (you can choose a random name for this directory, but the directory must contain only YANG files of one device type).","NETCONF device does not support 'ietf-netconf-monitoring' and does NOT list its YANG models as capabilities in the HELLO message","Compared to a device that lists its YANG models in the HELLO message, in this case there are no capabilities specified in the HELLO message. This type of device basically provides no information about the YANG schemas it uses, so it is up to the user of OpenDaylight to properly configure the netconf-connector for this device. The netconf-connector has an optional configuration attribute called 'yang-module-capabilities', and this attribute can contain a list of 'yang-module-based' capabilities. By setting this configuration attribute, it is possible to override the 'yang-module-based' capabilities reported in the HELLO message of the device. To do this, we need to mount the NETCONF device, or modify the configuration of an existing netconf-connector, by adding the configuration snippet with explicitly specified capabilities (it needs to be added next to the address, port, username, etc. configuration elements). The following example shows the explicit specification of six capabilities:","Remember to also put the YANG schemas into the cache folder, as in case 1."]},{"l":"Registration or refreshing of NETCONF cache repository using RPC","p":["This RPC can be used to register a new NETCONF cache repository or to update an existing one. This is useful when the user wants to add a new NETCONF cache repository at runtime of the FRINX UniConfig distribution for a device that doesn't support the 'ietf-netconf-monitoring' feature. It can also be used for refreshing the contents of a repository (YANG schemas) at runtime.","The following example shows how to register a NETCONF repository with the name 'example-repository'. The name of the provided repository must be equal to the name of the directory which contains the YANG schemas.","If the repository registration or refreshing process ends successfully, the output contains just the 'status' leaf set to the 'success' value:","On the other hand, if the directory with the input 'repository-name' does not exist, the directory doesn't contain any YANG files, or the schema context cannot be built using the provided YANG sources, the response body will contain the 'failed' 'status' and a set 'error-message'. For example, a non-existing directory name produces the following response:","Constraints:","Only a single repository can be registered per RPC request.","Removal of registered repositories is not supported for now."]}
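A sketch of the repository registration call described above, with the successful response shape; the operations path follows the RPC name given in this section:

```
POST /rests/operations/schema-resources:register-repository
Content-Type: application/json

{
  "input": {
    "repository-name": "example-repository"
  }
}

# Successful response:
{
  "output": {
    "status": "success"
  }
}
```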
,{"l":"Reconfiguring netconf-connector while the controller is running","p":["It is possible to change the configuration of an already mounted NETCONF device while the controller is running. This example continues where the last one left off and changes the configuration of the existing netconf-connector after it was spawned. Using one RESTCONF request, we will change both the username and the password of the netconf-connector.","To update an existing netconf-connector, send the following request to RESTCONF:","Since PUT is a replace operation, the whole configuration must be specified along with the new values for the username and password. This should result in a '2xx' response, and the instance of the netconf-connector called 'example' will be reconfigured to use the username 'bob' and the password 'passwd'. The new configuration can be verified by executing:","With the new configuration, the old connection will be closed and a new one established."]},{"l":"Destroying of netconf-connector","p":["Using RESTCONF, one can also destroy an instance of a netconf-connector - the NETCONF connection is dropped and all resources associated with the NETCONF mount-point on the NETCONF layer are cleaned up (both CONFIGURATION and OPERATIONAL datastore information). To do this, simply issue a request to the following URL:","The last element of the URL is the name of the mount-point."]}
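A sketch of the destroy request, assuming the RESTCONF base path '/rests/data' and a mount-point named 'example':

```
DELETE /rests/data/network-topology:network-topology/topology=topology-netconf/node=example
```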
,{"l":"NETCONF TESTTOOL"},{"l":"Testtool overview","p":["NETCONF testtool is a Java application that:","Can be used to simulate one or more NETCONF devices (it is suitable for scale testing).","Uses the core implementation of the NETCONF NORTHBOUND server.","Provides broad configuration options for simulated devices.","Supports YANG notifications.","NETCONF testtool is available at the netconf repository of ODL(into the config/ folder of the FRINX UniConfig distribution; this file contains the XML paths that should be ignored while removing duplicate nodes from the NETCONF message","Optional:","put the file namespaceBlacklist.txt into the config/ folder of the FRINX UniConfig distribution; this file contains the XML namespaces of the nodes that should be removed from the NETCONF message","Now UniConfig can be started."]},{"l":"Install SROS device","p":["To install the SROS device, run:","Where:","sros: the name of the device","10.19.0.18: the IP address of the device","830: the port number of the device","USERNAME: the username used to access the device","PASSWORD: the respective password","'uniconfig-config:uniconfig-native-enabled': enables installation through UniConfig Native","'uniconfig-config:install-uniconfig-node-enabled': allows disabling installation to the uniconfig and unified layers","'uniconfig-config:path': allows specifying a list of root elements from models present on the device to be ignored by UniConfig Native","In case of success, the return code is 201."]},{"l":"Check if SROS device is connected","p":["To check if the device is properly connected, run:","In case of success, the return code is 200 and the response body contains something similar to:"]},{"l":"Check if SROS device configuration is available in UniConfig","p":["To check if the SROS device configuration has been properly loaded into the UniConfig config datastore, run:","In case of success, the return code is 200 and the response body contains something similar to:"]}],[{"l":"UniConfig SNMP"},{"l":"Introduction","p":["The SNMP (Simple Network Management Protocol) southbound plugin enables FRINX UniConfig to communicate with an SNMP agent, a software module installed on network devices. It collects information about the status, performance and configuration of these devices.","The SNMP southbound plugin follows a fully model-driven approach, similar to the CLI and NETCONF southbound plugins. The difference lies in the fact that it uses MIB (Management Information Base) instead of YANG for data modelling."]},{"l":"Architecture","p":["This section provides an architectural overview of the plugin, focusing on the main components."]},{"l":"SNMP topology","p":["The SNMP topology is a dedicated topology instance where users and applications can:","install an SNMP agent,","uninstall an agent,","read device configuration settings or performance metrics."]},{"l":"SNMP mountpoint","p":["The plugin relies on MD-SAL and its concept of mountpoints to expose information about a device. By exposing a mountpoint in MD-SAL, it enables the SNMP topology to access device information in a structured form."]},{"l":"Local SNMP MIB repositories","p":["It is necessary to provide a /mibs directory that contains:","repository - a directory that contains the MIB files. Any name can be used.","mib.metadata file - through this file, we inform UniConfig that we have added, removed or modified a MIB file in the repository. Just insert the repository name and an arbitrary string, and UniConfig will update the relevant context for the particular repository."]},{"i":"example-of-mibmetadata-file","l":"Example of mib.metadata file"}
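A minimal sketch of a mib.metadata file. The exact syntax is not shown on this page; this sketch assumes one repository directory named 'cisco-mibs' and a simple 'repository=arbitrary-marker' line that is changed whenever the repository content changes:

```
cisco-mibs=updated-2024-01-15
```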
,{"l":"Example of requests","p":["UniConfig currently supports the read operation, with plans to add the write operation in the future."]},{"l":"GET request"}],[{"l":"Updating installation parameters"},{"l":"Overview","p":["During device installation, UniConfig creates a mount-point for the device and stores it in the database. This mount-point contains all parameters set by the user in the installation request. UniConfig supports updating mount-point parameters. This can be done for both NETCONF and CLI nodes."]},{"l":"Show installation parameters","p":["Parameters of installed devices can be displayed using a GET request on the node. It is necessary to use the right topology. It should return the current node settings. See the following examples:","By default, both NETCONF and CLI topologies have the password parameter encrypted. This can be changed in the corresponding YANG schema by adding/removing the extension flag 'frinx-encrypt:encrypt'.","CLI node","Output:","NETCONF node"]},{"l":"Update installation parameters","p":["To update node installation parameters, use a PUT request with an updated request body copied from the GET request in the previous section. It is also possible to update a single parameter with a direct PUT call on that specific parameter.","If the password parameter is set to be encrypted, changing it will encrypt the input value.","CLI node","Update multiple parameters. Specifically:","host","dry-run-journal-size","journal-size","Update single parameter:","NETCONF node","keepalive-delay","After these changes, using the GET requests from the 'Show installation parameters' section shows that the parameters have actually been changed. It is also possible to use the GET request for a single parameter."]}],[{"l":"UniConfig-native CLI"},{"l":"Introduction","p":["UniConfig-native CLI allows users to configure CLI-enabled devices using YANG models that describe configuration commands. In a UniConfig-native CLI deployment, translation units are defined only by YANG models and the device-specific characteristics that are used for parsing and serialization of commands. Readers and writers are then created automatically and provided to the translation registry - the user doesn't write them individually. YANG models can be constructed by following the well-defined rules that are explained in the Developer Guide.","Summarized characteristics of UniConfig-native CLI:","modelling of device configuration using YANG models,","automatic provisioning of readers and writers by a generic translation unit,","simple translation units per device type that must define device characteristics and a set of YANG models."]},{"l":"Installation","p":["A CLI device can be installed as a native-CLI device by adding the 'uniconfig-config:uniconfig-native-enabled' flag with the value 'true' to the mount request (by default, this flag is set to 'false'). It is also required to use the tree parsing engine, which is enabled by default. All other mount request parameters that can be applied to classic CLI mountpoints can also be used in the native-CLI configuration with the same meaning.","The following example shows how to mount a Cisco IOS XR 5.3.4 device as a native-CLI device with the dry-run functionality enabled:",
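A sketch of the native-CLI install request described above; the 'connection-manager:install-node' path and the 'cli-topology' leaf names are assumptions based on common UniConfig CLI install requests:

```
POST /rests/operations/connection-manager:install-node
Content-Type: application/json

{
  "input": {
    "node-id": "IOSXR",
    "cli": {
      "cli-topology:host": "192.168.1.50",
      "cli-topology:port": 22,
      "cli-topology:transport-type": "ssh",
      "cli-topology:device-type": "ios xr",
      "cli-topology:device-version": "5.3.4",
      "cli-topology:username": "USERNAME",
      "cli-topology:password": "PASSWORD",
      "cli-topology:dry-run-journal-size": 180,
      "uniconfig-config:uniconfig-native-enabled": true
    }
  }
}
```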
"After the mounting of the CLI node finishes, you can verify the CLI mountpoint by fetching its Operational datastore:","You can see that there are some native models included in the 'available-capabilities', plus the basic mandatory capabilities for CLI mountpoints. The number of supported native capabilities depends on the number of written models that are included in the native-CLI translation unit - for IOS XR 5.3.4, in this case. The only capability common to all native-CLI mountpoints is 'http://frinx.io/yang/native/extensions?module=cli-native-extensions'. Sample list of native capabilities:","The synced configuration on the UniConfig layer can be verified in the same way as for all types of devices:","Since the sample device configuration contains both ACL and interface configuration, and native-CLI IOS XR 5.* covers this configuration, the synced data looks like the next output:","The previous sample output corresponds to the following parts of the configuration on the device:"]},{"l":"Architecture","p":["The following section describes the building blocks and automated processes that take place in UniConfig-native CLI."]},{"l":"Modules","p":["The following UML diagram shows the dependencies between modules from which UniConfig native-CLI is built. The core of the system is represented by the 'native-cli-unit' module in the CLI layer, which depends on the CLI API for registration of units and on the readers and writers API. On the other side, there are CLI units that extend 'GenericCliNativeUnit'.","Dependencies","Description of modules:","utils-unit and translation-registry-api/spi: CLI layer API on which native-CLI units depend. It defines the interface for CLI readers/writers, the translation unit collector that can be used for registration of a native-CLI unit, and the common 'TranslateUnit' interface.","native-cli-unit: Responsible for the automatic provisioning and registration of readers and writers (handlers) based on the YANG modules that are defined in specific translation units. Readers and writers are initialized only for root container and list schema nodes defined in the YANG models. All specific native-CLI units must be derived from the abstract class 'GenericCliNativeUnit'.","ios-xr-5-native and junos-17-native: Specific native-CLI units derived from 'GenericCliNativeUnit'. To make a native-CLI unit work, it must implement methods that provide the list of YANG modules, the list of root data object interfaces, the supported device versions, the unit name, and the CLI flavour."]},{"l":"Registration of handlers","p":["The registration of native-CLI handlers is described in detail by the following sequence diagram.","Handlers","Description of the process:","Searching for root schema nodes: Extraction of the root list and container schema nodes from the nodes that are augmented to the UniConfig topology.","Building of device template information: Extraction of device template information from the imported template YANG modules. This template contains the command used for displaying the whole device configuration, the format of a configuration command, and the format of a delete command.","Initialization of handlers: Creation of native-CLI config readers and writers, or native-CLI list readers and writers in the case of list schema nodes.","Registration of handlers: Registration of readers and writers in the reader and writer registries. 
Readers are registered as generic config readers, whereas writers are registered as wildcarded subtree writers.","Since native-CLI readers are not registered as subtree readers, only root elements can be read directly from the CLI mountpoint. This constraint is caused by unsupported wildcarded subtree readers in the Honeycomb framework."]},{"l":"Functionality of readers","p":["Config readers and config list readers in UniConfig-native CLI are implemented as generic readers that parse the device configuration into a structured format based on the registered native-CLI YANG models. These readers are initialized and registered per root data schema node that is supported in native-CLI. The next sequence diagram shows the process taken by a generic reader when the 'readCurrentAttributes(..)' method is called.","Readers","Description of the process:","Creation of the configuration tree: Represents the current device configuration, obtained by sending the 'show' command which is responsible for displaying the whole device configuration.","Transformation of the configuration tree: The tree is transformed into a binding-independent NormalizedNode using the 'ConfigTreeStreamReader' component.","Conversion into binding-aware format: Conversion of the binding-independent NormalizedNode into a binding-aware DataObject, and population of the DataObject builder with fields from the built DataObject.","The configuration is parsed into a structured form before it is actually transformed into NormalizedNodes (step 1), because this approach is more modular and easier to follow. The configuration tree consists of three types of nodes:","Command nodes: Represented by the last identifiers of the commands (command words). These nodes don't have any children.","Section nodes: Represented by the command word/identifier that opens a new configuration section. Section nodes can have multiple children.","Connector nodes: Similar to section nodes, with an identifier and multiple possible children. However, they don't open a new configuration section; they represent just one intermediary word in the command line.","Example - parsing of interface commands into the tree structure:","Parsing","Detailed description of the algorithm for transforming the configuration tree into DOM objects:","Transformation","If some commands are not covered by the native-CLI YANG models, parsing of the configuration in readers does not fail - unsupported nodes are skipped."]},{"l":"Functionality of writers","p":["Config writers and config list writers are responsible for the serialization of structured data from the datastore into a series of configuration or delete command lines that are compatible with the target device. Native-CLI writers are also registered only for root schema nodes, on the same paths as the readers. The next sequence diagram shows the process taken by a generic writer when the 'writeCurrentAttributes(..)' or 'deleteCurrentAttributes(..)' method is called.","Writers","Description of the process:","Conversion into binding-independent format: Conversion of the binding-aware DataObject into the binding-independent NormalizedNode format. The binding-independent format is better suited for automated traversal and building when the target class types of nodes are not known before the compilation of YANG schemas is done.","Generation of command lines: The NormalizedNode is serialized using a stream writer into configuration buckets that are afterwards serialized into separate command lines. The conversion of configuration buckets into command lines can be customized by different strategies. 
Currently only a primitive strategy is used - for each leaf command argument, it creates the full command line from the top root; nesting into configuration modes is not supported. This step is described in detail by the next activity diagram.","Generation of configuration or delete command lines: Done by applying the configuration or delete template to a command line - for example, JUNOS devices use the prefix 'set' for applying configuration and the prefix 'delete' for removing configuration from the device.","Squashing of command lines into a single snippet: This is only an optimization step - all command lines are joined together with a newline separator.","Sending of the command to the device (blocking operation).","Configuration buckets are created as an intermediary step because of the modularity and flexibility they provide for applying different serialization strategies in the future. There are three types of created buckets, which are wired to the respective schema nodes:","Leaf bucket: A bucket that doesn't have any children but has a value in addition to the identifier. It is created from a LeafNode.","Composite bucket: A bucket with an identifier and possibly multiple children buckets. It can be used for the following types of DOM nodes: ContainerNode or MapEntryNode.","Delegating bucket: A bucket that doesn't have an identifier; it just delegates configuration to its children buckets. It can be used for nodes that are described by a ChoiceNode or MapNode.","Command serialization","The current implementation processes updates in the default way - the whole actual configuration is removed, and then the whole updated configuration is written back to the device. This strategy can slow down the commit operation in the case of longer configurations, and for this reason it is flagged as one of the future improvements."]}],[{"l":"UniConfig Operations"},{"i":"sending-and-receiving-data-restconf","l":"Sending and receiving data (RESTCONF)","p":["RESTCONF represents a REST API to access datastores and UniConfig operations."]},{"l":"UniConfig Node Manager API","p":["The responsibility of this component is to maintain the configuration on devices based on the intended configuration. Each device and its configuration is represented as a node in the uniconfig topology, and the configuration of this node is described using OpenConfig YANG models. The northbound API of the UniConfig Node Manager (UNM) is RPC-driven and provides functionality for commit with automatic rollback and synchronization of configuration from the network."]},{"l":"Device discovery","p":["This component is used to check for reachable devices in a network. The manager checks reachability via the ICMP protocol. Afterwards, the manager is able to check whether various TCP/UDP ports are open."]},{"l":"Dry-run Manager API","p":["The manager provides functionality for showing the CLI commands which would be sent to the network element."]},{"l":"Snapshot Manager API","p":["The snapshot manager creates and deletes UniConfig snapshots of the actual uniconfig topology. 
Multiple snapshots can be created in the system."]},{"l":"Subtree Manager API","p":["The subtree manager copies (merges/replaces) subtrees between source and target paths."]},{"l":"Templates Manager API","p":["This component is responsible for the application of templates to UniConfig nodes."]},{"l":"Transaction Log API","p":["This component is responsible for tracking transactions."]},{"l":"UniConfig Queries","p":["Using this component, it is possible to invoke JSONB-path queries on top of the stored configuration."]},{"i":"dedicated-transaction-immediate-commit-model","l":"Dedicated transaction (Immediate Commit Model)","p":["The immediate commit model creates a new transaction for every call of an RPC. The transaction is then closed, so no lingering data will occur."]},{"l":"Utilities","p":["This sub-directory contains UniConfig utilities."]}],[{"l":"JSONB Filtering","p":["The jsonb-filter is a query parameter used for filtering data based on one or more parameters. This filter is an effective mechanism for filtering a list of items. Using the jsonb-filter, we can retrieve only those list items that meet the defined conditions.","Currently, there are two options for how to use the JSONB filtering functionality."]},{"l":"Database JSONB Filtering","p":["The query parameter is located in the URI. This option is faster, because filtering happens on the database side, but it has fewer features."]},{"l":"Application JSONB Filtering","p":["A new Content-Type is added. The query parameter is added in the body. Additional query parameters can be chained (sort by, limit, fields). This request is sent as a POST request. This filtering offers more features, but it happens on the UniConfig application side, which is slower than database filtering."]}],[{"l":"Application JSONB Filtering","p":["Application JSONB filtering supports either the dot notation:","or the bracket notation:"]},{"l":"Jsonb-filter expression","p":["Every filter operation is sent using a POST request. Additionally, a new Content-Type header has been introduced for Application JSONB Filtering. An example can be seen below:","The filter is located in the body of the request, not in the URI. Since it is located in the body, there is no need to escape characters. The body structure looks like this:","If the user wants to filter the list elements based on the name, the query filter would look like this:","By default, the filter returns the same output structure as calling a GET request. There is an option to include the whole parent structure, in which case the body will look like this:","This will filter out all the elements in the list whose name is foo."]}
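A sketch of an application JSONB filter request for the 'name is foo' case above. The Content-Type value and the body field name are hypothetical placeholders - this page introduces a new header and body structure without restating their exact names here:

```
POST /rests/data/network-topology:network-topology/topology=uniconfig/node=dev01/frinx-uniconfig-topology:configuration/interfaces
Content-Type: application/filter+json

{
  "jsonb-filter": "$.interfaces.interface[?(@.name == 'foo')]"
}
```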
,{"l":"Operators","p":["The operators in the table below are used to construct a path.","$ - The root element to query. This starts all path expressions.","@ - The current node being processed by a filter predicate.","* - Wildcard. Available anywhere a name or numeric value is required.",".. - Deep scan. Available anywhere a name is required.",".<name> - Dot-notated child.","['<name>' (, '<name>')] - Bracket-notated child or children.","[<number> (, <number>)] - Array index or indexes.","[start:end] - Array slice operator.","[?(<expression>)] - Filter expression. The expression must evaluate to a boolean value."]},{"l":"Functions","p":["Functions can be called at the end of the query path. The input to the function is the output of the path expression. The function output is dictated by the function itself.","min() (output: Double) - Provides the min value of an array of numbers","max() (output: Double) - Provides the max value of an array of numbers","avg() (output: Double) - Provides the average value of an array of numbers","stddev() (output: Double) - Provides the standard deviation value of an array of numbers","sum() (output: Double) - Provides the sum value of an array of numbers","length() (output: Integer) - Provides the length of an array","keys() (output: Set) - Provides the property keys (an alternative for the terminal tilde ~)","concat(X) (output: like input) - Provides a concatenated version of the path output with a new item","append(X) (output: like input) - Adds an item to the json path output array"]},{"l":"Filter Operators","p":["Filters are logical expressions used to filter arrays. A typical filter would be [?(@.age > 18)], where @ represents the current element being processed. More complex filters can be created with the logical operators && and ||. String literals must be enclosed by:","A single quote: [?(@.name == 'foo')]","A double quote: [?(@.name == \"foo\")]","== - left is equal to right (note that 1 is not equal to '1')","!= - left is not equal to right","< - left is less than right","<= - left is less than or equal to right","> - left is greater than right",">= - left is greater than or equal to right","=~ - left matches the regular expression [?(@.name =~ /foo.*?/i)]","in - left exists in right [?(@.size in ['S', 'M'])]","nin - left does not exist in right","subsetof - left is a subset of right [?(@.sizes subsetof ['S', 'M', 'L'])]","anyof - left has an intersection with right [?(@.sizes anyof ['M', 'L'])]","noneof - left has no intersection with right [?(@.sizes noneof ['M', 'L'])]","size - size of left (array or string) should match right","empty - left (array or string) should be empty"]},{"l":"Jsonb-filter examples","p":["Suppose we have the following data, and we want to do some filtering on them.","$.ietf-interfaces:interfaces.interface[*].name - The names of all interfaces","$..name - All names","$.ietf-interfaces:interfaces.* - All things under interfaces","$.ietf-interfaces:interfaces..type - The type of everything","$.ietf-interfaces:interfaces.interface[2] - The third interface","$.ietf-interfaces:interfaces.interface[-2] - The second to last interface","$.ietf-interfaces:interfaces.interface[0,1] - The first two interfaces","$.ietf-interfaces:interfaces.interface[:2] - All interfaces from index 0 (inclusive) until index 2 (exclusive)","$.ietf-interfaces:interfaces.interface[1:2] - All interfaces from index 1 (inclusive) until index 2 (exclusive)","$.ietf-interfaces:interfaces.interface[-2:] - The last two interfaces","$.ietf-interfaces:interfaces.interface[2:] - All interfaces from index 2 (inclusive) to the last","$.ietf-interfaces:interfaces.interface[?(@.enabled)] - All interfaces that have the enabled element","$.ietf-interfaces:interfaces.interface[?(@.speed >= 10)] - All interfaces whose speed is greater than or equal to 10","$..interface[?(@.type =~/.* Csmacd/i)] - All interfaces matching the regex (ignore case)","$..interface[?(@.speed <= $['fast'])] - All interfaces that are not 'fast'","$.ietf-interfaces:interfaces.interface.length() - The number of interfaces"]}],[{"l":"Database JSONB Filtering","p":["Example of using the jsonb-filter query parameter: parent-path?jsonb-filter=expression","PostgreSQL documentation: JSON Functions and Operators"]}
expression","p":["!","!=","{$/Cisco-IOS-XR-ifmgr-cfg:interface-configurations/interface-configuration=%28%23act,GigabitEthernet0/0/0/2%29}","{$/frinx-openconfig-interfaces:interfaces/interface=%28%23MgmtEth0/RP0/CPU0/0%29}","&&","<","<=","<>","==",">",">=","||","Absolute path","Boolean AND","Boolean NOT","Boolean OR","Composite key:","Description","Equality operator","exists","false","Greater-than operator","Greater-than-or-equal-to operator","In this case, a path must be prefixed with $. This path must start with a top-level parent container","In this case, the path must be prefixed with <@>. This path is relative to the parent-path","is unknown","Less-than operator","Less-than-or-equal-to operator","like_regex","Non-equality operator","Non-equality operator (same as !=)","null","Operator","Path","Relative path","Single key:","Sometimes especially absolute paths can contain a key of some item with special characters. In this case it is necessary wrap this key in a special syntax (#example-key-name) and also encode these wrapping symbols - %28%23example-key-name%29. If the key is a composite key, it is necessary to wrap the whole key with these symbols. If the user is not sure if the path contains special characters, it is always recommended to use this special syntax.","starts with","Tests whether the first operand matches the regular expression given by the second operand","The base expression must contain path, operator and value. The jsonb-filter can contain one or more expressions joined with AND(&&) or OR (||) operator. if the && operator is used it must be encoded.","The last element of the jsonb-filter expression is a value based on which the user wants to filter the data.","The path to the data that the users want to filter. The path can be:","true","Value","Value used to perform a comparison with JSON false literal","Value used to perform a comparison with JSON null value","Value used to perform a comparison with JSON true literal","Value/Predicate Description","When the path is constructed then the user can use one of the operators in the table below"]},{"l":"Jsonb-filter examples","p":["1. Examples of using the relative paths in the jsonb-filter","Example of filtering the list of interfaces based on the enabled parameter where the equality operator is used as the operator","Example of filtering the list of interfaces based on the mtu parameter where the less-than is used as the operator","Example of filtering the list of interfaces based on the name parameter where the like_regex is used as the operator","Example of filtering the list of interfaces where a combination of expressions is used","Example of filtering the list of interfaces where the exists operator is used","2. Example of using the absolute path in the jsonb-filter","Example of filtering the list of interfaces based on the name parameter where equality operator is used as the operator. Interface name\"GigabitEthernet0/0/0/2\" is a key value that contains slashes. For this reason, it is necessary to wrap this key into wrapping symbols(#GigabitEthernet0/0/0/) and also encode these symbols%28%23GigabitEthernet0/0/0/2%29."]}],[{"l":"Snapshot Manager","p":["The snapshot manager creates and deletes UniConfig snapshots of actual UniConfig topology. Multiple snapshots can be created in the system.","Snapshots may be used for manual rollback. Manual rollback enables simple reconfiguration of the entire network using one of the previous states saved in snapshots. 
,[{"l":"Snapshot Manager","p":["The snapshot manager creates and deletes UniConfig snapshots of the actual UniConfig topology. Multiple snapshots can be created in the system.","Snapshots may be used for manual rollback. Manual rollback enables simple reconfiguration of the entire network using one of the previous states saved in snapshots. This means that the UniConfig nodes in the config datastore are replaced with the UniConfig snapshot nodes."]},{"l":"Create snapshot"},{"l":"Delete snapshot"},{"l":"Replace config with snapshot"},{"l":"Obtain snapshot metadata"}],[{"l":"Obtaining snapshots-metadata","p":["The snapshot metadata contains the list of created snapshots, with the date of creation and the list of nodes."]}],[{"l":"RPC create-snapshot","p":["This RPC creates a snapshot from the nodes in the UniConfig topology. Later, this snapshot can be used for manual rollback. The RPC input contains the name of the snapshot topology and the nodes that the snapshot will contain. The output of the RPC describes the result of the operation and matches all input nodes. You cannot call the RPC with empty target-nodes. If one node fails for any reason, the entire RPC fails."]}
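A minimal sketch of a create-snapshot request; the 'snapshot-manager' module prefix and the exact input leaf names are assumptions, while 'target-nodes' follows the prose above:

```
POST /rests/operations/snapshot-manager:create-snapshot
Content-Type: application/json

{
  "input": {
    "name": "snapshot1",
    "target-nodes": { "node": ["IOSXR", "XR5"] }
  }
}
```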
,{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC input contains the name for the snapshot topology and the nodes that the snapshot contains. The RPC output contains the result of the operation."]},{"l":"Failed Example","p":["The RPC input includes the nodes that will be contained in the snapshot, but the snapshot name is missing. The RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC input contains a name for the snapshot topology and a node that will be contained in the snapshot. That node has not been mounted yet. The RPC output contains the result of the operation."]},{"i":"failed-example-2","l":"Failed Example","p":["The RPC input does not contain the target nodes, so the RPC cannot be executed."]}],[{"l":"RPC delete-snapshot","p":["This RPC removes a snapshot from the CONFIG datastore of the UniConfig transaction. The RPC input contains the name of the snapshot topology to be removed. The RPC output contains the result of the operation."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC input contains the name of the snapshot topology to be removed. The RPC output contains the result of the operation."]},{"l":"Failed example","p":["The RPC input contains the name of the snapshot topology to be removed. The input snapshot name does not exist. The RPC output contains the result of the operation."]}],[{"l":"RPC replace-config-with-snapshot","p":["This RPC replaces the nodes in the UniConfig topology in the CONFIG datastore with the selected nodes from the specified snapshot. The RPC input contains the name of the snapshot topology and the target nodes which should replace the UniConfig nodes in the CONFIG datastore. The output of the RPC describes the result of the operation and matches all input nodes. You cannot call the RPC with empty target-nodes. If one node fails for any reason, the entire RPC fails."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC input contains the name of the snapshot topology which should replace the nodes from the UniConfig topology in the CONFIG datastore, and the list of nodes from that snapshot. The RPC output contains the result of the operation."]},{"l":"Failed Example","p":["The RPC input contains the name of the snapshot topology which should replace the nodes from the UniConfig topology in the CONFIG datastore, and the list of nodes from that snapshot. The snapshot with the name (snapshot2) has not been created yet. The RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC input contains the name of the snapshot topology which should replace the nodes from the UniConfig topology in the CONFIG datastore, and the list of nodes from that snapshot. The snapshot name is missing in the RPC input. The RPC output contains the result of the operation."]},{"i":"failed-example-2","l":"Failed Example","p":["The RPC input contains the name of the snapshot topology which should replace the nodes from the UniConfig topology in the CONFIG datastore, and the list of nodes from that snapshot. One node is missing in snapshot1 (IOSXRN). The RPC output contains the result of the operation."]},{"i":"failed-example-3","l":"Failed Example","p":["The RPC input does not contain the target nodes, so the RPC cannot be executed."]}],[{"l":"Subtree Manager","p":["The subtree manager copies (merges/replaces) subtrees between source and target paths in the Configuration or Operational datastore of UniConfig. When one of these RPCs is called, the Subtree Manager (SM) reads the configuration from the source path and, according to the type of operation (merge/replace), copies the subtree data to the target path. The target path is a parent path UNDER which the data is copied. The SM also distinguishes the type of the source/target datastore.","All RPCs support merging/replacing of configuration between two different schemas (the 'version drop' feature). This feature is handy when it is necessary to copy some configuration between two mounted nodes that are described by slightly different YANG schemas. The following changes between schemas are tolerated:","Skipping non-existing composite nodes and leaves,","Adjusting the namespace and revision in node identifiers; only the names of nodes must match the target schema,","Moving nodes between choice and augmentation schema nodes,","Adjusting the value format to the target type definition of a leaf or leaf-list schema node."]},{"l":"RPC copy-one-to-one","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC copy-one-to-many","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC copy-many-to-one","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC calculate-subtree-diff","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC calculate-subtree-git-like-diff","p":["Provides a list of supported operations on subscriptions, includes request examples and workflow diagrams."]},{"l":"RPC bulk-edit","p":["Applies multiple modifications to a list of target nodes. RPC bulk-edit"]}],[{"l":"RPC bulk-edit","p":["The bulk-edit operation can be used to modify multiple configuration subtrees under multiple target nodes from the 'uniconfig', 'templates' or 'unistore' topology (the same list of modifications is applied to all listed target nodes). The bulk-edit operation is executed atomically - either all modifications are applied to all target nodes successfully, or the operation fails and the configuration is not touched in the UniConfig transaction. This RPC also benefits from parallel processing of changes per target node."]},{"l":"RPC input","p":["The RPC input specifies a list of target nodes and a list of modifications that must be applied under the target nodes:","Description of input fields:","topology-id (mandatory): Identifier for the topology which contains all target nodes. Currently supported topologies: uniconfig, templates, unistore.","node-id (optional): List of target node identifiers residing in the specified topology. 
If this field is not specified or is empty, the RPC is executed on all available nodes in the specified topology.","edit (mandatory, with at least 1 entry): List of modifications. Each modification is uniquely identified by the 'path' key. Modifications are applied in the preserved user-defined order.","Description of fields in the edit entry:","path (mandatory): Path encoded using the RFC-8040 format, specified as a relative path to the root 'configuration' container. If this leaf contains the single character '/', the path points to the whole configuration. If this path contains a list node without a key, the operation is applied to all elements of the list node.","operation (mandatory): Operation that must be executed on the specified path. Supported operations are 'merge', 'replace', and 'remove'. The operations 'merge' and 'replace' also require the input 'data' to be specified.","data (optional): Content of the replaced or merged data without the wrapping parent element (the last element of the path is not declared in 'data'; see the examples on how to correctly specify the content of this leaf in different use-cases).","Supported operations:","merge: The supplied value is merged with the target data node.","replace: The supplied value is used to replace the target data node.","remove: Deletes the target node if it exists."]}
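A minimal sketch of a bulk-edit input covering the three operations; the field names follow the descriptions above, while the operations path, node names and edit paths are illustrative:

```
POST /rests/operations/bulk-edit
Content-Type: application/json

{
  "input": {
    "topology-id": "uniconfig",
    "node-id": ["dev01", "dev02"],
    "edit": [
      {
        "path": "interfaces/interface=eth0/config",
        "operation": "replace",
        "data": { "enabled": true, "description": "uplink" }
      },
      { "path": "snmp", "operation": "remove" }
    ]
  }
}
```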
,{"l":"RPC output","p":["The RPC output contains the global status of the executed operation and a per-node status.","Description of output fields:","overall-status: Status of the operation. If RPC execution fails on at least one of the target nodes, the overall status is set to 'fail'. Otherwise, the status is set to 'complete'.","error-message: Reason for the failure. Used if there is a structural error in the RPC input that does not relate to one specific target node.","node-result: Results of the RPC execution divided per target node ('node-id' is the key of the list).","Description of fields in the node-result entry:","node-id: Identifier for the target node.","status: Status of the bulk-edit operation on this node. This value is set to 'complete' only if all modifications have been successfully written into the UniConfig transaction (including other nodes). Otherwise, the value is set to 'fail'.","error-message: Reason for the failure. This field appears in the output only if the RPC execution failed on this target node.","error-type: Categorized error type."]},{"l":"RPC examples"},{"l":"Successful example","p":["The following request demonstrates the application of six modifications to four templates:","Replace the value of the 'description' leaf.","Remove the 'snmp' container.","Replace the whole 'ssh' container.","Merge the configuration of the 'routing-protocol' list entry.","Merge the whole 'tree' list with the specified multiple list entries.","Replace the leaf-list 'services' with the provided array of strings.","The response contains the overall status 'complete' and the per-node statuses 'complete' - all modifications have been successfully written into the UniConfig transaction."]},{"l":"Failed example","p":["The following example demonstrates the execution of a bulk-edit operation that fails while parsing one of the paths using the YANG schemas of the device 'dev02'.","The RPC response contains the overall status 'fail'. There is one error message in the result of 'dev02'. Note that the 'dev01' result also contains the 'fail' status, as modifications have not been written to this node either, because another node ('dev02') failed during execution of the operation."]}],[{"l":"RPC calculate-subtree-diff","p":["This RPC creates a diff between source topology subtrees and target topology subtrees. Supported features:","Comparison of subtrees under the same network-topology node.","Comparison of subtrees between different network-topology nodes that use the same YANG schemas.","Comparison of subtrees with different revisions of YANG schemas that are syntactically compatible (for example, different software versions of devices).","The RPC input contains data-tree paths ('source-path' and 'target-path') and data locations ('source-datastore' and 'target-datastore'). The data location is an enumeration of two possible values, 'OPERATIONAL' and 'CONFIGURATION'. The default value of 'source-datastore' is 'OPERATIONAL' and the default value of 'target-datastore' is 'CONFIGURATION'.","The RPC output contains a list of differences between the source and target subtrees.","RPC calculate-subtree-diff"]}
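A minimal sketch of a calculate-subtree-diff input using the leaf names and defaults described above; the module prefix in the operations path and the node/subtree paths are illustrative:

```
POST /rests/operations/subtree-manager:calculate-subtree-diff
Content-Type: application/json

{
  "input": {
    "source-path": "/network-topology:network-topology/topology=uniconfig/node=dev01/configuration/interfaces",
    "source-datastore": "OPERATIONAL",
    "target-path": "/network-topology:network-topology/topology=uniconfig/node=dev02/configuration/interfaces",
    "target-datastore": "CONFIGURATION"
  }
}
```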
,{"l":"RPC Examples"},{"i":"successful-example-computed-difference","l":"Successful example: Computed difference","p":["The calculate-subtree-diff RPC input has paths to two different testtool devices with different YANG schemas. The output contains a list of statements representing the diff."]},{"i":"successful-example-no-difference","l":"Successful example: No difference","p":["The following output demonstrates a situation with no changes between the specified subtrees."]},{"i":"failed-example-invalid-value-in-input-field","l":"Failed example: Invalid value in input field","p":["The calculate-subtree-diff RPC has an improperly defined datastore (AAA) in the input. The output describes the allowed values [CONFIGURATION, OPERATIONAL]."]},{"i":"failed-example-missing-mandatory-field","l":"Failed example: Missing mandatory field","p":["The RPC input does not contain the mandatory source path."]}],[{"l":"RPC calculate-subtree-git-like-diff","p":["This RPC creates a diff between source topology subtrees and target topology subtrees. Supported features:","Comparison of subtrees under the same network-topology node.","Comparison of subtrees between different network-topology nodes that use the same YANG schemas.","Comparison of subtrees with different revisions of YANG schemas that are syntactically compatible (for example, different software versions of devices).","The RPC input contains data-tree paths ('source-path' and 'target-path') and data locations ('source-datastore' and 'target-datastore'). The data location is an enumeration of two possible values, 'OPERATIONAL' and 'CONFIGURATION'. The default value of 'source-datastore' is 'OPERATIONAL' and the default value of 'target-datastore' is 'CONFIGURATION'.","The RPC output contains the differences between the source and target subtrees formatted in a git-like style. The changes are grouped by root entities in the configuration."]},{"l":"RPC Examples"},{"i":"successful-example-computed-difference","l":"Successful example: Computed difference","p":["The calculate-subtree-git-like-diff RPC input includes paths to two interfaces on different nodes. Both data locations are placed in the CONFIGURATION datastore. The output contains a list of all the changes. Multiple changes that occur under the same root element are merged together."]},{"i":"successful-example-no-difference","l":"Successful example: No difference","p":["The following output demonstrates a situation with no changes between the specified subtrees."]},{"i":"failed-example-missing-mandatory-field","l":"Failed example: Missing mandatory field","p":["The RPC input does not contain the mandatory target path."]}],[{"l":"RPC copy-many-to-one","p":["The RPC input contains:","the type of operation - 'merge' or 'replace',","the type of source datastore - CONFIGURATION/OPERATIONAL,","the type of target datastore - CONFIGURATION/OPERATIONAL,","a list of source paths in RFC-8040 URI formatting,","the target path in RFC-8040 URI formatting (the target path denotes the parent entity under which the configuration is copied).","The target datastore is an optional input field. By default, it is the same as the source datastore. The other input fields are mandatory, so it is forbidden to call the RPC with a missing mandatory field. The output of the RPC describes the result of the copy-to-target-path operation. If one path fails for any reason, the whole RPC fails and no modification is done to the datastore - all modifications are done in a single atomic transaction.","A description of the copy-many-to-one RPC is in the figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates the execution of the copy-many-to-one RPC with three source paths. The data described by these source paths (the 'snmp', 'access', and 'ntp' containers under three different nodes) will be copied under the root 'system:system' container (node 'dev04')."]},{"l":"Failed example","p":["The following example shows a failed copy-many-to-one RPC. One of the source paths points to a non-existing schema node ('invalid:invalid')."]}],[{"l":"RPC copy-one-to-many","p":["The RPC input contains:","the type of operation - 'merge' or 'replace',","the type of source datastore - CONFIGURATION/OPERATIONAL,","the type of target datastore - CONFIGURATION/OPERATIONAL,","the source path in RFC-8040 URI formatting,","a list of target paths in RFC-8040 URI formatting (the target paths denote the parent entities under which the configuration is copied).","The target datastore is an optional input field. By default, it is the same as the source datastore. The other input fields are mandatory, so it is forbidden to call the RPC with a missing mandatory field. The output of the RPC describes the result of the copy-to-target-paths operation. If one path fails for any reason, the whole RPC fails and no modification is done to the datastore - all modifications are done in a single atomic transaction.","A description of the copy-one-to-many RPC is in the figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates merging of an ethernet interface configuration from a single source into the interfaces 'eth-0/2' (node 'dev02'), 'eth-0/3' (node 'dev02'), 'eth-0/100' (node 'dev03'), and 'eth-0/200' (node 'dev03')."]},{"l":"Failed example","p":["The next example shows a failed copy-one-to-many RPC - both target paths are invalid, since the 'ext' list schema nodes don't contain the 'interfaces:interfaces' child container."]}]
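A minimal sketch of a copy-one-to-many input matching the successful example above; the 'target-paths' leaf name, the operations path and the exact URI formatting are assumptions:

```
POST /rests/operations/subtree-manager:copy-one-to-many
Content-Type: application/json

{
  "input": {
    "operation": "merge",
    "source-datastore": "CONFIGURATION",
    "source-path": "/network-topology:network-topology/topology=uniconfig/node=dev01/configuration/interfaces:interfaces/interface=eth-0%2F1",
    "target-paths": [
      "/network-topology:network-topology/topology=uniconfig/node=dev02/configuration/interfaces:interfaces/interface=eth-0%2F2",
      "/network-topology:network-topology/topology=uniconfig/node=dev02/configuration/interfaces:interfaces/interface=eth-0%2F3"
    ]
  }
}
```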
]}],[{"l":"RPC copy-one-to-one","p":["RPC input contains:","type of operation - 'merge' or 'replace',","type of source datastore - CONFIGURATION / OPERATIONAL,","type of target datastore - CONFIGURATION / OPERATIONAL,","source path in RFC-8040 URI formatting,","target path in RFC-8040 URI formatting (the target path denotes the parent entity under which the configuration is copied).","The target datastore is an optional input field. By default, it is the same as the source datastore. The other input fields are mandatory, so it is forbidden to call the RPC with a missing mandatory field. The output of the RPC describes the result of the copy-to-target-path operation. If the RPC fails for any reason, no modification is done to the datastore.","RPC copy-one-to-one is described in the figure below."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The following example demonstrates copying of the whole 'org:orgs' container from the 'dev01' node to the 'dev02' node under the 'uniconfig' topology. The replace operation is used."]},{"l":"Failed example","p":["The following example shows a failed copy-one-to-one RPC. The input contains a specified source datastore (the target datastore is the same), the merge operation, a source path, and a target path. In this example the target path is invalid, because it doesn't contain the 'org:orgs' container in the schema tree."]}],[{"l":"Transaction Log","p":["The transaction log consists of a transaction tracker and a revert-changes RPC. The transaction tracker stores information called transaction-metadata about performed transactions in the operational snapshot, whereas the revert-changes RPC can be used to revert changes that have been made in a specific transaction. A user only needs the ID of the transaction for that. One or more transactions can be reverted using a single revert-changes RPC."]},{"l":"RPC revert-changes"},{"l":"Transaction tracker"}],[{"l":"RPC revert-changes","p":["This RPC reverts changes that were configured within one transaction. If a user wants to revert a single transaction or multiple transactions, they must find the transaction-ids and paste them into the body of the RPC. The transaction-id is part of the transaction-metadata that is created by the transaction tracker after a commit/checked-commit RPC.","RPC revert-changes updates data only in the CONFIGURATION snapshot. To write the reverted data to the device, the commit RPC must be used after the revert-changes RPC."]},{"l":"Ignore non-existent nodes","p":["If a user wants to revert multiple transactions, some transaction metadata may contain nodes that no longer exist in UniConfig. In this case, the RPC fails. The user has a choice of two options:","remove transactions that contain non-existent nodes from the request body","add the 'ignore-non-existing-nodes' parameter to the RPC request body with a value of 'true' (default: 'false')","If the user does not use the 'ignore-non-existing-nodes' parameter, the default value 'false' is automatically used."]},{"l":"RPC Examples"},{"l":"Successful examples","p":["Before reverting a transaction we need to know its ID. We will use the GET request to display all stored transaction-metadata.","Reverting changes of a single transaction.","Reverting changes of multiple transactions.","Reverting changes of multiple transactions, where the transaction with id '2c4c1eb5-185a-4204-8021-2ea05ba2c2c1' contains the non-existent node 'R1'. In this case 'ignore-non-existing-nodes' with a value of 'true' is used, and therefore the RPC will be successful."]},{"l":"Failed example","p":["This is a case where the revert-changes request contains a non-existent transaction in the request body.","Reverting changes of multiple transactions, where the transaction metadata with id '2c4c1eb5-185a-4204-8021-2ea05ba2c2c1' contains a non-existent node. In this case 'ignore-non-existing-nodes' with a value of 'false' is used, and therefore the RPC fails.",
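"An illustrative request (the module name and the body shape are assumptions; the flag name is taken from this description): POST /rests/operations/transaction-log:revert-changes with a JSON body such as {\"input\":{\"ignore-non-existing-nodes\":true,\"target-transactions\":{\"transaction\":[\"2c4c1eb5-185a-4204-8021-2ea05ba2c2c1\"]}}}."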
]}],[{"l":"Transaction tracker"},{"l":"Introduction","p":["The transaction tracker is responsible for saving transaction-metadata to the operational snapshot after a successfully executed commit/checked-commit RPC. The transaction-metadata contains information about performed transactions, such as:","transaction-id - the identifier of the transaction.","type-of-commit-time - a timestamp of either 'last-commit-time' (when the transaction was successful) or 'failed-commit-time' (when the transaction failed). If multiple devices are configured, 'last-commit-time' contains the timestamp of the last update on the last device.","metadata - the items in this field represent nodes that have been configured in one transaction. Each item contains a diff item with additional information.","diff - the items in this field are specific changes. Each item contains the path to the change, the data before the change and the data after the change. In the case of a failed transaction, this information is not present.","topology - the topology on which the node is installed. Can be 'uniconfig' or 'unistore'.","Data-before is visible only if data was updated or deleted. Data-after is visible only if data was updated or created.","(figure: transaction-tracker)"]},{"l":"Configuration","p":["UniConfig stores transaction metadata only if the 'lighty-uniconfig-config.json' file contains the \"maxStoredTransactions\" parameter in the \"transactions\" container and its value is greater than 0. This setting must be made before running UniConfig, otherwise the \"maxStoredTransactions\" parameter will be '0' (the default value) and the transaction log will be disabled."]},{"l":"Show transaction-metadata","p":["The response to this GET request contains all stored transaction-metadata, their IDs and other items such as node-id, the updated data before the update and after the update, etc."]}],[{"l":"UniConfig Node Manager","p":["An additional git-like diff RPC was created to show all the changes grouped under root elements in a git-like style.","In the case where the configuration of one device fails, the UNM executes an automatic rollback in which the previous configuration is restored on all modified devices.","RPC calculate-diff","RPC calculate-git-like-diff","RPC check-installed-nodes","RPC checked-commit","RPC commit","RPC compare-config","RPC get-installed-nodes","RPC health","RPC install-multiple-nodes","RPC is-in-sync","RPC replace-config-with-operational","RPC sync-from-network","RPC sync-to-network","RPC uninstall-multiple-nodes","RPC validate","Synchronization from the network reads configuration from devices and stores it as the actual state in the OPER DS.","The responsibility of this component is to maintain the configuration on devices based on the intended configuration. Each device and its configuration is represented as a node in the uniconfig topology, and the configuration of this node is described using OpenConfig YANG models. The northbound API of the UniConfig Manager (UNM) is RPC-driven and provides functionality for commit with automatic rollback and synchronization of configuration from the network.","When a commit is called, the UNM creates a diff based on the intended state from the CONFIG DS and the actual state from the OPER DS. This diff is used as the basis for device configuration. The UNM prepares a network-wide transaction which uses Unified mountpoints for communication with different types of devices.",
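"A typical interaction, sketched with assumed endpoint names: create a transaction (POST /rests/operations/uniconfig-manager:create-transaction), modify the intended configuration under the 'uniconfig' topology with RESTCONF PUT/PATCH requests, and then apply the calculated diff with POST /rests/operations/uniconfig-manager:commit."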
]}],[{"l":"RPC calculate-diff","p":["This RPC creates a diff between the actual UniConfig topology nodes and the intended UniConfig topology nodes. The RPC input contains a list of UniConfig nodes for which to calculate the diff. The output of the RPC contains a list of statements representing the diff, and it matches all input nodes. If the RPC is called with an empty list of target nodes, the diff is calculated for each node modified in the UniConfig transaction. If some node fails for any reason, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC calculate-diff input has two target nodes and the output contains a list of statements representing the diff."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC calculate-diff input does not contain the target nodes, calculate-diff will be invoked on top of all touched nodes in the transaction.","or"]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC calculate-diff input has a target node and there is no diff."]},{"l":"Failed Example","p":["The RPC calculate-diff input has a target node. Node 'R2' has not been installed yet. The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC calculate-diff input has two target nodes. One of the nodes, 'R2', has not been installed yet. The output describes the result of the calculate-diff RPC."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error.","or"]}],[{"l":"RPC calculate-git-like-diff","p":["This RPC creates a diff between the actual UniConfig topology nodes and the intended UniConfig topology nodes. The RPC input contains a list of UniConfig nodes to calculate the diff for. The output of the RPC contains a list of statements representing the diff in a git-like style. If target nodes are not specified in the input, it checks every touched node in the transaction. If some node fails, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC calculate-git-like-diff input has two target nodes and the output contains a list of statements representing the diff."]},{"i":"successful-example-1","l":"Successful Example","p":["The RPC calculate-git-like-diff input has no target nodes specified, so it will look for all touched nodes in the transaction, and the output will contain a list of all changes on different paths. Multiple changes that occur under the same path are merged together."]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC calculate-git-like-diff input has a target node and there is no diff."]},{"l":"Failed Example","p":["The RPC calculate-git-like-diff input has a target node. Node 'R2' has not been installed yet. The output describes the result of the calculate-git-like-diff RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["The RPC calculate-git-like-diff input has two target nodes. One of the nodes, 'R1', has not been installed yet. The output describes the result of the calculate-git-like-diff RPC.",
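"An illustrative request shape for both diff RPCs (the module name is an assumption; an empty node list invokes the diff for all touched nodes): POST /rests/operations/uniconfig-manager:calculate-diff (or :calculate-git-like-diff) with a JSON body such as {\"input\":{\"target-nodes\":{\"node\":[\"R1\",\"R2\"]}}}."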
]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error."]}],[{"l":"RPC check-installed-nodes","p":["This RPC checks whether the devices included in the input are installed, by looking for the database content of each device. If content is found, the device is installed."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a device while no devices are installed."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and device R1 is installed."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains devices (R1 and R2) and both devices are installed."]},{"l":"Failed Example","p":["RPC input does not specify any nodes."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input is missing the target-nodes container."]}],[{"l":"RPC checked-commit","p":["The trigger for execution of a checked commit is the checked-commit RPC. A checked commit is similar to a commit RPC, but it also checks whether nodes are in sync with the network before it starts the configuration. The RPC fails if any node is out of sync. The output of the RPC describes the result of the commit and matches all modified nodes in the UniConfig transaction. If one node fails for any reason, the RPC fails entirely.","In comparison to the commit RPC, there is one additional phase between the 'lock and validate configured nodes' and 'write configuration into device' phases:","Lock and validate configured nodes","Check if nodes are in sync with the state on the devices","Write configuration into device","Validate configuration","Confirmed commit","Confirming commit (submit configuration)","The following diagram captures the check whether the configuration fingerprints in the transaction datastore and on the device are equal.","There is a difference between the fingerprint-based validation in phases 1 and 2. The goal of the first phase is to validate whether another transaction has already changed the same node, by comparing the fingerprint in the UniConfig transaction with the fingerprint in the database. The second phase, on the other hand, validates whether the fingerprint in the transaction equals the fingerprint on the device - that is, whether another system, or a user directly via the CLI, has updated the device configuration since the beginning of the transaction."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The configuration of nodes 'R1' and 'R2' has been changed in the transaction. Both 'R1' and 'R2' are in sync with the actual state on the device. The checked-commit RPC input invokes all touched nodes."]},{"l":"Failed Example","p":["The configuration of nodes 'R1' and 'R2' has been changed in the transaction. Both 'R1' and 'R2' are in sync with the actual state on the device. Node 'R1' has failed due to improper configuration. The output describes the result of the checked-commit RPC."]},{"i":"failed-example-1","l":"Failed Example","p":["The configuration of node 'R1' has been changed in the transaction. Node 'R1' is in sync with the actual state on the device. Node 'R1' has failed on the changed fingerprint. The output describes the result of the checked-commit."]},{"i":"failed-example-2","l":"Failed Example","p":["Node 'R2' has lost connection."]},{"i":"failed-example-3","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error.",
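"For illustration (assumed endpoint): POST /rests/operations/uniconfig-manager:checked-commit, either with an empty input to act on all touched nodes or with a JSON body such as {\"input\":{\"target-nodes\":{\"node\":[\"R1\",\"R2\"]}}}."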
]}],[{"l":"RPC commit","p":["1. Lock and validate configured nodes","2. Write configuration into device","3. Validate configuration","4. Confirmed commit","5. Confirming commit (submit configuration)","(figure: Configuration phase)","(figure: Confirmed commit)","Confirmed commit - It is used for locking the device configuration so that no other transaction can touch this device. This phase can be skipped with the \"do-confirmed-commit\" flag.","(figure: Confirming commit)","Confirming commit (submit configuration) - Persisting all changes on the devices and in the PostgreSQL database. The UniConfig transaction is closed.","If one of the nodes uses a confirmed commit (phase 4) that does not fail, then it is necessary to issue the confirming commit (phase 5) within the timeout period. Otherwise, the node configuration issued by the confirmed commit is reverted to its state before the confirmed commit (i.e. a confirmed commit makes only temporary configuration changes). The timeout period is 600 seconds (10 minutes) by default, but the user can change it in the installation request.","Lock and validate configured nodes - Locking all modified nodes using PostgreSQL advisory locks and validation of fingerprints - if another transaction tries to commit overlapping nodes, or a different transaction has already changed one of the nodes, the commit fails at this step.","(figure: Locking nodes)","The next diagram describes the first phase of the commit RPC - locking of changed nodes in the PostgreSQL database and verification whether another transaction has already committed overlapping nodes.","The next diagrams describe all 5 commit phases in detail:","Rollback - It is used to restore the configuration to its previous state if the configuration process fails. When configuring multiple devices in a single transaction and the process fails on one particular device, the rollback procedure is applied to all touched devices. This is done by the auto-rollback procedure, which is turned on by default. It can be switched off by setting the 'do-rollback' flag in the input of the commit RPC request; then only failed devices are rolled back.","(figure: Rollback operation)","RPC commit invokes all touched nodes in the transaction. There are no target nodes in the RPC input.","The 'skip-unreachable-nodes' flag controls whether unreachable nodes are skipped when the commit RPC is sent. If set to 'true', nodes that are not reachable are skipped and the others are configured. The default value is 'false'.","The configuration of nodes consists of the following phases:","The external application stores the intended configuration under nodes in the UniConfig topology. The trigger for execution of the configuration is the commit RPC. The output of the RPC describes the result of the commit.","The last diagram shows the rollback procedure that must be executed after a failed commit on nodes that have already been configured and don't support the 'candidate' datastore.","The third and fourth phases take place only on nodes that support these operations. If one node fails in any phase for any reason, the RPC fails entirely. After the commit RPC, the UniConfig transaction is closed regardless of the commit result.",
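"An illustrative commit request with the flags described above (the endpoint is an assumption; the flag names are taken from this description): POST /rests/operations/uniconfig-manager:commit with a JSON body such as {\"input\":{\"do-validate\":false,\"do-confirmed-commit\":false,\"do-rollback\":true,\"skip-unreachable-nodes\":false}}.",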
"Validate configuration - Validation of the written configuration from the point of view of constraints and consistency. This phase can be skipped with the \"do-validate\" flag.","(figure: Validation phase)","Write configuration into device - Pushing the calculated changes into the device without committing these changes."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["UniConfig commits nodes 'R1' and 'R2' that have been changed in the actual transaction."]},{"i":"successful-example-1","l":"Successful Example","p":["Nodes 'R1' and 'R2' have been changed. The commit RPC input has the flag to disable the confirmed-commit phase. UniConfig commits all touched nodes."]},{"i":"successful-example-2","l":"Successful Example","p":["If there are no touched nodes, the request will finish successfully."]},{"l":"Failed Example","p":["Node 'R1' has failed because the validation phase failed."]},{"i":"failed-example-1","l":"Failed Example","p":["Node 'R1' has failed because the confirmed commit failed. The validation phase was skipped due to the false \"do-validate\" flag."]},{"i":"failed-example-2","l":"Failed Example","p":["Node 'R1' has failed because of the time delay between the confirmed commit and the submitted configuration."]},{"i":"failed-example-3","l":"Failed Example","p":["Node 'R1' has failed due to improper configuration."]},{"i":"failed-example-4","l":"Failed Example","p":["Node 'R1' has lost connection."]},{"i":"failed-example-5","l":"Failed Example","p":["Node 'R1' has failed because of wrong configuration. In this case validation, confirmed-commit and auto-rollback were switched off. Because auto-rollback is switched off, the configuration of the 'R1' device was successful. However, this can only happen if the validation and confirmed-commit phases were successful or skipped; otherwise the configuration of the 'R1' device would also fail."]},{"i":"failed-example-6","l":"Failed Example","p":["The configuration of nodes 'R1' and 'R2' has been changed in the transaction and both are in sync with the actual state on the device. Then the connection to node 'R2' has been lost. The commit RPC input has the flag to skip unreachable nodes set to true. The result of the commit RPC describes the success of node 'R1' and shows the list of unreachable nodes."]}],[{"l":"RPC compare-config","p":["This RPC is a combination of the sync-from-network and calculate-diff RPCs. If one of those RPCs fails, this one also fails with no changes made.","The purpose of this RPC is to synchronize configurations from network devices to UniConfig nodes in the Configuration datastore of the UniConfig transaction.","The RPC input contains a list of UniConfig nodes whose configuration should be compared to the actual configuration in the transaction. The output of the RPC describes the result of compare-config and matches all input nodes with a list of statements representing the diff."]},{"l":"RPC Examples"},{"l":"Successful Example"},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, the configuration of all touched nodes in the transaction is compared to the synced device configuration."]},{"i":"successful-example-2","l":"Successful Example","p":["The RPC compare-config input has a target node and there is no diff."]},{"l":"Failed Example","p":["The RPC compare-config input has two target nodes. One of the nodes, 'R2', has not been installed yet. The output describes the result of the sync-from-network.",
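"An illustrative request (assumed endpoint): POST /rests/operations/uniconfig-manager:compare-config with a JSON body such as {\"input\":{\"target-nodes\":{\"node\":[\"R1\"]}}} - it first syncs the device configuration and then returns the calculated diff.",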
"If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error."]},{"i":"failed-example-1","l":"Failed Example"}],[{"l":"RPC get-installed-nodes","p":["This RPC returns all installed devices from a specified topology.","If no topology is specified, the output may contain devices from multiple topologies (CLI, NETCONF, gNMI). In this case, devices must be installed with the install request parameter \"uniconfig-config:install-uniconfig-node-enabled\" set to \"true\". The RPC with no topology looks for nodes installed under the UNICONFIG topology by default."]},{"l":"RPC Examples"},{"l":"Successful example","p":["The RPC input contains no topology and a device called 'R1' is installed in the NETCONF topology. With the parameter \"uniconfig-config:install-uniconfig-node-enabled\":\"true\" in the install request, it is also installed under the UNICONFIG topology."]},{"i":"successful-example-1","l":"Successful example","p":["The RPC input contains no topology and a device called 'R1' is installed in the NETCONF topology. With the parameter \"uniconfig-config:install-uniconfig-node-enabled\":\"false\" in the install request, it is not installed under the UNICONFIG topology."]},{"i":"successful-example-2","l":"Successful example","p":["The RPC input contains the GNMI topology and a device called 'R1' is installed in the topology."]},{"i":"successful-example-3","l":"Successful example","p":["The RPC input contains the CLI topology, but no devices are installed in the topology."]}],[{"l":"RPC health","p":["This RPC checks whether UniConfig is running. If database persistence is enabled, it also checks the database connection."]},{"l":"RPC Examples","p":["The health RPC input is empty and the RPC output contains the result of the operation.","Response when database persistence is disabled:","Response when database persistence is enabled and the database connection is valid:","Response when database persistence is enabled and the database connection is not valid:"]}],[{"l":"RPC install-multiple-nodes","p":["This RPC installs multiple devices at once. It uses the default install-node RPC. Devices are installed in parallel."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains two devices (R1 and R2)."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and R2 uses two different protocols."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains two devices (R1 and R2) and R2 is already installed using the CLI protocol."]},{"l":"Failed Example","p":["RPC input does not specify a node-id."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains two devices using the same node-id."]}],[{"l":"RPC is-in-sync","p":["This RPC can be used to verify whether the specified nodes are in sync with the current state in the Operational datastore of the UniConfig transaction. The verification is done by comparing configuration fingerprints. The configuration fingerprint on the device is compared with the last configuration fingerprint saved in the Operational datastore. A fingerprint is usually represented by a configuration timestamp or the last transaction ID. The is-in-sync feature is supported only for device types that have implemented translation units for the 'frinx-configuration-metadata' OpenConfig module (using cli units, netconf units, or uniconfig-native metadata units).",
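"For illustration (assumed endpoint): POST /rests/operations/uniconfig-manager:is-in-sync with a JSON body such as {\"input\":{\"target-nodes\":{\"node\":[\"R1\",\"R2\"]}}}; each node in the response carries a 'status' and, when complete, an 'is-in-sync' flag.",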
"The RPC input contains a list of UniConfig nodes for which the verification should be completed ('target-nodes' field). The response comprises the operation status for each of the nodes specified in the RPC input. If the operation fails, it is because the specified node has not been successfully installed, the connection has been lost, or UniConfig does not support reading the configuration fingerprint from the specific device type. Calling the RPC with an empty list of target nodes results in invocation of the RPC for each node that has been modified in the UniConfig transaction.","Possible RPC outputs per target node:","'status' field with the value 'complete' and a set 'is-in-sync' boolean flag; the is-in-sync feature is supported and the configuration fingerprints have been successfully compared.","'status' field with the value 'fail', with 'error-type' set to 'no-connection' and a corresponding 'error-message'; the Unified mountpoint doesn't exist because the connection has been lost or the node has not been mounted yet.","'status' field with the value 'fail', with 'error-type' set to 'uniconfig-error' and a corresponding 'error-message'; reading of the fingerprint from the Operational datastore or the Unified mountpoint has failed, or configuration metadata parsing is not supported for the device type.","Execution of the 'is-in-sync' RPC doesn't modify the Operational datastore. The configuration fingerprint that is stored in the Operational datastore is not updated. The 'sync-from-network' RPC must be used to update the last configuration fingerprint and the actual configuration state."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC input contains valid nodes for which the synchronization status must be checked ('R1' is synced while 'R2' is not synced):"]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, all touched nodes will be invoked."]},{"l":"Failed Example","p":["RPC input contains an invalid node: 'R1' doesn't support comparison of fingerprints (a metadata translation unit has not been implemented for this device)."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains 2 nodes: the first one, 'R1', is valid and synced; the second one ('R2') has not been installed yet. If there is one invalid node, the UniConfig operation will fail with 1 error entry in the response."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error."]}],[{"l":"RPC replace-config-with-operational","p":["This RPC replaces the UniConfig topology nodes in the Config datastore with the UniConfig topology nodes from the Operational datastore. The RPC input contains a list of the UniConfig nodes to replace from the Operational to the Config datastore of the UniConfig transaction. The output of the RPC describes the result of the operation and matches all input nodes. If the RPC is invoked with an empty list of target nodes, the operation is invoked for all nodes modified in the UniConfig transaction. If one node fails for any reason, the RPC fails entirely.",
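"For illustration (assumed endpoint): POST /rests/operations/uniconfig-manager:replace-config-with-operational with a JSON body such as {\"input\":{\"target-nodes\":{\"node\":[\"R1\",\"R2\"]}}}."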
]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC replace-config-with-operational input has 2 target nodes and the RPC output contains the result of the operation."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, the configuration of all touched nodes will be replaced by the operational state."]},{"l":"Failed Example","p":["RPC input contains a list of the target nodes. Node 'R1' has not been installed yet. The RPC output contains the result of the operation."]},{"i":"failed-example-1","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error."]}],[{"l":"RPC sync-from-network","p":["The purpose of this RPC is to synchronize configuration from network devices to the UniConfig nodes in the Operational datastore of the UniConfig transaction. The RPC input contains a list of the UniConfig nodes whose configuration should be refreshed from the network. The output of the RPC describes the result of sync-from-network and matches all input nodes. Calling the RPC with an empty list of target nodes results in syncing the configuration of all nodes that have been modified in the UniConfig transaction. If one node fails for any reason, the RPC fails entirely."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the nodes whose configuration should be refreshed.","If the RPC input does not contain the target nodes, all touched nodes in the transaction will be synced."]},{"l":"Failed Example","p":["RPC input contains a list of nodes whose configuration should be refreshed. Node 'R2' has not been installed yet."]},{"i":"failed-example-1","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error."]}],[{"l":"RPC sync-to-network","p":["This RPC is a combination of the sync-from-network and commit RPCs. If one of these RPCs fails, the RPC fails without any changes made.","The purpose of this RPC is to synchronize configuration from the UniConfig nodes in the Configuration datastore of the UniConfig transaction to network devices. The RPC input contains a list of the UniConfig nodes which are to be updated on a network device. The output of the RPC describes the result of sync-to-network and matches all input nodes. Calling the RPC with an empty list of target nodes results in syncing the configuration of all nodes that have been modified in the UniConfig transaction. If some node fails, the RPC fails entirely.","The admin-state of the UniConfig nodes specified in the input must be set to \"unlocked\"."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["RPC input contains the nodes which are to be updated on the corresponding network device."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, the operation will be invoked on top of all touched nodes in the transaction."]},{"l":"Failed Example","p":["If one or more input nodes are not set to the admin-state 'unlocked', the request will result in an error pointing out the nodes with the wrong admin-state."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains only one node with a bad admin-state."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains 2 nodes: the first one, 'R1', is valid; the second one, 'R2', has not been installed yet. If there is at least one invalid node, the operation will fail.",
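"An illustrative request (assumed endpoint): POST /rests/operations/uniconfig-manager:sync-to-network with a JSON body such as {\"input\":{\"target-nodes\":{\"node\":[\"R1\"]}}}."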
]},{"i":"failed-example-3","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error."]}],[{"l":"RPC uninstall-multiple-nodes","p":["This RPC uninstalls multiple devices at once. It uses the default uninstall-node RPC. Devices are uninstalled in parallel."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains two devices (R1 and R2)."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains devices (R1 and R2) and R2 is installed using two different protocols."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains two devices (R1 and R2) and R2 is already uninstalled from the CLI protocol."]},{"l":"Failed Example","p":["RPC input does not specify a node-id."]}],[{"l":"RPC validate","p":["The external application stores the intended configuration under nodes in the UniConfig topology. This configuration can be validated to check whether it is valid or not. The trigger for execution of configuration validation is the validate RPC. The RPC input contains a list of UniConfig nodes whose configuration should be validated. The output of the RPC describes the result of the validation and matches all input nodes. It is valid to call this RPC with an empty list of target nodes - in this case, all nodes that have been modified in the UniConfig transaction will be validated.","The configuration of nodes consists of the following phases:","Open transaction to device","Write configuration","Validate configuration","Close transaction","If one node fails in the second (validation) phase for any reason, the RPC fails entirely.","The validation (second phase) takes place only on nodes that support this operation.","The validate RPC is shown in the figure below."]},{"l":"RPC Examples"},{"l":"Successful Example","p":["The RPC validate input has 2 target nodes and the output describes the result of the successful validation."]},{"i":"successful-example-1","l":"Successful Example","p":["If the RPC input does not contain the target nodes, all touched nodes in the transaction will be validated."]},{"l":"Failed Example","p":["The RPC validate input has 1 target node and the output describes the result of the validation. The node has failed because validation failed."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains 2 nodes: the first one, 'R1', is valid; the second one, 'R2', has not been installed yet. If there is one invalid node, UniConfig will evaluate the nodes as failed."]},{"i":"failed-example-2","l":"Failed Example","p":["If the RPC input does not contain the target nodes and there are no touched nodes, the request will result in an error."]}],[{"l":"UniConfig properties","p":["UniConfig properties are application properties used to configure the application. They can be separated into three groups:","Runtime mutable properties can be modified at runtime (using the update-properties RPC), their changes take effect at runtime and the properties are persisted in the database.","Database persisted properties include all runtime mutable properties and some additional properties. These properties are stored in the database, which is always their primary source. With UniConfig Cloud Config, they remain constant across UniConfig instances in the same cluster and cannot be overridden via the application properties file.","Regular UniConfig properties comprise all the remaining properties. These properties can always be changed using the application.properties file and can differ between UniConfig instances.","Database persisted properties can be changed or read at application runtime without restarting UniConfig, by using UniConfig Cloud Config and the following RPCs:","RPC read-properties","RPC update-properties",
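"For illustration, a runtime update might be invoked over RESTCONF with a body such as {\"input\":{\"properties\":{\"<property-key>\":\"<new-value>\"}}} - the exact endpoint and body shape are assumptions, not taken from this page."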
]}],[{"l":"RPC read-properties","p":["The read-properties RPC reads default properties from the database. If a specified property key does not exist in the database, the key is returned in the ignored-keys section. The RPC works the same whether UniConfig Cloud Config is enabled or disabled.","(diagram: read)","If UniConfig Cloud Config is disabled, the read-properties RPC reads property values from the database. These values may differ from the values in the application instance."]},{"l":"RPC examples"},{"l":"Successful example","p":["RPC input contains default property keys."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains properties that are not default properties or are private (crypto keys and crypto types)."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input consists of properties that do not exist in the database."]}],[{"l":"RPC update-properties","p":["The update-properties RPC is used to update property values. If UniConfig Cloud Config is enabled, it also calls the Refresh Bus Endpoint to update properties at runtime for all connected UniConfig instances.","The RPC only updates default properties, except for crypto properties, for which there are separate RPCs (change-encryption-status and change-encryption-keys).","RPC sequence diagram with UniConfig Cloud Config enabled:","(diagram: update-with-ucc)","If UniConfig Cloud Config is disabled, the RPC only updates property values in the database. The application instance continues to use the old property values, which can cause confusion.","Additionally, if a new UniConfig instance is started after properties have been updated, that instance will use the updated property values from the database. UniConfig instances will therefore use different values for the same property, as described in the diagram below.","We recommend that you use this RPC with UniConfig Cloud Config. The exception is callbacks.access-token, which is always up to date.","RPC sequence diagram with UniConfig Cloud Config disabled:","(diagram: update-without-ucc)"]},{"l":"RPC examples"},{"l":"Successful example","p":["RPC input contains the default properties with correct values."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input contains the crypto default property."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains an incorrect property key."]},{"l":"Failed example","p":["RPC input contains default properties with incorrect values."]},{"i":"failed-example-1","l":"Failed example","p":["RPC input contains default properties with incorrect values."]}],[{"l":"Utilities","p":["Utilities are simple programs that are part of the UniConfig distribution. After unpacking and building the distribution, the utilities can be found in the 'utils' subdirectory."]},{"l":"YANG Packager"},{"l":"Difference between OpenAPI specifications"}],[{"l":"Difference between OpenAPI specifications"},{"l":"Introduction","p":["The UniConfig distribution includes a program for checking the difference between OpenAPI specifications. After building and unpacking the distribution, you can find the program in the 'utils' directory as a shell script called 'show_swagger_diff.sh'.",
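"A typical invocation using the documented default paths (illustrative): ./show_swagger_diff.sh -f openapi_diff/old -n openapi_diff/new -o openapi_diff",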
"The program uses OpenAPI-diff to generate the OpenAPI differences."]},{"l":"Usage","p":["The ./show_swagger_diff.sh script accepts four arguments. Each one has its own identifier, so you can give the arguments in any order. All arguments are optional, as default values are included for each argument.","--former, -f /path/to/former/yaml/files - optional argument. Path to the previous OpenAPI specifications (.yaml files). The default path is 'openapi_diff/old'.","--new, -n /path/to/new/yaml/files - optional argument. Path to the new OpenAPI specifications (.yaml files). The default path is 'openapi_diff/new'.","--output, -o /path/to/output - optional argument. Path for the HTML output file with the differences. The default path is 'openapi_diff'.","-s - optional argument. Silent printing; includes less information.","The bash script ./show_swagger_diff.sh also includes a simple help facility. There are two options for showing the help text:","./show_swagger_diff.sh -h","./show_swagger_diff.sh --help","The script only accepts YAML files."]},{"l":"Example use case"},{"l":"Default usage","p":["This example shows basic usage of the script with and without optional arguments. Open a terminal in the '../utils' directory and run the following command:","OR"]},{"l":"Usage with non-existent input path","p":["This example shows basic usage of the script where some specified input directories do not exist. Open a terminal in the '../utils' directory and run the following command:"]}],[{"l":"YANG packager"},{"l":"Introduction","p":["YANG packager is a simple program which is part of the UniConfig distribution. Users can find it in the utils/ directory after building and unpacking the UniConfig distribution. It is invoked via a simple shell script called 'convertYangsToUniconfigSchema.sh'. YANG packager is responsible for:","validation of user-provided YANG files","copying valid YANG files to the user-defined directory","informing the user about the conversion process"]},{"l":"Usage","p":["-d /path/to/default - optional argument. Sometimes YANG files need additional dependencies that are not provided in the source directories. In this case it is possible to provide a path to a 'default' directory which contains the additional YANG files. If a YANG file is missing, YANG packager will take it from this directory.","-enableSwagger - optional argument. Path to a file that enables OpenAPI generation.","-g - optional argument. Path to a directory where generated Java sources with constants from YANG elements are saved. By default, generation of Java files is disabled.","-i /path/to/sources - required argument. There are two options for where the path can be directed:","-jd - optional argument. Flag that enables generation of Java documentation on data elements.","-o /path/to/output-directory - required argument. The path where valid YANG files are saved. If the output directory exists, it will be replaced by a new one.","-pn - optional argument. Custom package name for the generated classes.","-px - optional argument. Flag that enables a prefix for the names of the constants inside the generated classes.","-r - optional argument. Selection of repositories inside the source directory, or a file with the names of the directories containing the files from which constants will be generated.","-s /path/to/skip-list - optional argument. A text file with the names of YANG files that should not be included in the conversion process. This file must only contain module names, without revision and the .yang suffix.","-to-file - optional argument. When this flag is used, YANG packager also saves the debug output to a file. The file is created at the same path as the output directory and contains the suffix '-info' in its name. If the output directory is called 'output-directory', the file will be called 'output-directory-info'.",
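"A minimal invocation using only the required arguments (illustrative paths): ./convertYangsToUniconfigSchema.sh -i /path/to/sources -o /path/to/output-directory",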
"./convertYangsToUniconfigSchema --help","./convertYangsToUniconfigSchema -h","The bash script ./convertYangsToUniconfigSchema also includes a simple help facility. There are two options for showing the help text:","If the compilation process detects invalid YANG files, the output directory will not be created. In this case, the user has to fix the invalid YANG files or use a combination of the \"-d\" and \"-s\" arguments.","The script ./convertYangsToUniconfigSchema accepts several arguments. Each one has its own identifier, so the arguments can be given in any order. Two arguments are required: the path to the resources that contain YANG files and the path to the output directory to which all valid YANG files are copied. The other arguments are optional, for example the path to the \"default\" directory which contains default YANG files, the path to the \"skip-list\", and the \"-to-file\" flag, which can be used to write the debug output to a file.","The user is responsible for the validity of YANG files in the default directory. These files are not checked by YANG packager.","to the directory that contains YANG files and other sub-directories with YANG files","to the text-file that contains defined names of directories. These defined directories have to be stored at the same path as the text-file."]},{"l":"Example use-case"},{"l":"Basic usage 1","p":["This is basic usage of the script where only the mandatory arguments are used. In this case, a directory with YANG files is used as the source. All files in the source directory are valid YANG files. Open a terminal, go to the ../utils directory and run the command:"]},{"l":"Basic usage 2","p":["This is basic usage of the script where only the mandatory arguments are used. In this case, a directory with YANG files is used as the source. The source directory also contains one invalid YANG file with a missing import. Open a terminal, go to the ../utils directory and run the command:"]},{"l":"Basic usage 3","p":["This is basic usage of the script where only the mandatory arguments are used. In this case, a directory with YANG files is used as the source. The source directory also contains one non-YANG file. Open a terminal, go to the ../utils directory and run the command:"]},{"l":"Usage with default directory","p":["This is usage with a path to a default directory that contains one YANG file, openconfig-mpls. The source directory also contains one invalid YANG file, 'cisco-xr-openconfig-mpls-deviations.yang', with the missing import 'openconfig-mpls'. This missing import is loaded from the default directory. Open a terminal, go to the ../utils directory and run the command:"]},{"l":"Usage with skip-list","p":["This is usage with a path to a skip-list text file that contains one YANG file name, cisco-xr-openconfig-mpls-deviations. This YANG file will not be included in the conversion process. Open a terminal, go to the ../utils directory and run the command:",
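"An illustrative command for this case: ./convertYangsToUniconfigSchema.sh -i /path/to/sources -o /path/to/output-directory -s /path/to/skip-list"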
]},{"l":"Usage with text-file as a source","p":["In this example, a path to a text-file with defined names of source directories is used.","Open a terminal, go to the ../utils directory and run the command:"]},{"i":"usage-with--to-file-flag","l":"Usage with -to-file flag","p":["This is usage where the output is also printed to a file. The user can find the output information file at the path /path/to/output-info.","Open a terminal, go to the ../utils directory and run the command:"]},{"i":"usage-with-text-file-as-a-source-and--to-file-flag","l":"Usage with text-file as a source and -to-file flag","p":["In this example, a path to a text-file with defined names of source directories is used, together with the flag for printing outputs to files. The user can find the output information files at the paths /path/to/output/directory-1-info and /path/to/output/directory-2-info.","Open a terminal, go to the ../utils directory and run the command:","Content of the text-file"]},{"i":"usage-with--enableswagger-flag","l":"Usage with '-enableSwagger' flag","p":["In this example, a path to a text-file with defined names of source directories is used, together with the flag to print outputs to files and the flag to enable swagger for OpenAPI file generation. The swagger configuration file is located at ../utils/config/swagger-config.json. Swagger output files are generated per directory, and they are located in the output directory. The user can find the output information files at the paths /path/to/output/directory-1-info and /path/to/output/directory-2-info.","Open a terminal, go to the ../utils directory and run the command:","Additional parameters are available for swagger generation that further customise the OpenAPI files. These parameters are located at the beginning of the page.","The output then looks like this:"]},{"i":"error---source-directory-does-not-exist","l":"Error - source directory does not exist","p":["The user-defined source directory does not exist.","Open a terminal, go to the ../utils directory and run the command:"]},{"i":"error---source-directory-is-empty","l":"Error - source directory is empty","p":["The user-defined source directory is empty. Open a terminal, go to the ../utils directory and run the command:"]},{"i":"error---sources-defined-in-text-file","l":"Error - sources defined in text-file","p":["One directory defined in the text-file is empty and the other one does not exist.","Open a terminal, go to the ../utils directory and run the command:","Content of the text-file"]}],[{"l":"Admin State","p":["Admin state is used to lock, unlock or southbound-lock devices. Modification of data on those devices is then allowed or forbidden accordingly. Currently, three states are supported:","LOCKED - When a device is administratively locked, it is not possible to modify its configuration, and no changes are ever pushed to the device.","UNLOCKED - The device is assumed to be operational. All changes are attempted to be sent southbound. This is the default when a new device is created.","SOUTHBOUND_LOCKED - It is possible to configure the device, but no changes are sent to the device. This mode is useful when pre-provisioning devices.","This state is automatically added to the device during installation.",
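"For illustration, the state can be set in the install request body (the node-id and surrounding fields are illustrative only): {\"input\":{\"node-id\":\"R1\",\"uniconfig-config:admin-state\":\"southbound_locked\"}}.",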
"The user can further specify what state the device should be in via:","\"uniconfig-config:admin-state\": \"unlocked\"","The state variable should be one of the above-mentioned options.","If the user wants to change the state after installation, an RPC for changing the state is available."]},{"l":"RPC Example","p":["RPC input contains the device name and the state that it should be changed to."]},{"i":"rpc-example-1","l":"RPC Example","p":["GET request to get the actual state of the device."]},{"l":"RPC Failed Example","p":["The device is in the locked admin-state and the user tries to modify data on the device."]}],[{"l":"Build-and-Commit Model"},{"l":"Introduction","p":["The build-and-commit model is based on explicit creation of a transaction, invoking operations in the scope of this transaction and finally committing or closing the transaction. The transaction represents a session between the client and the UniConfig instance.","Using explicitly created transactions has multiple advantages in comparison to the Immediate Commit Model:","Multiple operations and modifications can be invoked in a single transaction while keeping transactions isolated.","Most UniConfig operations, such as calculate-diff and commit, have no use in the Immediate Commit Model - they are valuable only if the Build-and-Commit Model is used.","The transaction allows a client to identify whether it still communicates with the same UniConfig instance (this property is useful in a clustered deployment). If the UniConfig instance does not know about the transaction, the request will fail because the transaction has expired, is closed, or was never created."]},{"l":"Configuration","p":["Configuration related to UniConfig transactions is placed in the 'config/lighty-uniconfig-config.json' file under the 'transactions' container. Note that the build-and-commit model is enabled if 'uniconfigTransactionEnabled' is set to 'true' (the default value)."]},{"l":"Optimistic locking mechanism","p":["A race condition between transactions that are committed in parallel and contain changes to the same nodes (uniconfig, unistore, snapshot, or template nodes) is solved using an optimistic locking mechanism. The configuration of the same node can be modified in parallel from 2 transactions; however, only the first committed transaction will succeed. The commit of the second transaction will fail.","UniConfig uses 2 different techniques for the detection of conflicts during the commit or checked-commit operation:","Comparison of configuration fingerprints - The fingerprint value is updated for an altered node at the end of the commit operation. At the beginning of the commit operation, UniConfig compares the value of the actual fingerprint in the database with the value of the fingerprint read before the first CRUD operation done in the transaction and with the last synced fingerprint (updated after execution of the sync-from-network RPC). If the actual fingerprint from the database equals the fingerprint read before the first CRUD operation or the last synced fingerprint, the commit operation can continue. Otherwise, an error is returned without touching any devices on the network.","Per-node advisory locks - Comparison of configuration fingerprints is reliable if transactions are committed one after another. However, such serialization cannot be achieved in a clustered environment because UniConfig instances are not coordinated. If 2 transactions are committed at the same time and both assume that the configuration fingerprints haven't been updated by the other transaction, both transactions may start to push changes to network devices at the same time. To prevent this scenario, UniConfig locks the node in the PostgreSQL database using transaction-level advisory locks at the beginning of the commit operation. If another transaction tries to lock the same node, the attempt fails and the second transaction does not enter the critical section - instead, it fails. Locks are automatically released at the end of the transaction (the commit RPC closes the transaction).",
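"As an illustration of the underlying PostgreSQL mechanism (not UniConfig's actual schema): SELECT pg_advisory_xact_lock(<node-key>); acquires a transaction-level advisory lock that is released automatically when the database transaction ends.",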
"All possible scenarios are captured in the following diagrams.","(diagram: Optimistic locking)"]},{"l":"Dynamic mountpoints","p":["Mountpoints are created only when UniConfig needs to read or write some data from or to a device, and the lifecycle of a mountpoint is bounded by the lifecycles of the transactions that use it. If a mountpoint is not used by any transaction, UniConfig automatically closes this mountpoint - the associated operational data on the southbound layer and the connection to the device are removed.","The first diagram demonstrates the mounting of 2 devices which are used by 1 transaction - after this transaction is closed, both mountpoints are closed. The second diagram shows a scenario in which 2 transactions share 1 of 2 mountpoints - after the first transaction is closed, 1 of the mountpoints is not closed, since the second transaction may still communicate with the corresponding device."]},{"l":"Creation of transaction","p":["A transaction can be created using the create-transaction RPC. The RPC has no input body and also returns a response without a body. The response additionally contains a Set-Cookie header with the UNICONFIGTXID key and a corresponding value - a transaction identifier that conforms to the RFC-4122 Universally Unique IDentifier (UUID) format.","The process of transaction creation is depicted in the following sequence diagram.","(diagram: create-transaction RPC)","UniConfig performs the following steps after the create-transaction RPC is called:","Creation of a connection to the database system - The connection is created with auto-commit disabled, enabling transactional features. UniConfig uses the 'read committed' isolation level.","Creation of a database transaction - It provides access to the remote PostgreSQL database. Using the database transaction it is possible to read committed data, read uncommitted changes created by this transaction and write modifications to the database. Data read at the first access to some resource is cached in the datastore transaction - when some component tries to access the same resource again, it is read only from the datastore transaction. Data is written to the database transaction at the invocation of the commit/checked-commit RPC.","Creation of a datastore read-write transaction - It provides access to the OPER and CONFIG datastores bound to this transaction. The datastore is used only as a cache between the application and the PostgreSQL database, and it resides only in the memory allocated to the UniConfig process. The datastore transaction is never committed - the cache is discarded at the end of the transaction's life.","Registration of the transaction - A transaction is always bound to 1 specific UniConfig instance."]},{"l":"Successful example","p":["The following request shows the successful creation of a UniConfig transaction. The response contains a Set-Cookie header with the UNICONFIGTXID key and value.",
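"An illustrative request (the endpoint and port are assumptions): curl -X POST 'http://localhost:8181/rests/operations/uniconfig-manager:create-transaction' - the UNICONFIGTXID cookie is returned in the Set-Cookie response header."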
]},{"l":"Failed example","p":["The most common reason for failed creation of a UniConfig transaction is reaching the maximum number of open transactions, which is limited by the ('maxDbPoolSize' - 'maxInternalDbConnections') database connection pool setting. In that case, UniConfig returns a response with the 500 status code."]},{"l":"Transaction idle-timeout","p":["The create-transaction RPC can be used with an optional query parameter called timeout. This parameter is used to override the global idle timeout for the transaction created by this RPC call. After the transaction has been inactive for the specified time, it is automatically cleaned. The value of this parameter is a whole number that defines the time in seconds."]},{"l":"Dedicated session to device","p":["By default, UniConfig shares a southbound session to a network device if multiple UniConfig transactions use the same device via the same management protocol. This behaviour can be disabled using the 'dedicatedDeviceSession' query parameter, which accepts a boolean value. The UniConfig transaction will then create a dedicated session to the device, which is used only by that transaction and closed immediately after the transaction is committed or closed.","Dedicated sessions to a device are useful when:","the device is not able to process requests in parallel via the same session,","the device is able to process requests in parallel via the same session, but does not actually process them in parallel, which decreases processing performance."]},{"l":"Invocation of CRUD operation in transaction","p":["CRUD operations for modifying or reading node configuration can be invoked in a specific transaction by appending UNICONFIGTXID (key) with the UUID of the transaction (value) to the Cookie headers. In that case, the operation is invoked only in the scope of a single transaction - changes are not visible to other transactions until this transaction is successfully committed.","The next diagram describes the execution of a CRUD operation from the RESTCONF API. It also shows the difference between the datastore and database transactions - data is read from the database only at the first access to some data (for example, a node configuration). After that, this configuration is cached inside the temporary datastore transaction - the goal is to improve performance by limiting data transfer between UniConfig and PostgreSQL. Subsequent access to the same configuration can be served from the in-memory datastore.","(diagram: Invocation of CRUD)"]},{"i":"successful-example-1","l":"Successful example","p":["The following request demonstrates reading of some configuration from the uniconfig topology, junos node, in the transaction with ID 'd7ff736e-8efa-4cc5-9d27-b7f560a76ff3'."]},{"i":"failed-example-1","l":"Failed example","p":["Trying to use a non-existing UniConfig transaction results in the 422 status code (Unprocessable Entity)."]},{"l":"Invocation of RPC operation in transaction","p":["An RPC operation can be invoked in a specific transaction the same way as a CRUD operation - by specifying UNICONFIGTXID in the Cookie header.","There are a few differences between CRUD and RPC operations from the point of view of transactions:","Commit, checked-commit, and close-transaction RPCs can change the state of the transaction; the create-transaction RPC is reserved for the creation of transactions.",
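"For illustration, invoking an RPC inside a transaction (the endpoint and port are assumptions; the transaction ID is the example ID used above): curl -X POST 'http://localhost:8181/rests/operations/uniconfig-manager:calculate-diff' -H 'Content-Type: application/json' -H 'Cookie: UNICONFIGTXID=d7ff736e-8efa-4cc5-9d27-b7f560a76ff3' -d '{\"input\":{\"target-nodes\":{}}}'",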
"Not all RPC operations exposed by UniConfig use dedicated transactions - in that case, these RPCs just ignore the explicitly specified transaction and either don't work with transactions at all or create a transaction internally (examples: install-node, uninstall-node RPC).","There are also transaction-aware operations that directly leverage properties of transactions. For example, if some UniConfig RPC is invoked with an empty list of target nodes, the operation is automatically applied to all modified nodes in the transaction (the calculate-diff RPC with empty target nodes computes the diff for all modified nodes in the transaction).","The following diagram shows the execution of an arbitrary RPC in the specified transaction.","(diagram: Invocation of RPC)"]},{"i":"successful-example-2","l":"Successful example","p":["Invocation of the calculate-diff RPC in the transaction which contains modifications done on the 'junos' node."]},{"i":"failed-example-2","l":"Failed example","p":["Invocation of the calculate-diff RPC with a transaction ID that has a wrong format."]},{"l":"Closing transaction","p":["There are 2 ways a transaction can be closed:","close-transaction RPC - Explicit closing of the transaction, which results in dropping all changes done in the transaction.","commit/checked-commit RPC - After execution of the commit operation, the transaction is automatically closed (regardless of the commit result). The behaviour of the commit and checked-commit RPCs is described in more detail in the 'UniConfig Node Manager' section.","The close-transaction RPC doesn't contain a body, only the Cookie header with the UNICONFIGTXID property pointing to the transaction that the user would like to close. The response contains information on whether the transaction was successfully closed.","The following sequence diagrams describe the close-transaction procedure. It is split into 2 diagrams to improve readability and to reuse some parts from other diagrams.","(diagram: close-transaction RPC)","(diagram: Clean orphaned mountpoints)","The most important actions, briefly:","Loading the UniConfig transaction from the registry by the provided transaction ID that is extracted from the Cookie header.","Closing the connection to the database.","Cancellation of the database transaction.","Cancellation of the datastore read-write transaction.","Unregistration of the transaction from the local registry.","Unmounting nodes that are not referenced by any UniConfig transaction - the connection to the device is closed and the representing southbound / Unified mountpoints are removed together with the state data.","After a transaction is closed, it cannot be used by any other operation - the user must create a new transaction in order to use the build-and-commit model."]},{"i":"successful-example-3","l":"Successful example","p":["Closing an existing transaction using the close-transaction RPC. The response doesn't have a body, only status code 200."]},{"i":"failed-example-3","l":"Failed example","p":["If the transaction has already been closed, the user will receive a response with a JSON body containing an error message."]},{"l":"Transaction cleaner","p":["The transaction cleaner is used for automatic closing of transactions that have been open longer than the specified timeout value (the 'transactionIdleTimeOut' or 'maxTransactionAge' setting in the configuration). A transaction resets its 'transactionIdleTimeOut' timer after invoking a CRUD or RPC operation and remains valid for the time specified in the value of the setting. This mechanism effectively suppresses application-level errors where open transactions are not closed at the end of a workflow.",
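"For illustration (the endpoint and port are assumptions; the timeout query parameter is described above): curl -X POST 'http://localhost:8181/rests/operations/uniconfig-manager:create-transaction?timeout=120' creates a transaction with a 120-second idle timeout.",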
{"l":"Transaction cleaner","p":["The transaction cleaner is used for automatic closing of transactions that have been open longer than the specified timeout value ('transactionIdleTimeOut' or 'maxTransactionAge' setting in the configuration). A transaction resets its idle timer ('transactionIdleTimeOut') after a CRUD or RPC operation is invoked, and remains valid for the time specified by that setting. This mechanism effectively suppresses application-level errors where open transactions are not closed at the end of a workflow.","The next sequence diagram describes the cleaning process. The referenced diagram 'Close transaction' is placed in the previous 'Closing transaction' section."]},{"l":"Use cases"},{"l":"Modification of different devices in separate transactions","p":["1. Installation of 2 devices - ‘xr6_1’ and ‘xr6_2’ (without transaction ID)","2. Creation of 2 uniconfig transactions: let’s name them TX1 and TX2","3. Modification of ‘xr6_1’ uniconfig configuration inside TX1","4. Modification of ‘xr6_2’ uniconfig configuration inside TX2","5. Verification if TX1 and TX2 are isolated","6. Committing TX1 and TX2 using the uniconfig-manager:commit RPC","7. Verification of committed data","8. Verification if TX1 and TX2 are closed","All 3 responses - Status 200 OK with the expected data returned. Similar verification can be done on 'xr6_2'.","Both responses should return Status 404 Not Found:","Creation of a new Loopback79 interface - the cookie header contains the UNICONFIGTXID of TX2:","Creation of a new Loopback97 interface in TX1 - the cookie header contains the UNICONFIGTXID of TX1:","It is not required to specify target nodes in the input because the UniConfig transaction tracks modified nodes:","Response - Status 422 Unprocessable Entity:","Response:","Since there aren't any conflicts between modifications in the committed transactions, both RPCs should succeed. Expected responses:","The first response contains the transaction-id of TX1 that can be used in subsequent requests that belong to TX1:","The second response contains the transaction-id of TX2 that can be used in subsequent requests that belong to TX2:","Trying to read some data in TX1:","Trying to read some data in TX2:","TX1 doesn't see modifications done in TX2 and vice versa:","Verification that the configuration was correctly committed to devices (direct read under yang-ext:mount) and that the datastore was updated (GET request without transaction ID):","Verification that TX1 contains the created interface (Cookie header contains the UNICONFIGTXID of TX1):","Verification that TX2 contains the created interface (Cookie header contains the UNICONFIGTXID of TX2):"]},
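As a sketch of step 6 above: the uniconfig-manager:commit RPC is named in the use case, and the empty target-nodes input relies on the transaction tracking its own modified nodes, as noted above. The exact input wrapping is an assumption:

```bash
# Commit all changes staged in TX1; <TX1-uuid> is a placeholder for the
# UNICONFIGTXID value returned when TX1 was created.
curl -X POST 'http://localhost:8181/rests/operations/uniconfig-manager:commit' \
  -H 'Content-Type: application/json' \
  -H 'Cookie: UNICONFIGTXID=<TX1-uuid>' \
  -d '{ "input": { "target-nodes": {} } }'
```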
{"l":"Modification of sub-tree on same device in separate transactions","p":["1. Installation of device ‘xr6_1’","2. Preparation of configuration on 'xr6_1'","3. Creation of 2 uniconfig transactions: let’s name them TX1 and TX2","4. Modification of ‘xr6_1’ uniconfig configuration inside TX1","5. Modification of ‘xr6_1’ uniconfig configuration inside TX2","6. Commit TX1","7. Commit TX2","8. Verification of committed data in TX1 / non-committed data in TX2","9. Verification if TX1 and TX2 are closed","Changing the description of interface Loopback97 to 'next loopback' - there is a conflict with TX1, which also tries to create/replace the configuration of the same interface:","Changing the description of interface Loopback97 to 'test loopback':","Commit TX1 without target nodes - it should fail because the same node has already been modified by a different transaction that has already been committed:","Commit TX2 without target nodes - it should pass:","Creation of the Loopback97 interface with some initial description:","Creation of the uniconfig transaction TX1:","Creation of the uniconfig transaction TX2:","Respective responses:","Response - Status 200 OK with an error message:","Response:","Trying to read some data in the transaction:","Verification that committed changes in TX1 were applied to the datastore and device:"]}],[{"l":"Device Discovery","p":["\"addressCheckLimit\" specifies how many addresses are checked. If more addresses are specified in the request, the request will not be successful.","\"max-pool-size\" specifies the size of the executor that is used. If the number of addresses in the request is high, consider raising the value.","\"network\": \"192.168.1.0/24\"","\"start-ipv4-address\": \"192.168.1.1\", \"end-ipv4-address\":\"192.168.1.254\"","/opt/uniconfig-frinx/config/application.properties","~/FRINX-machine/config/uniconfig/frinx/uniconfig/config/application.properties","Execute the ifconfig command in the terminal and look for an interface. If you are using a VPN, the interface is often called tun0. If not, look for a different interface. Copy inet from the interface and paste it into the file.","For testing, you need to add your IP address to the configuration JSON file. The configuration file is located under","If you specify the range using a network statement, the network address and broadcast address are not included in the discovery process. If you specify the range via range statements, make sure that only host addresses are included in the specified range.","If you want to discover hosts and ports in listening state in a network, do not add the network and broadcast address of that network. For example, if you want to check the network \"192.168.1.0/24\", you can use one of the following:","initial-pool-size specifies the initial size of the thread pool that is used by the executor.","keepalive-time specifies the time (in seconds) before the execution of a specified task times out.","RPC device-discovery is used to verify reachable devices in a network. You can either check a single IP address in IPv4 format, a network, or a range of addresses. Additionally, you can also specify a port or range of ports (TCP or UDP) that are checked to see if they are open. The ICMP protocol is used to check the availability of devices.","The input consists of a list of all IP addresses that should be checked (IPv4 or IPv6; a single IP address, a network with a prefix, or a range of IP addresses). Additionally, it contains the TCP/UDP ports that should be checked as open or not on the given addresses.","The output of the RPC shows if the IP addresses are reachable via the ICMP protocol. For every IP address, a list of open TCP/UDP ports is also included.","The snippet contains two additional parameters.","When running UniConfig stand-alone, the config file is in the config folder:"]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a network with the prefix /29. Addresses in the network and desired ports are checked for availability. The output contains reachable addresses in the network and all open TCP/UDP ports."]},
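A hedged sketch of a device-discovery request; the RPC path and the exact input wrapping are assumptions, while the 'network' and port fields follow the notes above:

```bash
# Check which hosts in 192.168.1.0/24 respond to ICMP and have TCP port 22 open.
curl -X POST 'http://localhost:8181/rests/operations/device-discovery:discover' \
  -H 'Content-Type: application/json' \
  -d '{
        "input": {
          "devices": [ { "network": "192.168.1.0/24" } ],
          "tcp-port": [ { "port": 22 } ]
        }
      }'
```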
{"i":"successful-example-1","l":"Successful example","p":["RPC input contains a range of addresses. The addresses and desired ports are checked for availability. The output contains reachable addresses and all open TCP/UDP ports."]},{"i":"successful-example-2","l":"Successful example","p":["RPC input contains the host name and ports that are checked for availability. The output shows whether the host is reachable as well as all open TCP/UDP ports."]},{"l":"Failed Example","p":["RPC input contains two addresses that are incorrectly wrapped."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains an IP range where the start point is greater than the end point."]},{"l":"Not supported operation Example","p":["RPC input contains a network in IPv6 format, which is currently not supported."]}],[{"l":"Dry-run manager"},{"l":"RPC dryrun-commit","p":["The RPC resolves the diff between the actual and intended configuration of nodes by using the UniConfig Node Manager. Changes for CLI nodes are applied using the cli-dryrun mountpoint, which only stores the translated CLI commands in the cli-dry-run journal. After all changes are applied, the cli-dryrun journal is read and an RPC output is created and returned. It works similarly with NETCONF devices, but outputs NETCONF messages instead of CLI commands. RPC input contains a list of UniConfig nodes for which to execute the dry run. The output of the RPC describes the results of the operation and matches all input nodes. It also contains a list of commands and NETCONF messages for the given nodes. If the RPC is called with an empty list of target nodes, the dryrun operation is executed on all modified nodes in the UniConfig transaction. If one node fails for any reason, the entire RPC fails.","RPC dryrun commit"]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called."]},{"i":"successful-example-1","l":"Successful example","p":["RPC input does not contain target nodes; the dryrun is executed with all modified nodes."]},{"l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called. One node does not support dry-run."]},{"i":"failed-example-1","l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to the device if the RPC commit or checked-commit was called. One node has a bad configuration."]},{"i":"failed-example-2","l":"Failed Example","p":["RPC input contains the target node and the output contains a list of commands which would be sent to a device if the RPC commit or checked-commit was called. One node does not support dry-run (IOSXR) and one is not in the unified topology (IOSXRN). There is one extra node which has not been mounted yet (AAA)."]},{"i":"failed-example-3","l":"Failed Example","p":["RPC input contains a target node and the output contains a list of commands which would be sent to a device if the RPC commit or checked-commit was called. One node has not been mounted yet (AAA)."]},
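A hedged sketch of a dryrun-commit invocation; the 'dryrun-manager' module prefix and the target-nodes input shape are assumptions:

```bash
# Dry-run the changes staged for node 'IOSXR' in the current transaction;
# the output lists the commands that a real commit would push to the device.
curl -X POST 'http://localhost:8181/rests/operations/dryrun-manager:dryrun-commit' \
  -H 'Content-Type: application/json' \
  -H 'Cookie: UNICONFIGTXID=<transaction-uuid>' \
  -d '{ "input": { "target-nodes": { "node": ["IOSXR"] } } }'
```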
{"i":"failed-example-4","l":"Failed Example","p":["If the RPC input does not contain target nodes and there weren't any touched nodes, the request results in an error."]}],[{"l":"Immediate Commit Model","p":["The Immediate Commit Model creates a new transaction for every call of an RPC. The transaction is then closed, so no lingering data will occur.","For reading data (GET request), a sequence diagram was created for better understanding of how the whole process works.","Get Request","Similarly, a sequence diagram for putting data (PUT request) was created as well.","Put Request","The key difference between those diagrams is that operations which edit data (PUT, PATCH, DELETE, POST) and RPC calls need to be committed to the database, so there is an additional call of the commit RPC. This commit ensures that the transaction is closed. For reading data, the transaction must be closed differently: because no data was changed, calling a commit would be unnecessary.","When calling the 'sync-from-network' RPC, it internally calls 'replace-config-with-operational'. Note that this only works when using the Immediate Commit Model."]},{"l":"Configuration","p":["Configuration related to UniConfig transactions is placed in the 'config/lighty-uniconfig-config.json' file under the 'transactions' container. A user can turn off the Immediate Commit Model and use only the Build and Commit Model instead."]},{"l":"RPC Examples"},{"l":"Successful example","p":["RPC input contains a new interface that will be added to the existing ones.","After the data is put into the database, it is automatically committed and can be viewed."]},{"l":"Failed Example","p":["RPC input contains a value that is not supported."]}],
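A sketch illustrating the Immediate Commit Model described above; the node name and interface model in the body are purely illustrative:

```bash
# With the Immediate Commit Model, no UNICONFIGTXID cookie is sent: UniConfig
# opens a transaction, applies the PUT, and commits it automatically.
curl -X PUT \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=uniconfig/node=R1/frinx-uniconfig-topology:configuration/interfaces/interface=Loopback79' \
  -H 'Content-Type: application/json' \
  -d '{ "interface": [ { "name": "Loopback79", "description": "test loopback" } ] }'
```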
[{"l":"Kafka Notifications"},{"l":"Introduction","p":["NETCONF devices produce NETCONF notifications. UniConfig can collect these and create its own UniConfig notifications about specific events. Notifications from both NETCONF devices and UniConfig are published using Kafka.","The following notification types are available:","NETCONF notifications","Notifications about transactions","Audit logs (RESTCONF notifications)","Data-change events","Connection notifications","Each notification type is stored in its own topic in Kafka. Additionally, all notifications are stored in one table in the database.","notifications-in-cluster"]},{"l":"Kafka","p":["Apache Kafka is a publish–subscribe-based, durable messaging system that sends messages between processes, applications and servers. Within Kafka, you can define topics (categories), and applications can add, process and reprocess records.","In our specific case, UniConfig publishes notifications. Each type of notification is stored in a separate topic and can therefore be subscribed to independently. The names of topics and connection data are configurable in the file lighty-uniconfig-config.json."]},{"l":"NETCONF notifications","p":["RFC 5277 defines a mechanism where the NETCONF client indicates an interest in receiving event notifications from a NETCONF server by subscribing to event notifications. The NETCONF server replies whether the subscription request was successful and, if so, starts sending event notifications to the NETCONF client as events occur within the system. Event notifications are sent until either the NETCONF session or the subscription is terminated.","NETCONF notifications are categorised into so-called streams. The subscriber must choose which streams to receive. The default stream is named NETCONF."]},{"l":"Notifications about transactions","p":["This type of notification is generated after each commit in UniConfig.","It contains the following:","transaction id","calculate diff result","commit result"]},{"i":"audit-logs-restconf-notifications","l":"Audit logs (RESTCONF notifications)","p":["Below are three examples of notifications with the response body and the calculated difference result.","body","http-method","It contains the following:","query-parameters","request data","response data","source-address","source-port","status-code","The first example is for created data:","The response body does not need to be included in notifications. It can be configured using the includeResponseBody parameter in the application.properties file. The calculated difference result can also be part of the notification if the includeCalculateDiffResult parameter is set to true in the application.properties file.","The second example is for deleted data:","The third example is for updated data:","This type of notification is generated after each RESTCONF operation.","transaction id","uri","user-id"]},{"l":"Shell notifications","p":["This type of notification is generated after each shell operation.","It contains the following:","transaction id","request data","source-address","source-port","prompt","executed command","response data","output"]},{"l":"Data-change events","p":["A subscription step is required before data-change events are generated and published to Kafka. With the subscription, a user specifies the observed subtrees against data changes. Afterwards, data-change events are generated by UniConfig instances when a transaction is committed and the committed changes contain subscribed subtrees.","A sample data-change event captured by the Kafka console consumer:","For data-change events, the streamName is always 'DCE' and the identifier for the YANG notification is 'data-change-event'. The body contains the following:","subscription-id: Identifier of the subscription that triggers generation of the data-change-event. The subscription identifier makes it easier to associate subscriptions with received data-change-events than using a combination of multiple fields such as node identifier, topology identifier and subtree path.","transaction-id: Identifier of the committed transaction that triggered the data-change-event after the commit or checked-commit UniConfig operation.","edit - List of captured modifications done in the committed transaction.","Edit entry fields:","subtree-path: Relative path to the data-tree element at which the data change happened. The path is relative to the subtree-path specified during subscription.","data-before: JSON representation of the subtree data before the changes. If this field is not present, 'data-after' represents created data.","data-after: JSON representation of the subtree data including the changes. If this field is not present, 'data-before' represents removed data.","operation: Operation type of the data change event.","node-id: Node identifier of the data change event.","topology-id: Topology where the node exists. Can be either 'uniconfig' or 'unistore'."]},
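A hypothetical shape of one data-change event; the field names follow the list above, while the identifiers and values are made up for illustration:

```bash
cat <<'EOF'
{
  "identifier": "data-change-event",
  "streamName": "DCE",
  "body": {
    "subscription-id": "8e82453d-4ea8-4c26-a74e-50d855a721fa",
    "transaction-id": "d7ff736e-8efa-4cc5-9d27-b7f560a76ff3",
    "topology-id": "uniconfig",
    "node-id": "device1",
    "edit": [
      {
        "subtree-path": "interface=eth0",
        "operation": "replace",
        "data-before": { "enabled": false },
        "data-after": { "enabled": true }
      }
    ]
  }
}
EOF
```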
{"l":"Connection notifications","p":["Connection notifications are generated whenever the status of a node changes. For connection notifications, the streamName is always 'CONNECTION' and the identifier for the YANG notification is 'connection-notification'.","It contains the following:","topology id","node id","connection status","connection message","Supported topologies are cli, netconf and gnmi.","Sample connection notifications captured by the Kafka console consumer:","CLI disconnect notification:","NETCONF connect notification:"]},{"l":"Database entities","p":["body - full notification body in JSON format","creation time - time when the subscription was created","end time - time when collection of notifications ends","event time - time when the notification was generated","Example request for reading Kafka settings using RESTCONF:","Example request for reading notifications using RESTCONF:","Example request for reading subscriptions using RESTCONF:","identifier - name of the YANG notification","netconf-subscription","node id - id of the NETCONF node from which notifications should be collected","node id - node id of the NETCONF device for NETCONF notifications, or the identifier of the UniConfig instance for other types of notifications","notification","Notifications are stored in the notification table. It contains the following columns:","settings","start time - time when collection of notifications starts","stream name - name of the notification stream - NETCONF stream name or UniConfig-specific stream name","stream name - NETCONF stream name","The following three tables in the database are related to notifications:","The netconf-subscription table is used to track NETCONF notification subscriptions. It contains the following columns:","The settings table contains two columns: identifier and config. Records with the identifier kafka contain configurations for Kafka that can be modified at runtime.","UniConfig instance id - instance id of the UniConfig that is collecting notifications from the NETCONF device"]},{"l":"NETCONF subscriptions","p":["A subscription is required to receive NETCONF notifications from a NETCONF device. Subscriptions are created using an install request:","Subscriptions to notification streams are defined as a list with the name stream. There is one record for each stream. The only required parameter is stream-name. The following optional parameters are supported:","start-time - must be specified to enable replay; the subscription starts at the specified time.","stop-time - used with the optional replay feature to indicate the newest notifications of interest. If stop-time is not specified, notifications continue until the subscription is terminated. Must be used with, and set to be later than, start-time. Values in the future are valid.","The creation of a new subscription for a stream terminates all existing subscriptions for that stream."]},
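A hedged sketch of such an install request; the 'stream' list and its fields follow the description above, while the RPC path and the NETCONF connection parameters are assumptions:

```bash
# Subscribe to the default NETCONF stream while installing 'device1'.
curl -X POST 'http://localhost:8181/rests/operations/connection-manager:install-node' \
  -H 'Content-Type: application/json' \
  -d '{
        "input": {
          "node-id": "device1",
          "netconf": {
            "netconf-node-topology:host": "192.168.1.1",
            "netconf-node-topology:port": 830,
            "stream": [
              { "stream-name": "NETCONF", "start-time": "2024-01-01T00:00:00Z" }
            ]
          }
        }
      }'
```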
{"i":"monitoring-system---processing-netconf-subscriptions","l":"Monitoring system - processing NETCONF subscriptions","p":["Inside UniConfig, NETCONF notification subscriptions are processed in an infinite loop within the monitoring system. An iteration of the monitoring system loop consists of the following steps:","Check the global setting for NETCONF notifications","If turned off, release all NETCONF subscriptions and end the current iteration","Release cancelled subscriptions","Query free subscriptions from the DB, and for each one:","Create a notification session (create a mountpoint and register listeners)","Lock the subscription (set the UniConfig instance)","There is a hard limit on the number of sessions that a single UniConfig node can handle. If the limit is reached, the UniConfig node refuses any additional subscriptions.","The loop interval, hard subscription limit and maximum number of subscriptions processed per interval can be set in the file lighty-uniconfig-config.json."]},{"l":"Dedicated NETCONF session for subscription","p":["A NETCONF device may have the interleave capability, which indicates support for interleaving other NETCONF operations within a notification subscription. This means that the NETCONF server can receive, process and respond to NETCONF requests on a session with an active notification subscription. As not all devices support this capability, the common approach for devices 'with' and 'without' the interleave capability is to track notifications over a separate NETCONF session. To support this functionality, UniConfig creates a separate NETCONF session with a separate mount-point for every subscription. These mount points and sessions are automatically destroyed when the corresponding subscription is closed.","monitoring-system"]},{"l":"Subscription to data-change events"},{"l":"Creating a new subscription","p":["'BASE': Represents only a direct change of the node on the subtree-path, such as replacement, addition or deletion of the node.","'ONE': Represents a change (addition, replacement, or deletion) of the node on the subtree-path or one of its direct child elements.","'SUBTREE': Represents a change of the node or any of its child nodes, direct and nested. This scope is a superset of ONE and BASE. This is the default value.","captured data-change-events from the whole node configuration.","data-change-scope: Data-tree scope that specifies how granular the captured data-change-events propagated to Kafka should be. There are three options:","deleting an existing subscription","displaying information about the created subscription using an RPC","Example: Creating a subscription to the node device1 in the uniconfig topology, and to the whole configuration subtree '/interfaces'.","Example: Creating a subscription to the uniconfig topology and to the whole /interfaces configuration subtree.","node-id: Identifier for the node from which data-change-events are generated. This field is optional. If not given, a global subscription is created and data-change-events are generated for all nodes under the topology.","RPC input contains the following:","RPC output contains only the generated 'subscription-id' in UUID format. This subscription identifier represents a token that can be used for the following:","sorting received Kafka messages","Subscriptions to data-change events are created using the 'create-data-change-subscription' RPC. After the subscription is done, UniConfig listens to data-change events on the selected nodes and subtrees and distributes the corresponding messages to a dedicated Kafka topic.","subtree-path: Path to the subtree from which the user would like to receive data-change-events. The default path is '/'.","topology-id: Identifier for the topology where the specified node is placed."]},
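A sketch of creating such a subscription; the input fields follow the RPC description above, while the 'data-change-events' module prefix is an assumption:

```bash
# Create a subscription for node 'device1' limited to the /interfaces subtree.
curl -X POST \
  'http://localhost:8181/rests/operations/data-change-events:create-data-change-subscription' \
  -H 'Content-Type: application/json' \
  -d '{
        "input": {
          "topology-id": "uniconfig",
          "node-id": "device1",
          "subtree-path": "/interfaces",
          "data-change-scope": "SUBTREE"
        }
      }'
# Expected output shape: { "output": { "subscription-id": "<uuid>" } }
```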
{"l":"Removing a subscription","p":["Existing subscriptions can be removed using the delete-data-change-subscription RPC and the provided subscription-id. After a subscription is removed, UniConfig stops generating new data-change events related to the subscribed path.","RPC input contains only the subscription-id, a unique identifier for the subscription to data-change events. RPC output does not contain a body. The RPC returns 404 if no subscription exists for the provided identifier.","Example: Removing a subscription with the ID 8e82453d-4ea8-4c26-a74e-50d855a721fa."]},{"l":"Successful Example"},{"l":"Failed Example"},{"l":"Showing information about subscription","p":["The RPC show-subscription-data is used to display information about a created subscription.","RPC input contains the identifier of the target subscription.","RPC output for existing subscriptions contains 'topology-id', 'node-id', 'subtree-path' and 'data-change-scope' - the same fields that can also be specified in the 'create-data-change-subscription' RPC input.","If no subscription exists with the specified ID, the RPC returns a 404 status code with a standard RESTCONF error container.","Example: Displaying information"]},{"i":"successful-example-1","l":"Successful Example"},{"i":"failed-example-1","l":"Failed Example","p":["It is also possible to fetch all created subscriptions under a specific node or topology by sending a GET request to the data-change-subscriptions list under the node list item (operational data).","Example (there are two subscriptions under the device1 node):"]},{"l":"Configuration","p":["All notifications and the monitoring system can be enabled or disabled using the enabled flag.","All settings related to Kafka are grouped under the kafka property. For authentication, there are the username and password properties. For the Kafka connection, there is the kafkaServers property. This contains a list of Kafka servers as a combination of brokerHost and brokerListeningPort. The broker host can be either an IP address or a hostname.","archiveUrl - where to download Kafka from","Audit log settings are under the auditLogs property. Currently there is only one flag, includeResponseBody, which is used to enable or disable logging of the body of RESTCONF responses.","auditLogsEnabled","auditLogsTopicName - topic name for audit logs","blockingTimeout - How long the send() method and the creation of a connection for reading metadata will block (in ms).","cleanDataBeforeStart - whether the Kafka configuration should be cleared before start","Configurations for notifications are in the lighty-uniconfig-config.json file, under the notifications property. The entire configuration looks like this:","dataChangeEventsEnabled","dataChangeEventsTopicName - topic name for data-change-events","dataDir - Kafka data directory","deliveryTimeout - The upper bound on the time to report success or failure after a call to send() returns (in ms). Sets a limit on the total time that a record will be delayed prior to sending, the time to wait for acknowledgement from the broker (if expected) and the time allowed for retriable send failures.","enabled - flag that enables or disables embedded Kafka","installDir - directory where Kafka files should be placed","Kafka settings are also stored in the DB. This way, they can be changed at runtime using RESTCONF or UniConfig shell. Kafka settings are stored in the settings table.","maxAge - Maximum age of a record in the notifications table (in hours). Records older than this value are deleted. The default value is 100.","maxCount - Maximum number of records in the notifications table. If the number of records exceeds this value, the oldest record in the table is deleted. The default value is 10,000."
,"maxNetconfSubscriptionsHardLimit - Maximum number of subscriptions that a single UniConfig node can handle.","maxSubscriptionsPerInterval - The maximum number of free subscriptions that can be acquired in a single iteration of the monitoring system loop. If the number of free subscriptions is smaller than this value, all free subscriptions are processed. If the number of free subscriptions is larger than this value, only the specified number of subscriptions is acquired. The rest can be acquired during the next iterations of the monitoring system loop or by other UniConfig instances in the cluster. The default value is 10.","maxThreadPoolSize - The maximum thread pool size in the executor.","netconfNotificationsEnabled","netconfNotificationsTopicName - topic name for NETCONF notifications","optimalNetconfSubscriptionsApproachingMargin - The lower margin to calculate the optimal range start. The default value is 0.05.","optimalNetconfSubscriptionsReachedMargin - The higher margin to calculate the optimal range end. The default value is 0.10.","queueCapacity - The maximum capacity of the work queue in the executor.","rebalanceOnUCNodeGoingDownGracePeriod - Grace period for a UniConfig node going down. Other nodes will not restart subscriptions until the grace period has passed since a dead UniConfig node was last seen. The default value is 120 seconds.","requestTimeout - How long the producer waits for acknowledgement of a request (in ms). If no acknowledgement is received before the timeout period is over, the producer resends the request or, if retries are exhausted, fails it.","subscriptionsMonitoringInterval - How often the monitoring system loop runs and attempts to acquire free subscriptions. The value is given in seconds; the default value is 5.","These properties are under notificationDbTreshold. Both are implemented using database triggers. The triggers run on inserts into the notifications table.","Three (3) properties related to the monitoring system in clustered environments:","Three (3) properties related to the monitoring system:","Three (3) properties related to the timeout of messages to Kafka:","transactionNotificationsEnabled","transactionsTopicName - topic name for notifications about transactions","Two (2) properties related to the thread pool executor required to send messages to Kafka:","Two (2) properties used to limit the number of records in the notifications table in the database:","You can also set up embedded Kafka. These settings are grouped under the embeddedKafka property:","You can configure the names of all topics for every notification type. The following flags are used for this:","You can enable or disable each type of notification independently of the others. The following flags are used for this:"]},{"i":"kafka-client---example","l":"Kafka client - example","p":["To read notifications from Kafka, you can use the command line consumer. Run the following command in the Kafka installation directory:","It is important to properly set the hostname, port and topic name. The output after creation of a NETCONF notification looks something like this:"]}],
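A minimal sketch of such a consumer invocation; the hostname, port and topic name below are placeholders that must match your configuration:

```bash
# Read NETCONF notifications from the Kafka installation directory.
bin/kafka-console-consumer.sh \
  --bootstrap-server localhost:9092 \
  --topic netconf-notifications
```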
topologies)","additional context (random string, text column)","Data about transactions can be read using RESTCONF:","Example data about transactions:"]}],[{"l":"Templates Manager"},{"l":"Overview","p":["Templates can be used for reusing of some configuration and afterwards easier application of this configuration into target UniConfig nodes.","Basic properties of templates as they are implemented in UniConfig:","All templates are stored under 'templates' topology and each template is represented by separate 'node' list entry.","Whole template configuration is placed under'frinx-uniconfig-topology:configuration' container in the Configuration datastore. Because of this, configuration of template can be accessed and modified in the same way like modification of UniConfig node.","Templates are validated against single schema context. Schema context, against which validation is enabled, is selected at creation of template using 'uniconfig-schema-repository' query parameter. Value of the query parameter defines name of the schema repository that is placed under UniConfig distribution in form of the directory.","Currently implemented template features:","Variables- They are used for parametrisation of templates.","Tags- Tags can be used for selection of an operation that should be applied for the specific subtree at application of template to UniConfig node.","Schema validation of leaves and leaf-lists is adjusted, so it can accept both string with variables and original YANG type."]},{"l":"Latest-schema","p":["Latest-schema defines name of the schema repository of which built schema context is used for template validation. Latest-schema is used only if there is not 'uniconfig-schema-repository' query parameter when creating template. If 'uniconfig-schema-repository' query parameter is defined, latest-schema is ignored."]},{"l":"Configuration of the latest-schema","p":["Latest-schema can be set using PUT request. It will be placed in Config datastore. Name of directory has to point to existing schema repository that is placed under UniConfig distribution.","GET request can be used for check if latest-schema is placed in config datastore."]},{"l":"Auto-upgrading of the latest-schema","p":["Latest-schema can be automatically upgraded by UniConfig after installation of new YANG repository. YANG repository is installed after deploying of new type of NETCONF/GRPC device or after manual invocation of RPC for loading of new YANG repository from directory.","In order to enable auto-upgrading process, 'latestSchemaReferenceModuleName' must be specified in the'config/lighty-uniconfig-config.json' file:","After new YANG repository is installed, then UniConfig will look for revision of module'latestSchemaReferenceModuleName' in the repository. If found revision is more recent than the last cached revision, UniConfig will automatically write identifier of the fresh repository into 'latest-schema' configuration. Afterwards, 'latest-schema' is used by UniConfig the same way as it would be written manually via RESTCONF."]},{"l":"Variables","p":["Using variables it is possible to parametrise values in the template. 
[{"l":"Templates Manager"},{"l":"Overview","p":["Templates can be used to reuse parts of a configuration and then apply that configuration more easily to target UniConfig nodes.","Basic properties of templates as they are implemented in UniConfig:","All templates are stored under the 'templates' topology, and each template is represented by a separate 'node' list entry.","The whole template configuration is placed under the 'frinx-uniconfig-topology:configuration' container in the Configuration datastore. Because of this, the configuration of a template can be accessed and modified in the same way as a UniConfig node.","Templates are validated against a single schema context. The schema context against which validation is enabled is selected at creation of the template using the 'uniconfig-schema-repository' query parameter. The value of the query parameter defines the name of the schema repository that is placed under the UniConfig distribution in the form of a directory.","Currently implemented template features:","Variables - Used for parametrisation of templates.","Tags - Tags can be used to select the operation that should be applied to a specific subtree when the template is applied to a UniConfig node.","Schema validation of leaves and leaf-lists is adjusted so that it accepts both a string with variables and the original YANG type."]},{"l":"Latest-schema","p":["Latest-schema defines the name of the schema repository whose built schema context is used for template validation. Latest-schema is used only if the 'uniconfig-schema-repository' query parameter is not present when creating a template. If the 'uniconfig-schema-repository' query parameter is defined, latest-schema is ignored."]},{"l":"Configuration of the latest-schema","p":["Latest-schema can be set using a PUT request. It is placed in the Config datastore. The name of the directory has to point to an existing schema repository placed under the UniConfig distribution.","A GET request can be used to check whether latest-schema is placed in the Config datastore."]},{"l":"Auto-upgrading of the latest-schema","p":["Latest-schema can be automatically upgraded by UniConfig after installation of a new YANG repository. A YANG repository is installed after deploying a new type of NETCONF/GRPC device or after manual invocation of the RPC for loading a new YANG repository from a directory.","In order to enable the auto-upgrading process, 'latestSchemaReferenceModuleName' must be specified in the 'config/lighty-uniconfig-config.json' file:","After a new YANG repository is installed, UniConfig looks for the revision of the module 'latestSchemaReferenceModuleName' in the repository. If the found revision is more recent than the last cached revision, UniConfig automatically writes the identifier of the fresh repository into the 'latest-schema' configuration. Afterwards, 'latest-schema' is used by UniConfig the same way as if it had been written manually via RESTCONF."]},{"l":"Variables","p":["Using variables, it is possible to parametrise values in the template. Structural parametrisation is not currently supported.","Properties:","Format of the variable: '{$variable-id}'.","Variables can be set on each leaf and leaf-list in the template.","A single leaf or leaf-list may contain multiple variables.","The key of a list can also contain a variable.","Variables are substituted by provided values when the template is applied to a UniConfig node.","It is possible to escape characters of the variable pattern ('$', '{', '}') so that they are interpreted as values and not as part of the variable.","A variable identifier may contain any UTF-8 characters. The characters '$', '{', '}' must be escaped if they are part of the variable identifier."]},{"l":"Examples with variables","p":["A. Leaf with one variable","Application of the following values to variables 'var-a' and 'var-b': 'var-a' = ['10', '20', '30'], 'var-b' = ['50', '70', '60'].","Application of values - 'var-x': 'next', 'var-y': '7', 'var-1': '10', 'var-2': '9'. Leaf 'leaf-a' has 'string' type and 'leaf-b' has 'int32' type.","Application of values '10' and 'false' to 'var-1' and 'var-2'. Leaf 'leaf-a' has 'int32' type and 'leaf-b' has 'boolean' type.","B. Leaf with multiple variables","Both variables must be substituted by the same number of values.","C. Leaf-list with one variable","D. Leaf-list with multiple variables","E. Leaf-list with an entry that contains multiple variables","F. Leaves and leaf-lists with escaped special characters","If a leaf-list is marked as \"ordered-by user\", the order of leaf-list elements is preserved during the substitution process.","It is possible to substitute both variables with one or multiple values.","Leaf 'leaf-a' contains 2 variables and surrounding text that is not part of any variable.","Leaf 'leaf-b' contains 2 variables without additional text - the substituted values of these variables are concatenated when the template is applied.","Leaf-list 'leaf-list-a' contains 2 variables inside one leaf-list entry: 'var-a' and 'var-b'.","Leaf-list 'leaf-list-a' contains 2 variables with identifiers 'var-a' and 'var-2'. The string \"str3\" represents a constant value.","Leaf-list 'leaf-list-a' contains a variable with the identifier 'var-x'.","Substitution of 'var-1' by 'prefix' and 'var-{2}' by '10':","Substitution of 'var-a' with the texts 'str1', 'str2' and 'var-b' with 'str4' results in ('string' type):","Substitution of 'var-x' with the numbers '10', '20', '30' results in ('int32' type):","The following example demonstrates escaping of special characters outside of the variable identifier (leaf-list 'leaf-list-a') and inside of the variable identifier (leaf 'leaf-a').","The following example shows 2 leaves with 2 variables: 'var-1' and 'var-2'.","This variable can be substituted by one or multiple values. If multiple values are provided in the apply-template RPC, they are 'unwrapped' into the leaf-list in the form of additional leaf-list entries.","Unescaped identifier of the leaf 'leaf-a': 'var-{2}'."]},
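A compact illustration of the substitution behaviour described above, reusing the values from examples A and B (the leaf names and surrounding text are illustrative):

```bash
# Template fragment on the left, result after apply-template on the right.
#   "leaf-a": "str-{$var-x}-{$var-y}"  ->  "leaf-a": "str-next-7"      (var-x=next, var-y=7)
#   "leaf-list-a": ["{$var-x}"]        ->  "leaf-list-a": [10, 20, 30] (var-x=['10','20','30'])
```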
{"l":"Tags","p":["By default, all templates have the 'merge' tag assigned to the root 'configuration' container - if the template doesn't explicitly define other tags in the data-tree, the whole template is merged into the target UniConfig node configuration when the apply-template RPC is executed. However, it is possible to set custom tags on data-tree elements of the template.","Properties:","Tags are represented in UniConfig using node attributes with the following identifier: 'template-tags:operation'.","In RESTCONF, attributes are encoded using a special notation that is explained in the 'RESTCONF' user guide.","Tags are inherited through the data-tree of the template. If a data-tree element doesn't define any tag, it is inherited from the parent element.","Only a single tag can be applied to one data node.","Tags can be applied to the following YANG structures: container, list, leaf-list, leaf, list entry, leaf-list entry.","Currently, the following tags are supported:","merge: Merges with a node if it exists, otherwise creates the node.","replace: Replaces a node if it exists, otherwise creates the node.","delete: Deletes the node.","create: Creates a node. The node must not already exist. An error is raised if the node exists.","update: Merges with a node if it exists. If it does not exist, it will not be created."]},{"l":"Examples with tags","p":["A. Tags applied to container, list, and leaf","Template with the name 'user_template' that contains 'merge', 'replace', and 'create' tags:","Description of all operations, in the correct order, that are done based on the defined tags:","Container 'configuration' will be merged into the target UniConfig node (implicit root operation).","Container 'system:system' will be updated - its content is merged only if it has already been created.","The whole 'users' list will be replaced in the target UniConfig node.","The leaf named 'password' will be created at the target UniConfig node - it must not exist under the 'users' list entry, otherwise an error is raised.","B: Tags applied to leaf-list, leaf-list entry, and list entry:","The following JSON represents the content of a sample template with multiple tags:","The 'replace' tag is applied to a single 'my-list' list entry","The 'merge' tag is applied to the whole 'leaf-list-a' leaf-list","The 'create' tag is applied to the whole 'leaf-list-b' leaf-list","The 'delete' tag is applied to the single 'leaf-list-b' leaf-list entry with value '10'"]},{"l":"Creation of template","p":["A new template can be created by sending a PUT request to a new template node under the 'templates' topology with a populated 'configuration' container. The name of the template equals the name of the 'node' list entry. This RESTCONF call must specify the schema cache repository using the 'uniconfig-schema-repository' query parameter in order to successfully match the sent data-tree with the correct schema context (it is usually associated with some type of NETCONF device)."]},{"i":"example---creation-of-template","l":"Example - creation of template","p":["The following example shows creation of a new template with the name 'interface_template' using the 'schemas_1' schema repository. The body of the PUT request contains the whole 'configuration' container."]},
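A sketch of that PUT request; the topology path and query parameter follow the description above, while the body is a minimal illustrative configuration rather than a complete one:

```bash
# Create template 'interface_template' validated against the 'schemas_1' repository.
curl -X PUT \
  'http://localhost:8181/rests/data/network-topology:network-topology/topology=templates/node=interface_template/frinx-uniconfig-topology:configuration?uniconfig-schema-repository=schemas_1' \
  -H 'Content-Type: application/json' \
  -d '{
        "frinx-uniconfig-topology:configuration": {
          "interfaces": {
            "interface": [ { "name": "eth-0/{$interface-id}" } ]
          }
        }
      }'
```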
{"i":"readupdatedelete-template","l":"Read/update/delete template","p":["All CRUD operations with templates can be done using the standard RESTCONF PUT/DELETE/POST/PLAIN PATCH methods. As long as the template contains some data under the 'configuration' container, subsequent RESTCONF calls that work with templates don't have to contain the 'uniconfig-schema-repository' query parameter, since the type of the device is already known."]},{"i":"examples---restconf-operations","l":"Examples - RESTCONF operations","p":["Reading a specific subtree under 'interface_template' - the unit with name '{$unit-id}' that is placed under the interface with name 'eth-0/{$interface-id}'.","Changing the 'update' tag of the 'address' list entry to the 'create' tag using the PLAIN-PATCH RESTCONF method."]},{"l":"RPC get-template-info","p":["This RPC shows information about all variables in the specified template. The RPC input must contain the template name."]},{"i":"creation-of-template-1","l":"Creation of template"},{"l":"Usage of RPC"},{"l":"RPC get-template-nodes","p":["This RPC returns all templates from the template topology. No input body is required."]},{"l":"Successful example","p":["There are no templates in the template topology."]},{"i":"successful-example-1","l":"Successful example","p":["There is a template called 'test-template' in the template topology."]},{"l":"Upgrading template to latest yang repository","p":["A template can be upgraded to the latest YANG repository using the 'upgrade-template' RPC. This procedure consists of:","Read template - Reading of the template configuration from the 'templates' topology in the Configuration datastore.","Version-drop - Conversion of the template into the target schema context that is created by the specified yang-repository. Because of this feature, it is possible to migrate a template between different versions of devices with different revisions of YANG schemas but with a similar structure. Version-drop is also aware of the 'ignoredDataOnWriteByExtensions' RESTCONF filtering mechanism.","Removal of previous template / writing new template - If 'upgraded-template-name' is not specified in the RPC input, the previous template is deleted and replaced by the new one. If it is specified, the previous template is not deleted.","Description of input RPC fields:","template-name: Name of the existing input template. This field is mandatory.","upgraded-template-name: Name of the upgraded/new template. This field is optional.","yang-repository: Name of the YANG repository against which version-dropping is used. This field is optional. If no yang-repository is specified, the latest YANG repository is used.","Description of fields in RPC response:","No fields are used, only HTTP response codes [200 - OK, 404 - Fail]"]},{"i":"usage-of-rpc-1","l":"Usage of RPC"},
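A sketch of an upgrade-template call; the input fields follow the description above, while the 'template-manager' module prefix and the repository/template names are assumptions:

```bash
# Upgrade 'interface_template' against the 'schemas_2' repository, keeping the
# original template by giving the upgraded copy a new name.
curl -X POST 'http://localhost:8181/rests/operations/template-manager:upgrade-template' \
  -H 'Content-Type: application/json' \
  -d '{
        "input": {
          "template-name": "interface_template",
          "upgraded-template-name": "interface_template_v2",
          "yang-repository": "schemas_2"
        }
      }'
```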
{"l":"Auto-upgrading of templates","p":["This feature is used to automatically upgrade all stored templates using the old YANG repository to the latest YANG repository with help from the version-drop procedure. For the auto-upgrading process to work, the latest YANG repository must already be configured. The upgrade process must be explicitly enabled in the configuration file and occurs when UniConfig is started.","There is also an option to back up templates before the upgrade with the standard rotation procedure. The names of backed-up templates follow the pattern '<template-name> backup <index>', where '<template-name>' represents the name of the original template and '<index>' represents the backup index. The most recent backup index is always '0' and older ones are rotated by incrementing the corresponding index. If a backed-up template reaches the configured limit (maximum number of backups), it is permanently removed from the database.","Overview of available settings ('config/lighty-uniconfig-config.json'):","enabledTemplatesUpgrading - Enables the auto-upgrading process at UniConfig startup. If disabled, the other setting is ignored.","backupTemplatesLimit - Maximum number of stored backup templates. If exceeded, older templates are removed during the rotation procedure. If set to 0, templates are not backed up at all."]},{"l":"Application of template","p":["Application of tags - The data-tree of the template is streamed and the data is applied to the target UniConfig node recursively, based on the tags set on data elements. The UniConfig node configuration is updated only in the Configuration datastore.","Description of fields in RPC response:","Description of input RPC fields:","error-message (optional): Description of the error that occurred during application of the template.","error-type (optional): Type of the error.","leaf-list-values: List of values - can be used only with leaf-lists. Special characters ('$', '{', '}') must be escaped.","leaf-value: Scalar value of the variable. Special characters ('$', '{', '}') must be escaped.","node-id: Target UniConfig node identifier (key of the list).","node-result: Results per target UniConfig node. The rule is as follows: all input UniConfig node IDs must also be present in the response.","overall-status: Overall status of the operation as a whole. If application of the template fails on at least one UniConfig node, the overall-status is set to 'fail' (no modification is done in the datastore). Otherwise, it is set to 'complete'.","Processing template configuration","Read template - Reading of the template configuration from the 'templates' topology in the Configuration datastore.","RPC apply-template","status: Status of the operation: 'complete' or 'fail'.","String-substitution - Substitution of variables by provided values, or by default values if no values are provided for some variables and the leaf/leaf-list defines a default value. If some variables cannot be substituted (for example, the user forgot to specify the input value of a variable), an error is returned.","A template can be applied to UniConfig nodes using the 'apply-template' RPC. This procedure consists of the following steps:","template-node-id: Name of the existing input template.","The following sequence diagram and nested activity diagram show the process of the 'apply-template' RPC in detail.","uniconfig-node-id: Target UniConfig node identifier.","uniconfig-node: List of target UniConfig nodes to which the template is applied ('uniconfig-node-id' is the key).","variable-id: Unescaped variable identifier.","variable: List of variables and substituted values that must be used when the template is applied to a UniConfig node. Variables must be set per target UniConfig node, since it is common for values of variables to differ between devices. The leaf 'variable-id' represents the key of this list.","Version-drop - Conversion of the template into the target schema context that is used by the target UniConfig node. This component also drops unsupported data from the input template. Because of this feature, it is possible to apply a template between different versions of devices with different revisions of YANG schemas but with a similar structure. Version-drop is also aware of the 'ignoredDataOnWriteByExtensions' RESTCONF filtering mechanism."]},
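A sketch of an apply-template call; the input fields follow the RPC description above, while the 'template-manager' module prefix and the variable name are assumptions:

```bash
# Apply template 'service_group' to node 'dev1', substituting one variable.
curl -X POST 'http://localhost:8181/rests/operations/template-manager:apply-template' \
  -H 'Content-Type: application/json' \
  -d '{
        "input": {
          "template-node-id": "service_group",
          "uniconfig-node": [
            {
              "uniconfig-node-id": "dev1",
              "variable": [ { "variable-id": "unit-id", "leaf-value": "7" } ]
            }
          ]
        }
      }'
```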
{"i":"examples---apply-template-calls","l":"Examples - apply-template calls","p":["Successful application of the template 'service_group' to 2 UniConfig nodes - 'dev1' and 'dev2'.","Failed application of the template 'temp1' - the template doesn't exist.","Failed application of the template 'service_group' to 2 UniConfig nodes","'dev1' and 'dev2' - the user hasn't provided values for all required variables.","Failed application of the template 'redundancy_template' to UniConfig node 'dev1' - the type of the substituted variable value is invalid (failed regex constraint)."]},{"l":"RPC create-multiple-templates","p":["One or more new templates can be created by this RPC. Templates are parsed and written in parallel for better performance. If the specified templates already exist, their configuration is replaced. Execution of the RPC is atomic - either all templates are successfully created, or no changes are made in the UniConfig transaction.","Description of input RPC fields:","template-name: Name of the created template.","yang-repository: YANG schema repository used for parsing of the template configuration. Default value: 'latest'.","template-configuration: The whole template configuration.","tags: List of template tags that are written to the specified paths in all created templates. The specified tag type must be prefixed with the 'template-tags' module name, based on RFC 8040 formatting of identityref.","Only template-name and template-configuration are mandatory fields."]},{"l":"Examples","p":["Successful creation of templates.","Failed to find the YANG schema repository.","Failed to parse the template configuration.","Creation of 2 templates with separately specified template tags - the 'replace' tag is added to the '/acl/category' and '/services/group=default/types' elements, while 'create' is added to the '/services' element."]}],
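A sketch of a create-multiple-templates call; the field names follow the RPC description above, while the module prefix and the 'templates' list wrapper are assumptions:

```bash
# Create two templates atomically; either both succeed or no changes are made.
curl -X POST \
  'http://localhost:8181/rests/operations/template-manager:create-multiple-templates' \
  -H 'Content-Type: application/json' \
  -d '{
        "input": {
          "templates": [
            {
              "template-name": "temp1",
              "yang-repository": "latest",
              "template-configuration": { "frinx-uniconfig-topology:configuration": {} }
            },
            {
              "template-name": "temp2",
              "template-configuration": { "frinx-uniconfig-topology:configuration": {} }
            }
          ]
        }
      }'
```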
[{"i":"uniconfig---sending-and-receiving-data-restconf","l":"UniConfig - Sending and receiving data (RESTCONF)"},{"l":"Overview","p":["RESTCONF is described in RESTCONF RFC 8040. Put simply, RESTCONF represents a REST API for accessing datastores and UniConfig operations."]},{"l":"Datastores","p":["There are two datastores:","Config: Contains data representing the intended state. It is possible to read and write via RESTCONF.","Operational: Contains data representing the actual state. It is possible only to read via RESTCONF.","Each request must start with the URI /rests/. By default, RESTCONF listens on port 8181 for HTTP requests."]},{"l":"REST Operations","p":["RESTCONF supports the OPTIONS, GET, PUT, POST, PATCH, and DELETE operations. Request and response data can be in either the XML or JSON format.","XML structures according to YANG are defined at: XML-YANG.","JSON structures are defined at: JSON-YANG.","Data in the request must set the Content-Type field correctly in the HTTP header with the allowed value of the media type. The media type of the requested data must be set in the Accept field. Get the media types for each resource by calling the OPTIONS operation.","Most of the paths use Instance Identifier. <identifier> is used in the explanation of the operations and must adhere to these rules:","The identifier must start with <moduleName>:<nodeName>, where <moduleName> is the name of the YANG module and <nodeName> is the name of a node in the module. If the next node name is placed in the same namespace as the previous one, it is sufficient to use just <nodeName> after the first definition of <moduleName>:<nodeName>. Each <nodeName> has to be separated by /.","<nodeName> can represent a data node which is a list node, container, leaf, or leaf-list YANG built-in type. If the data node is a list, the ordered keys of the list must be defined behind the data node name, for example, <nodeName>=