diff --git a/docs/advanced_usages/as-python-lib.md b/docs/advanced_usages/as-python-lib.md
index a9a022185..b5ba2d0c0 100644
--- a/docs/advanced_usages/as-python-lib.md
+++ b/docs/advanced_usages/as-python-lib.md
@@ -15,81 +15,124 @@ inventory = AntaInventory.parse(
     password="password",
     enable_password="enable",
     timeout=1,
+    insecure=False,
 )
 ```
 
 Then it is easy to get all devices or only active devices with the following method:
 
 ```python
-# print the non reachable devices
-for device in inventory.get_inventory(established_only=False):
-    if device.established is False:
-        print(f"Could not connect to device {device.host}")
-
-# run an EOS commands list on the reachable devices from the inventory
-for device in inventory.get_inventory(established_only=True):
-    device.session.runCmds(
-        1, ["show version", "show ip bgp summary"]
+"""
+Example
+"""
+# This is needed to run the script for python < 3.10 for typing annotations
+from __future__ import annotations
+
+import asyncio
+
+from anta.inventory import AntaInventory
+
+
+async def main_loop(inv: AntaInventory) -> None:
+    """
+    Take an inventory and:
+    1. try to connect to every device in the inventory
+    2. print a message for every device where the connection could not be established
+    """
+    await inv.connect_inventory()
+
+    # Print a list of devices that could not be connected to
+    for device in inv.get_inventory(established_only=False).values():
+        if device.established is False:
+            print(f"Could not connect to device {device.name}")
+
+if __name__ == "__main__":
+    # Create the inventory
+    inventory = AntaInventory.parse(
+        inventory_file="inv.yml",
+        username="arista",
+        password="@rista123",
+        timeout=15,
     )
+
+    # Run the main asyncio entry point
+    asyncio.run(main_loop(inventory))
 ```
-You can find the ANTA Inventory module [here](../api/inventory.md).
-??? note "How to create your inventory file"
-    Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files.
+To run a list of EOS commands on the reachable devices from the inventory:
+
+```python
+"""
+Example
+"""
+# This is needed to run the script for python < 3.10 for typing annotations
+from __future__ import annotations
+import asyncio
+from pprint import pprint
-## Use tests from ANTA
+from anta.inventory import AntaInventory
+from anta.models import AntaCommand
-All the test functions are based on the exact same input and returns a generic structure with different information.
-### Test input
+async def main_loop(inv: AntaInventory, commands: list[str]) -> dict[str, list[AntaCommand]]:
+    """
+    Take an inventory and a list of commands and:
+    1. try to connect to every device in the inventory
+    2. collect the results of the commands towards each device
-Any test input is based on an `InventoryDevice` object and a list of options. Here is an example to check uptime and check it is higher than `minimum` option.
+    Returns:
+      a dictionary where the key is the device name and the value is the list of AntaCommand run towards the device
+    """
+    await inv.connect_inventory()
+
+    # Make a list of coroutines to run commands towards each connected device
+    coros = []
+    # dict to keep track of the commands per device
+    result_dict = {}
+    for name, device in inv.get_inventory(established_only=True).items():
+        anta_commands = [AntaCommand(command=command, ofmt="json") for command in commands]
+        result_dict[name] = anta_commands
+        coros.append(device.collect_commands(anta_commands))
+
+    # Run the coroutines
+    await asyncio.gather(*coros)
+
+    return result_dict
+
+
+if __name__ == "__main__":
+    # Create the inventory
+    inventory = AntaInventory.parse(
+        inventory_file="inv.yml",
+        username="arista",
+        password="@rista123",
+        timeout=15,
     )
-
-```python
-def verify_uptime(device: InventoryDevice, minimum: int = None) -> TestResult:
+    # Create a list of commands with json output
+    commands = ["show version", "show ip bgp summary"]
+
+    # Run the main asyncio entry point
+    res = asyncio.run(main_loop(inventory, commands))
+
+    pprint(res)
 ```
-In general, [`InventoryDevice`](../api/inventory.models.md) is an object created by `AntaInventory`. But it can be manually generated by following required data model.
+!!! tip
+    If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html
-Here is an example of a list of `InventoryDevice`
+You can find the ANTA Inventory module [here](../api/inventory.md).
-```python
-[
-    {
-        "InventoryDevice(host=IPv4Address('192.168.0.17')",
-        "username='ansible'",
-        "password='ansible'",
-        "session=",
-        "url='https://ansible:ansible@192.168.0.17/command-api'",
-        "established=True",
-        "is_online=True",
-        "hw_model=cEOS-LAB",
-    },
-
-    {
-        "InventoryDevice(host=IPv4Address('192.168.0.2')",
-        "username='ansible'",
-        "password='ansible'",
-        "session=None",
-        "url='https://ansible:ansible@192.168.0.2/command-api'",
-        "established=False"
-        "is_online=False",
-        "tags": ['dc1', 'spine', 'pod01'],
-        "hw_model=unset",
-    }
-]
-```
+??? note "How to create your inventory file"
+    Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files.
-### Test output
-All tests return a TestResult structure with the following elements:
+## Use tests from ANTA
-- `result`: Can be `success`, `skipped`, `failure`, `error` and report result of the test
-- `host`: IP address of the tested device
-- `test`: Test name runs on `host`
-- `message`: Optional message returned by the test.
+All the test classes inherit from the same abstract base class `AntaTest`. The class definition indicates which commands are required for the test, and the user should focus only on writing the `test` function with its optional keyword arguments. Upon creation, an instance of the class instantiates a TestResult object that can be accessed later on to check the status of the test (`unset`, `skipped`, `success`, `failure`, `error`).
 
 ### Test structure
 
@@ -123,12 +166,18 @@ class VerifyTemperature(AntaTest):
     Verifies device temparture is currently OK.
     """
 
+    # The test name
     name = "VerifyTemperature"
+    # A small description of the test, usually the first line of the class docstring
     description = "Verifies device temparture is currently OK"
+    # The category of the test, usually the module name
     categories = ["hardware"]
+    # The command(s) used for the test. Could be a template instead.
     commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
 
+    # Decorator
     @AntaTest.anta_test
+    # abstract method that must be defined by the child Test class
     def test(self) -> None:
         """Run VerifyTemperature validation"""
         command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
@@ -139,7 +188,7 @@ class VerifyTemperature(AntaTest):
             self.result.is_failure(f"Device temperature is not OK, systemStatus: {temperature_status }")
 ```
 
-When you run the test, object will automatically call its `anta.models.AntaTest.collect()` method to get device output. This method does a loop to call `anta.inventory.models.InventoryDevice.collect()` methods which is in charge of managing device connection and how to get data.
+When you run the test, the object will automatically call its `anta.models.AntaTest.collect()` method to get the device output for each command if no pre-collected data was given to the test. This method loops over calls to `anta.inventory.models.InventoryDevice.collect()`, which is in charge of managing the device connection and how to get the data.
 
 ??? info "run test offline"
     You can also pass eos data directly to your test if you want to validate data collected in a different workflow. An example is provided below just for information:
@@ -149,7 +198,8 @@ When you run the test, object will automatically call its `anta.models.AntaTest.
     asyncio.run(test.test())
     ```
 
-test function is always the same and __must__ be defined with the `@AntaTest.anta_test` decorator. This function takes at least one argument which is a `anta.inventory.models.InventoryDevice` object and can have multiple additional parameters depending of your test definition. All parameters __must__ come with a default value and the test function __should__ validate the parameters values.
+The `test` function is always the same and __must__ be defined with the `@AntaTest.anta_test` decorator. This function takes at least one argument, which is an `anta.inventory.models.InventoryDevice` object.
+In some cases a test relies on additional inputs from the user, for instance the number of expected peers. All parameters __must__ come with a default value and the test function __should__ validate the parameter values (at this stage this is the only place where validation can be done, but there are plans to improve this in the future).
 
 ```python
 class VerifyTemperature(AntaTest):
@@ -162,10 +212,18 @@ class VerifyTransceiversManufacturers(AntaTest):
     ...
     @AntaTest.anta_test
     def test(self, manufacturers: Optional[List[str]] = None) -> None:
+        # validate the manufacturers parameter
         pass
 ```
 
-The test itself does not return any value, but the result is directly availble from your object and exposes a `anta.result_manager.models.TestResult` object with result, name of the test and optional messages.
+The test itself does not return any value, but the result is directly available from your AntaTest object, which exposes a `anta.result_manager.models.TestResult` object with the result, the name of the test and optional messages:
+
+- `name` (str): Device name where the test has run.
+- `test` (str): Test name run on the device.
+- `test_category` (List[str]): List of test categories the test belongs to.
+- `test_description` (str): Test description.
+- `results` (str): Result of the test. Can be one of ["unset", "success", "failure", "error", "skipped"].
+- `messages` (List[str], optional): Messages to report after the test, if any.
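+
+As a quick sketch (assuming `test` is an AntaTest instance that has already been run, as in the example below), these fields can be read straight from `test.result`:
+
+```python
+# `test` is a hypothetical AntaTest instance that has already been run
+print(test.result.name)      # name of the device the test ran against
+print(test.result.messages)  # optional list of messages reported by the test
+```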
 ```python
 from anta.tests.hardware import VerifyTemperature
@@ -197,10 +255,31 @@ cmd1 = AntaCommand(command="show zerotouch")
 cmd2 = AntaCommand(command="show running-config diffs", ofmt="text")
 ```
 
+!!! tip "Command revision and version"
+    * Most EOS commands return a JSON structure according to a model (some commands may not be modeled, hence the occasional need for the `text` output format).
+    * The model can change over time (e.g. when adding a feature) and when the model is changed in a non-backward-compatible way, the __revision__ number is bumped. The initial model starts with __revision__ 1.
+    * A __revision__ applies to a particular CLI command whereas a __version__ is global to an eAPI call. The __version__ is internally translated to a specific __revision__ for each CLI command in the RPC call. The currently supported __version__ values are `1` and `latest`.
+    * A __revision takes precedence over a version__ (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned).
+    * By default eAPI returns the first revision of each model to ensure that integration with existing tools is not broken when upgrading. This is done by defaulting to `version=1` in eAPI calls.
+
+    By default, ANTA uses `version="latest"` in AntaCommand. For some commands, you may want to run them with a different revision or version.
+
+    For instance, a test that runs `show bfd peers` can leverage the first revision of the model:
+
+    ```python
+    # revision 1 as later revisions introduce additional nesting for type
+    commands = [AntaCommand(command="show bfd peers", revision=1)]
+    ```
+
 #### `anta.models.AntaTemplate`
 
 Because some command can require more dynamic than just a command with no parameter provided by user, ANTA supports command template: you define a template in your test class and user provide parameters when creating test object.
 
+!!! warning "Warning on AntaTemplate"
+    * In its current version, an AntaTest class supports only __ONE__ AntaTemplate.
+    * The current interface to pass template parameters to a template is an area of future improvement. Feedback is welcome.
+
 ```python
 class RunArbitraryTemplateCommand(AntaTest):
diff --git a/docs/advanced_usages/custom-tests.md b/docs/advanced_usages/custom-tests.md
index d5ada54d9..45724a071 100644
--- a/docs/advanced_usages/custom-tests.md
+++ b/docs/advanced_usages/custom-tests.md
@@ -70,8 +70,8 @@ class VerifyTemperature(AntaTest):
 
 Besides these 3 main imports, anta provides some additional and optional decorators:
 
-- `anta.test.skip_on_platforms`: To skip a test for a function not available for some platform
-- `anta.tests.check_bgp_family_enable`: To run tests only if specific BGP family is active.
+- `anta.decorators.skip_on_platforms`: To skip a test for a function not available for some platform
+- `anta.decorators.check_bgp_family_enable`: To run tests only if specific BGP family is active.
 
 ```python
@@ -114,9 +114,6 @@ __Commands to run__
 
 - `commands`: a list of command to run. This list _must_ be a list of `AntaCommand` which is described in the next part of this document.
 - `template`: a command template (`AntaTemplate`) to run where variables are provided during test execution.
 
-!!! warning ""
-    It is either `commands` or `template`. But not both.
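+
+For illustration, reusing the command from the `VerifyTemperature` example earlier in this guide, a concrete `commands` declaration could look like this (any other list of `AntaCommand` works the same way):
+
+```python
+class VerifyTemperature(AntaTest):
+    ...
+    commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
+```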
-
 ```python
 from __future__ import annotations
@@ -138,7 +135,8 @@ class (AntaTest):
             AntaCommand(
                 command="",
                 ofmt="",
-                version=""
+                version="",
+                revision="", # revision has precedence over version
             )
         ]
 ```
@@ -157,6 +155,7 @@ The code here can be very simple as well as very complex and will depend of what
 ```python
 class (AntaTest):
     ...
+    @AntaTest.anta_test
     def test(self) -> None:
         pass
 ```
@@ -166,20 +165,28 @@ If you want to support option in your test, just declare your options in your te
 ```python
 class (AntaTest):
     ...
-    def test(self, my_param1: str) -> None:
+    @AntaTest.anta_test
+    def test(self, my_param1: Optional[str] = None) -> None:
         pass
 ```
 
+The options __must__ be optional keyword arguments.
+
 ### Check inputs
 
 If your test has some user inputs, you first have to validate the supplied values are valid. If it is not valid, we expect `TestResult` to return `skipped` with a custom message.
 
 ```python
-# Check if test option is correct
-if not minimum:
-    self.result.is_skipped("verify_dynamic_vlan was run without minimum value set")
-else:
+class (AntaTest):
     ...
+    @AntaTest.anta_test
+    def test(self, minimum: Optional[int] = None) -> None:
+        # Check if test option is correct
+        if not minimum:
+            self.result.is_skipped("verify_dynamic_vlan was run without minimum value set")
+            return
+        # continue test..
+        ...
 ```
 
 ### Implement your logic
 
@@ -189,15 +196,24 @@ Here you implement your own logic. In general, the first action is to send comma
 
 In the example below, we request the list of vlans configured on device and then count all the vlans marked as dynamic
 
 ```python
-# Grab data for your command
-command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
-
-# Do your test: In this example we count number of vlans with field dynamic set to true
-num_dyn_vlan = len([ vlan for vlan,data in command_output['vlans'].items() if command_output['dynamic'] is True])
-if num_dyn_vlan >= minimum:
-    self.result.is_success()
-else:
-    self.result.is_failure(f"Device has {num_dyn_vlan} configured, we expect at least {minimum}")
+class (AntaTest):
+    ...
+    @AntaTest.anta_test
+    def test(self, minimum: Optional[int] = None) -> None:
+        # Check if test option is correct
+        if not minimum:
+            self.result.is_skipped("verify_dynamic_vlan was run without minimum value set")
+            return
+
+        # Grab data for your command
+        command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
+
+        # Do your test: in this example we count the number of vlans with the field dynamic set to true
+        num_dyn_vlan = len([vlan for vlan, data in command_output['vlans'].items() if data['dynamic'] is True])
+        if num_dyn_vlan >= minimum:
+            self.result.is_success()
+        else:
+            self.result.is_failure(f"Device has {num_dyn_vlan} dynamic vlans configured, we expect at least {minimum}")
 ```
 
 As you can see there is no error management to do in your code. Everything is packaged in `anta_tests` and below is a simple example of error captured with an incorrect JSON key in the code above:
 
 ```
 ERROR    Exception raised for test verify_dynamic_vlan (on device 192.168.0.10) - 'dynamic'
 ```
 
@@ -209,7 +225,7 @@ ERROR Exception raised for test verify_dynamic_vlan (on device 192.168.0.10)
 !!! info "Get stack trace for debugging"
     If you want to access to the full exception stack, you can run your test with logging level set to `DEBUG`.
     With ANTA cli, it is available with following option:
 
     ```bash
-    $ anta nrfu text --catalog test_custom.yml --log-level debug
+    $ ANTA_DEBUG=True anta nrfu text --catalog test_custom.yml --log-level debug
     ```
 
 ## Create your catalog
@@ -239,4 +255,4 @@ leaf04 :: verify_dynamic_vlan :: SUCCESS
 ```
 
 !!! warning "Install your python package"
-    Anta uses Python path to access to your test. So it is critical to have your tests library installed correctly as explained at the begining of this page.
+    ANTA uses the Python path to access your tests, so it is critical to have your test library installed correctly as explained at the beginning of this page (in short, your module should be in your `PYTHONPATH` so it can be loaded).
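+
+    A minimal sketch of that shortcut, with hypothetical paths: expose a not-yet-packaged test module through `PYTHONPATH` before invoking ANTA.
+
+    ```bash
+    # Hypothetical location of the directory containing your custom test module(s)
+    export PYTHONPATH="${PYTHONPATH}:/path/to/my-anta-tests"
+    anta nrfu text --catalog test_custom.yml
+    ```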