From a5cdd2ae2f6c837ad4f38c1f0659a91c26c05456 Mon Sep 17 00:00:00 2001 From: Mahesh Date: Wed, 27 Dec 2023 07:21:16 +0000 Subject: [PATCH 1/5] issue-503: Refactored bfd testcase with updated pass/fail criteria --- anta/tests/bfd.py | 84 +++++++ anta/tests/routing/generic.py | 25 -- examples/tests.yaml | 5 +- .../units/anta_tests/routing/test_generic.py | 123 +--------- tests/units/anta_tests/test_bfd.py | 225 ++++++++++++++++++ 5 files changed, 314 insertions(+), 148 deletions(-) create mode 100644 anta/tests/bfd.py create mode 100644 tests/units/anta_tests/test_bfd.py diff --git a/anta/tests/bfd.py b/anta/tests/bfd.py new file mode 100644 index 000000000..f47a5becc --- /dev/null +++ b/anta/tests/bfd.py @@ -0,0 +1,84 @@ +# Copyright (c) 2023 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +""" +BFD test functions +""" +# Mypy does not understand AntaTest.Input typing +# mypy: disable-error-code=attr-defined +from __future__ import annotations + +from datetime import datetime +from typing import Optional + +from anta.models import AntaCommand, AntaTest + + +class VerifyBFDPeersHealth(AntaTest): + """ + Verifies there is no BFD peer in the down state, remote disc is not zero and last down should be above the threshold (all VRF, IPv4/ IPv6 peers). + Expected results: + * success: The test will pass if BFD peers are not down, remote disc is not zero and last down above the defined threshold (all VRF, IPv4/ IPv6 peers). + * failure: The test will fail if BFD peers are down, remote disc is zero and last down below the defined threshold (all VRF, IPv4/ IPv6 peers). + """ + + name = "VerifyBFDPeersHealth" + description = "Verifies there is no BFD peer in the down state, remote disc is not zero and last down should be above the threshold (all VRF, IPv4/IPv6 peers)." + categories = ["bfd"] + + # revision 1 as later revision introduces additional nesting for type + commands = [AntaCommand(command="show bfd peers", revision=1), AntaCommand(command="show clock")] + + class Input(AntaTest.Input): + """ + This class defines the input parameters of the testcase. 
+ """ + + last_down: Optional[int] = None + """Optional last down threshold in hours""" + + @AntaTest.anta_test + def test(self) -> None: + # Initialize failure strings + down_failures = [] + up_failures = [] + + # Extract the current timestamp and command output + clock_output = self.instance_commands[1].json_output + current_timestamp = clock_output["utcTime"] + bfd_output = self.instance_commands[0].json_output + + # Check the initial result + self.result.is_success() + + # Iterate over IPv4 and IPv6 neighbors + for ip_type in ["ipv4Neighbors", "ipv6Neighbors"]: + for vrf, vrf_data in bfd_output["vrfs"].items(): + for peer, neighbor_data in vrf_data[ip_type].items(): + for peer_data in neighbor_data["peerStats"].values(): + peer_status = peer_data["status"] + remote_disc = peer_data["remoteDisc"] + peer_l3intf = peer_data.get("l3intf", "") + l3intf_info = f" with peer layer3 interface {peer_l3intf}" if peer_l3intf else "" + remote_disc_info = f" with remote disc {remote_disc}" if remote_disc == 0 else "" + remote_disc_info = f" and remote disc {remote_disc}" if remote_disc == 0 and peer_l3intf else remote_disc_info + last_down = peer_data["lastDown"] + hours_difference = (datetime.fromtimestamp(current_timestamp) - datetime.fromtimestamp(last_down)).total_seconds() / 3600 + + # Check if peer status is not up + if peer_status != "up": + down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{l3intf_info}{remote_disc_info}.") + # Check if the last down is within the threshold + elif self.inputs.last_down and hours_difference > self.inputs.last_down: + up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{l3intf_info}{remote_disc_info}.") + # Check if remote disc is 0 + elif remote_disc == 0: + up_failures.append(f"{peer} in {vrf} VRF has remote disc {remote_disc}{l3intf_info}.") + + # Check if there are any failures + if down_failures: + down_failures_str = "\n".join(down_failures) + self.result.is_failure(f"Following BFD peers are not up:\n{down_failures_str}") + if up_failures: + up_failures_str = "\n".join(up_failures) + self.result.is_failure(f"\nFollowing BFD peers were down:\n{up_failures_str}") diff --git a/anta/tests/routing/generic.py b/anta/tests/routing/generic.py index c65040eb2..73e0090ed 100644 --- a/anta/tests/routing/generic.py +++ b/anta/tests/routing/generic.py @@ -81,31 +81,6 @@ def test(self) -> None: self.result.is_failure(f"routing-table has {total_routes} routes and not between min ({self.inputs.minimum}) and maximum ({self.inputs.maximum})") -class VerifyBFD(AntaTest): - """ - Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors). - """ - - name = "VerifyBFD" - description = "Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors)." - categories = ["routing", "generic"] - # revision 1 as later revision introduce additional nesting for type - commands = [AntaCommand(command="show bfd peers", revision=1)] - - @AntaTest.anta_test - def test(self) -> None: - command_output = self.instance_commands[0].json_output - self.result.is_success() - for _, vrf_data in command_output["vrfs"].items(): - for _, neighbor_data in vrf_data["ipv4Neighbors"].items(): - for peer, peer_data in neighbor_data["peerStats"].items(): - if (peer_status := peer_data["status"]) != "up": - failure_message = f"bfd state for peer '{peer}' is {peer_status} (expected up)." - if (peer_l3intf := peer_data.get("l3intf")) is not None and peer_l3intf != "": - failure_message += f" Interface: {peer_l3intf}." 
- self.result.is_failure(failure_message) - - class VerifyRoutingTableEntry(AntaTest): """ This test verifies that the provided routes are present in the routing table of a specified VRF. diff --git a/examples/tests.yaml b/examples/tests.yaml index d096e0ed0..d05c29dd9 100644 --- a/examples/tests.yaml +++ b/examples/tests.yaml @@ -50,6 +50,10 @@ anta.tests.aaa: - commands - dot1x +anta.tests.bfd: + - VerifyBFDPeersHealth: + last_down: 2 + anta.tests.configuration: - VerifyZeroTouch: - VerifyRunningConfigDiffs: @@ -263,7 +267,6 @@ anta.tests.routing: - VerifyRoutingTableSize: minimum: 2 maximum: 20 - - VerifyBFD: - VerifyRoutingTableEntry: vrf: default routes: diff --git a/tests/units/anta_tests/routing/test_generic.py b/tests/units/anta_tests/routing/test_generic.py index a5f875a54..65384daf9 100644 --- a/tests/units/anta_tests/routing/test_generic.py +++ b/tests/units/anta_tests/routing/test_generic.py @@ -8,7 +8,7 @@ from typing import Any -from anta.tests.routing.generic import VerifyBFD, VerifyRoutingProtocolModel, VerifyRoutingTableEntry, VerifyRoutingTableSize +from anta.tests.routing.generic import VerifyRoutingProtocolModel, VerifyRoutingTableEntry, VerifyRoutingTableSize from tests.lib.anta import test # noqa: F401; pylint: disable=W0611 DATA: list[dict[str, Any]] = [ @@ -77,127 +77,6 @@ "messages": ["Minimum 666 is greater than maximum 42"], }, }, - { - "name": "success-no-peer", - "test": VerifyBFD, - "eos_data": [{"vrfs": {}}], - "inputs": None, - "expected": {"result": "success"}, - }, - { - "name": "success-peers-up", - "test": VerifyBFD, - "eos_data": [ - { - "vrfs": { - "default": { - "ipv6Neighbors": {}, - "ipv4Neighbors": { - "7.7.7.7": { - "peerStats": { - "": { - "status": "up", - "authType": "None", - "kernelIfIndex": 0, - "lastDiag": "diagNone", - "authProfileName": "", - "lastUp": 1683288421.669188, - "remoteDisc": 345332116, - "sessType": "sessionTypeMultihop", - "localDisc": 1654273918, - "lastDown": 0.0, - "l3intf": "", - "tunnelId": 0, - } - } - }, - "10.3.0.1": { - "peerStats": { - "Ethernet1": { - "status": "up", - "authType": "None", - "kernelIfIndex": 11, - "lastDiag": "diagNone", - "authProfileName": "", - "lastUp": 1683288900.004889, - "remoteDisc": 1017672851, - "sessType": "sessionTypeNormal", - "localDisc": 4269977256, - "lastDown": 0.0, - "l3intf": "Ethernet1", - "tunnelId": 0, - } - } - }, - }, - "ipv4ReflectorNeighbors": {}, - "ipv6ReflectorNeighbors": {}, - "ipv6InitiatorNeighbors": {}, - "ipv4InitiatorNeighbors": {}, - } - } - } - ], - "inputs": None, - "expected": {"result": "success"}, - }, - { - "name": "failure", - "test": VerifyBFD, - "eos_data": [ - { - "vrfs": { - "default": { - "ipv6Neighbors": {}, - "ipv4Neighbors": { - "7.7.7.7": { - "peerStats": { - "": { - "status": "down", - "authType": "None", - "kernelIfIndex": 0, - "lastDiag": "diagNone", - "authProfileName": "", - "lastUp": 1683288421.669188, - "remoteDisc": 345332116, - "sessType": "sessionTypeMultihop", - "localDisc": 1654273918, - "lastDown": 0.0, - "l3intf": "", - "tunnelId": 0, - } - } - }, - "10.3.0.1": { - "peerStats": { - "Ethernet1": { - "status": "up", - "authType": "None", - "kernelIfIndex": 11, - "lastDiag": "diagNone", - "authProfileName": "", - "lastUp": 1683288900.004889, - "remoteDisc": 1017672851, - "sessType": "sessionTypeNormal", - "localDisc": 4269977256, - "lastDown": 0.0, - "l3intf": "Ethernet1", - "tunnelId": 0, - } - } - }, - }, - "ipv4ReflectorNeighbors": {}, - "ipv6ReflectorNeighbors": {}, - "ipv6InitiatorNeighbors": {}, - "ipv4InitiatorNeighbors": {}, - } 
- } - } - ], - "inputs": None, - "expected": {"result": "failure", "messages": ["bfd state for peer '' is down (expected up)."]}, - }, { "name": "success", "test": VerifyRoutingTableEntry, diff --git a/tests/units/anta_tests/test_bfd.py b/tests/units/anta_tests/test_bfd.py new file mode 100644 index 000000000..c1db79976 --- /dev/null +++ b/tests/units/anta_tests/test_bfd.py @@ -0,0 +1,225 @@ +# Copyright (c) 2023 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +""" +Tests for anta.tests.bfd.py +""" +# pylint: disable=C0302 +from __future__ import annotations + +from typing import Any + +# pylint: disable=C0413 +# because of the patch above +from anta.tests.bfd import VerifyBFDPeersHealth # noqa: E402 +from tests.lib.anta import test # noqa: F401; pylint: disable=W0611 + +DATA: list[dict[str, Any]] = [ + { + "name": "success", + "test": VerifyBFDPeersHealth, + "eos_data": [ + { + "vrfs": { + "default": { + "ipv4Neighbors": { + "192.0.255.7": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, + }, + "ipv6Neighbors": {}, + } + } + }, + { + "utcTime": 1703658481.8778424, + }, + ], + "inputs": {"last_down": 2}, + "expected": {"result": "success"}, + }, + { + "name": "success-no-peer", + "test": VerifyBFDPeersHealth, + "eos_data": [ + {"vrfs": {}}, + { + "utcTime": 1703658481.8778424, + }, + ], + "inputs": None, + "expected": {"result": "success"}, + }, + { + "name": "failure-session-down", + "test": VerifyBFDPeersHealth, + "eos_data": [ + { + "vrfs": { + "default": { + "ipv4Neighbors": { + "192.0.255.7": { + "peerStats": { + "": { + "status": "down", + "remoteDisc": 0, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, + }, + "ipv6Neighbors": {}, + } + } + }, + { + "utcTime": 1703658481.8778424, + }, + ], + "inputs": {"last_down": 2}, + "expected": { + "result": "failure", + "messages": ["Following BFD peers are not up:\n192.0.255.7 is down in default VRF with remote disc 0."], + }, + }, + { + "name": "failure-session-down-l3intf", + "test": VerifyBFDPeersHealth, + "eos_data": [ + { + "vrfs": { + "default": { + "ipv4Neighbors": { + "192.0.255.7": { + "peerStats": { + "": { + "status": "down", + "remoteDisc": 0, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet2", + } + } + }, + }, + "ipv6Neighbors": {}, + } + } + }, + { + "utcTime": 1703658481.8778424, + }, + ], + "inputs": {"last_down": 2}, + "expected": { + "result": "failure", + "messages": ["Following BFD peers are not up:\n192.0.255.7 is down in default VRF with peer layer3 interface Ethernet2 and remote disc 0."], + }, + }, + { + "name": "failure-session-up-disc", + "test": VerifyBFDPeersHealth, + "eos_data": [ + { + "vrfs": { + "default": { + "ipv4Neighbors": { + "192.0.255.7": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 0, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet2", + } + } + }, + }, + "ipv6Neighbors": {}, + } + } + }, + { + "utcTime": 1703658481.8778424, + }, + ], + "inputs": {}, + "expected": { + "result": "failure", + "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF has remote disc 0 with peer layer3 interface Ethernet2."], + }, + }, + { + "name": "failure-last-down-l3intf", + "test": VerifyBFDPeersHealth, + "eos_data": [ + { + "vrfs": { + "default": { + "ipv4Neighbors": { + "192.0.255.7": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + 
"l3intf": "Ethernet2", + } + } + }, + }, + "ipv6Neighbors": {}, + } + } + }, + { + "utcTime": 1703667348.111288, + }, + ], + "inputs": {"last_down": 2}, + "expected": { + "result": "failure", + "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF was down 3 hours ago with peer layer3 interface Ethernet2."], + }, + }, + { + "name": "failure-last-down", + "test": VerifyBFDPeersHealth, + "eos_data": [ + { + "vrfs": { + "default": { + "ipv4Neighbors": { + "192.0.255.7": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, + }, + "ipv6Neighbors": {}, + } + } + }, + { + "utcTime": 1703667348.111288, + }, + ], + "inputs": {"last_down": 2}, + "expected": { + "result": "failure", + "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF was down 3 hours ago."], + }, + }, +] From ecceab1f5f9e7b5f542ece8f47648150b07ebc13 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Mon, 15 Jan 2024 09:40:54 +0000 Subject: [PATCH 2/5] issue-503: Removed IPv6 support for now --- anta/tests/bfd.py | 56 ++++----- tests/units/anta_tests/test_bfd.py | 190 +++++++++++++++++++++++++++-- 2 files changed, 206 insertions(+), 40 deletions(-) diff --git a/anta/tests/bfd.py b/anta/tests/bfd.py index f47a5becc..bd7947d7d 100644 --- a/anta/tests/bfd.py +++ b/anta/tests/bfd.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """ @@ -16,14 +16,14 @@ class VerifyBFDPeersHealth(AntaTest): """ - Verifies there is no BFD peer in the down state, remote disc is not zero and last down should be above the threshold (all VRF, IPv4/ IPv6 peers). + Verifies there is no IPv4 BFD peer in the down state, remote disc is not zero and last down should be above the threshold for all VRF. Expected results: - * success: The test will pass if BFD peers are not down, remote disc is not zero and last down above the defined threshold (all VRF, IPv4/ IPv6 peers). - * failure: The test will fail if BFD peers are down, remote disc is zero and last down below the defined threshold (all VRF, IPv4/ IPv6 peers). + * success: The test will pass if IPv4 BFD peers are not down, remote disc is not zero and last down above the defined threshold for all VRF. + * failure: The test will fail if IPv4 BFD peers are down, remote disc is zero and last down below the defined threshold for all VRF. """ name = "VerifyBFDPeersHealth" - description = "Verifies there is no BFD peer in the down state, remote disc is not zero and last down should be above the threshold (all VRF, IPv4/IPv6 peers)." + description = "Verifies there is no IPv4 BFD peer in the down state, remote disc is not zero and last down should be above the threshold for all VRF." 
categories = ["bfd"] # revision 1 as later revision introduces additional nesting for type @@ -48,32 +48,30 @@ def test(self) -> None: current_timestamp = clock_output["utcTime"] bfd_output = self.instance_commands[0].json_output - # Check the initial result + # set the initial result self.result.is_success() - # Iterate over IPv4 and IPv6 neighbors - for ip_type in ["ipv4Neighbors", "ipv6Neighbors"]: - for vrf, vrf_data in bfd_output["vrfs"].items(): - for peer, neighbor_data in vrf_data[ip_type].items(): - for peer_data in neighbor_data["peerStats"].values(): - peer_status = peer_data["status"] - remote_disc = peer_data["remoteDisc"] - peer_l3intf = peer_data.get("l3intf", "") - l3intf_info = f" with peer layer3 interface {peer_l3intf}" if peer_l3intf else "" - remote_disc_info = f" with remote disc {remote_disc}" if remote_disc == 0 else "" - remote_disc_info = f" and remote disc {remote_disc}" if remote_disc == 0 and peer_l3intf else remote_disc_info - last_down = peer_data["lastDown"] - hours_difference = (datetime.fromtimestamp(current_timestamp) - datetime.fromtimestamp(last_down)).total_seconds() / 3600 - - # Check if peer status is not up - if peer_status != "up": - down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{l3intf_info}{remote_disc_info}.") - # Check if the last down is within the threshold - elif self.inputs.last_down and hours_difference > self.inputs.last_down: - up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{l3intf_info}{remote_disc_info}.") - # Check if remote disc is 0 - elif remote_disc == 0: - up_failures.append(f"{peer} in {vrf} VRF has remote disc {remote_disc}{l3intf_info}.") + # Iterate over IPv4 BFD peers + for vrf, vrf_data in bfd_output["vrfs"].items(): + for peer, neighbor_data in vrf_data["ipv4Neighbors"].items(): + for peer_data in neighbor_data["peerStats"].values(): + peer_status = peer_data["status"] + remote_disc = peer_data["remoteDisc"] + peer_l3intf = peer_data.get("l3intf", "") + l3intf_info = f" with peer layer3 interface {peer_l3intf}" if peer_l3intf else "" + remote_disc_info = f" with remote disc {remote_disc}" if remote_disc == 0 else "" + remote_disc_info = f" and remote disc {remote_disc}" if remote_disc == 0 and peer_l3intf else remote_disc_info + last_down = peer_data["lastDown"] + hours_difference = (datetime.fromtimestamp(current_timestamp) - datetime.fromtimestamp(last_down)).total_seconds() / 3600 + # Check if peer status is not up + if peer_status != "up": + down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{l3intf_info}{remote_disc_info}.") + # Check if the last down is within the threshold + elif self.inputs.last_down and hours_difference > self.inputs.last_down: + up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{l3intf_info}{remote_disc_info}.") + # Check if remote disc is 0 + elif remote_disc == 0: + up_failures.append(f"{peer} in {vrf} VRF has remote disc {remote_disc}{l3intf_info}.") # Check if there are any failures if down_failures: diff --git a/tests/units/anta_tests/test_bfd.py b/tests/units/anta_tests/test_bfd.py index c1db79976..8c9e0353a 100644 --- a/tests/units/anta_tests/test_bfd.py +++ b/tests/units/anta_tests/test_bfd.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
""" @@ -35,7 +35,22 @@ }, }, "ipv6Neighbors": {}, - } + }, + "MGMT": { + "ipv4Neighbors": { + "192.0.255.71": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, + }, + "ipv6Neighbors": {}, + }, } }, { @@ -49,7 +64,25 @@ "name": "success-no-peer", "test": VerifyBFDPeersHealth, "eos_data": [ - {"vrfs": {}}, + { + "vrfs": { + "MGMT": { + "ipv6Neighbors": { + "2266:25::12:0:ad12": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, + }, + "ipv4Neighbors": {}, + } + } + }, { "utcTime": 1703658481.8778424, }, @@ -75,9 +108,34 @@ } } }, + "192.0.255.70": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, }, "ipv6Neighbors": {}, - } + }, + "MGMT": { + "ipv4Neighbors": { + "192.0.255.71": { + "peerStats": { + "": { + "status": "down", + "remoteDisc": 0, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, + }, + "ipv6Neighbors": {}, + }, } }, { @@ -87,7 +145,9 @@ "inputs": {"last_down": 2}, "expected": { "result": "failure", - "messages": ["Following BFD peers are not up:\n192.0.255.7 is down in default VRF with remote disc 0."], + "messages": [ + "Following BFD peers are not up:\n192.0.255.7 is down in default VRF with remote disc 0.\n192.0.255.71 is down in MGMT VRF with remote disc 0." + ], }, }, { @@ -108,9 +168,34 @@ } } }, + "192.0.255.1": { + "peerStats": { + "": { + "status": "down", + "remoteDisc": 0, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet3", + } + } + }, }, "ipv6Neighbors": {}, - } + }, + "MGMT": { + "ipv4Neighbors": { + "192.0.255.71": { + "peerStats": { + "": { + "status": "down", + "remoteDisc": 0, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet12", + } + } + }, + }, + "ipv6Neighbors": {}, + }, } }, { @@ -120,7 +205,12 @@ "inputs": {"last_down": 2}, "expected": { "result": "failure", - "messages": ["Following BFD peers are not up:\n192.0.255.7 is down in default VRF with peer layer3 interface Ethernet2 and remote disc 0."], + "messages": [ + "Following BFD peers are not up:\n" + "192.0.255.7 is down in default VRF with peer layer3 interface Ethernet2 and remote disc 0.\n" + "192.0.255.1 is down in default VRF with peer layer3 interface Ethernet3 and remote disc 0.\n" + "192.0.255.71 is down in MGMT VRF with peer layer3 interface Ethernet12 and remote disc 0." + ], }, }, { @@ -141,6 +231,16 @@ } } }, + "192.0.255.71": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 0, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet2", + } + } + }, }, "ipv6Neighbors": {}, } @@ -153,7 +253,11 @@ "inputs": {}, "expected": { "result": "failure", - "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF has remote disc 0 with peer layer3 interface Ethernet2."], + "messages": [ + "Following BFD peers were down:\n" + "192.0.255.7 in default VRF has remote disc 0 with peer layer3 interface Ethernet2.\n" + "192.0.255.71 in default VRF has remote disc 0 with peer layer3 interface Ethernet2." 
+ ], }, }, { @@ -174,9 +278,44 @@ } } }, + "192.0.255.70": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet1", + } + } + }, }, "ipv6Neighbors": {}, - } + }, + "MGMT": { + "ipv4Neighbors": { + "192.0.255.17": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet3", + } + } + }, + "192.0.255.27": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "Ethernet4", + } + } + }, + }, + "ipv6Neighbors": {}, + }, } }, { @@ -186,7 +325,13 @@ "inputs": {"last_down": 2}, "expected": { "result": "failure", - "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF was down 3 hours ago with peer layer3 interface Ethernet2."], + "messages": [ + "Following BFD peers were down:\n" + "192.0.255.7 in default VRF was down 3 hours ago with peer layer3 interface Ethernet2.\n" + "192.0.255.70 in default VRF was down 3 hours ago with peer layer3 interface Ethernet1.\n" + "192.0.255.17 in MGMT VRF was down 3 hours ago with peer layer3 interface Ethernet3.\n" + "192.0.255.27 in MGMT VRF was down 3 hours ago with peer layer3 interface Ethernet4." + ], }, }, { @@ -207,6 +352,26 @@ } } }, + "192.0.255.71": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, + "192.0.255.17": { + "peerStats": { + "": { + "status": "up", + "remoteDisc": 3940685114, + "lastDown": 1703657258.652725, + "l3intf": "", + } + } + }, }, "ipv6Neighbors": {}, } @@ -219,7 +384,10 @@ "inputs": {"last_down": 2}, "expected": { "result": "failure", - "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF was down 3 hours ago."], + "messages": [ + "Following BFD peers were down:\n192.0.255.7 in default VRF was down 3 hours ago.\n" + "192.0.255.71 in default VRF was down 3 hours ago.\n192.0.255.17 in default VRF was down 3 hours ago." + ], }, }, ] From 2401bf6e0d2075ebc4cfe06a0cff2c44117c8df2 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Wed, 31 Jan 2024 06:55:49 +0000 Subject: [PATCH 3/5] issue-503: updated doc string --- anta/tests/bfd.py | 48 +++++--- docs/api/tests.bfd.md | 13 +++ docs/api/tests.md | 3 +- mkdocs.yml | 1 + tests/units/anta_tests/test_bfd.py | 175 +++-------------------------- 5 files changed, 63 insertions(+), 177 deletions(-) create mode 100644 docs/api/tests.bfd.md diff --git a/anta/tests/bfd.py b/anta/tests/bfd.py index bd7947d7d..285f49410 100644 --- a/anta/tests/bfd.py +++ b/anta/tests/bfd.py @@ -11,31 +11,41 @@ from datetime import datetime from typing import Optional +from pydantic import Field + from anta.models import AntaCommand, AntaTest class VerifyBFDPeersHealth(AntaTest): """ - Verifies there is no IPv4 BFD peer in the down state, remote disc is not zero and last down should be above the threshold for all VRF. + This class verifies the health of IPv4 BFD peers across all VRFs. + It checks that no BFD peer is in the down state and that the discriminator value of the remote system is not zero. + + Optionally, it can also verify that BFD peers have not been down before a specified threshold of hours. + Expected results: - * success: The test will pass if IPv4 BFD peers are not down, remote disc is not zero and last down above the defined threshold for all VRF. 
- * failure: The test will fail if IPv4 BFD peers are down, remote disc is zero and last down below the defined threshold for all VRF. + * Success: The test will pass if all IPv4 BFD peers are up, the discriminator value of each remote system is non-zero, + and the last downtime of each peer is above the defined threshold. + * Failure: The test will fail if any IPv4 BFD peer is down, the discriminator value of any remote system is zero, + or the last downtime of any peer is below the defined threshold. """ name = "VerifyBFDPeersHealth" - description = "Verifies there is no IPv4 BFD peer in the down state, remote disc is not zero and last down should be above the threshold for all VRF." + description = ( + "Verifies there is no IPv4 BFD peer in the down state and discriminator value of the remote system is not zero for all VRF. " + "BFD peer last down in hours is optional check which should be above the threshold for all VRF." + ) categories = ["bfd"] - # revision 1 as later revision introduces additional nesting for type - commands = [AntaCommand(command="show bfd peers", revision=1), AntaCommand(command="show clock")] + commands = [AntaCommand(command="show bfd peers"), AntaCommand(command="show clock")] class Input(AntaTest.Input): """ - This class defines the input parameters of the testcase. + This class defines the input parameters of the test case. """ - last_down: Optional[int] = None - """Optional last down threshold in hours""" + down_threshold: Optional[int] = Field(default=None, gt=0) + """Optional down threshold in hours to check if a BFD peer was down before those hours or not.""" @AntaTest.anta_test def test(self) -> None: @@ -51,27 +61,33 @@ def test(self) -> None: # set the initial result self.result.is_success() + # Check if any IPv4 BFD peer is configured + ipv4_neighbors_exist = any(vrf_data["ipv4Neighbors"] for vrf_data in bfd_output["vrfs"].values()) + if not ipv4_neighbors_exist: + self.result.is_failure("No IPv4 BFD peers are configured for any VRF.") + return + # Iterate over IPv4 BFD peers for vrf, vrf_data in bfd_output["vrfs"].items(): for peer, neighbor_data in vrf_data["ipv4Neighbors"].items(): for peer_data in neighbor_data["peerStats"].values(): peer_status = peer_data["status"] remote_disc = peer_data["remoteDisc"] - peer_l3intf = peer_data.get("l3intf", "") - l3intf_info = f" with peer layer3 interface {peer_l3intf}" if peer_l3intf else "" remote_disc_info = f" with remote disc {remote_disc}" if remote_disc == 0 else "" - remote_disc_info = f" and remote disc {remote_disc}" if remote_disc == 0 and peer_l3intf else remote_disc_info last_down = peer_data["lastDown"] hours_difference = (datetime.fromtimestamp(current_timestamp) - datetime.fromtimestamp(last_down)).total_seconds() / 3600 + # Check if peer status is not up if peer_status != "up": - down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{l3intf_info}{remote_disc_info}.") + down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{remote_disc_info}.") + # Check if the last down is within the threshold - elif self.inputs.last_down and hours_difference > self.inputs.last_down: - up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{l3intf_info}{remote_disc_info}.") + elif self.inputs.down_threshold and hours_difference > self.inputs.down_threshold: + up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{remote_disc_info}.") + # Check if remote disc is 0 elif remote_disc == 0: - up_failures.append(f"{peer} in {vrf} VRF has remote 
disc {remote_disc}{l3intf_info}.") + up_failures.append(f"{peer} in {vrf} VRF has remote disc {remote_disc}.") # Check if there are any failures if down_failures: diff --git a/docs/api/tests.bfd.md b/docs/api/tests.bfd.md new file mode 100644 index 000000000..d28521fbc --- /dev/null +++ b/docs/api/tests.bfd.md @@ -0,0 +1,13 @@ + + +# ANTA catalog for bfd tests + +::: anta.tests.bfd + options: + show_root_heading: false + show_root_toc_entry: false + merge_init_into_class: false diff --git a/docs/api/tests.md b/docs/api/tests.md index 68c949fc4..f31278b05 100644 --- a/docs/api/tests.md +++ b/docs/api/tests.md @@ -1,5 +1,5 @@ @@ -10,6 +10,7 @@ This section describes all the available tests provided by ANTA package. - [AAA](tests.aaa.md) +- [BFD](tests.bfd.md) - [Configuration](tests.configuration.md) - [Connectivity](tests.connectivity.md) - [Field Notice](tests.field_notices.md) diff --git a/mkdocs.yml b/mkdocs.yml index 4aed5264e..3ae1d9f9a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -173,6 +173,7 @@ nav: - Test Catalog Documentation: - Overview: api/tests.md - AAA: api/tests.aaa.md + - BFD: api/tests.bfd.md - Configuration: api/tests.configuration.md - Connectivity: api/tests.connectivity.md - Field Notices: api/tests.field_notices.md diff --git a/tests/units/anta_tests/test_bfd.py b/tests/units/anta_tests/test_bfd.py index 8c9e0353a..5640e872a 100644 --- a/tests/units/anta_tests/test_bfd.py +++ b/tests/units/anta_tests/test_bfd.py @@ -57,30 +57,23 @@ "utcTime": 1703658481.8778424, }, ], - "inputs": {"last_down": 2}, + "inputs": {"down_threshold": 2}, "expected": {"result": "success"}, }, { - "name": "success-no-peer", + "name": "failure-no-peer", "test": VerifyBFDPeersHealth, "eos_data": [ { "vrfs": { "MGMT": { - "ipv6Neighbors": { - "2266:25::12:0:ad12": { - "peerStats": { - "": { - "status": "up", - "remoteDisc": 3940685114, - "lastDown": 1703657258.652725, - "l3intf": "", - } - } - }, - }, + "ipv6Neighbors": {}, "ipv4Neighbors": {}, - } + }, + "default": { + "ipv6Neighbors": {}, + "ipv4Neighbors": {}, + }, } }, { @@ -88,7 +81,10 @@ }, ], "inputs": None, - "expected": {"result": "success"}, + "expected": { + "result": "failure", + "messages": ["No IPv4 BFD peers are configured for any VRF."], + }, }, { "name": "failure-session-down", @@ -142,7 +138,7 @@ "utcTime": 1703658481.8778424, }, ], - "inputs": {"last_down": 2}, + "inputs": {"down_threshold": 2}, "expected": { "result": "failure", "messages": [ @@ -150,69 +146,6 @@ ], }, }, - { - "name": "failure-session-down-l3intf", - "test": VerifyBFDPeersHealth, - "eos_data": [ - { - "vrfs": { - "default": { - "ipv4Neighbors": { - "192.0.255.7": { - "peerStats": { - "": { - "status": "down", - "remoteDisc": 0, - "lastDown": 1703657258.652725, - "l3intf": "Ethernet2", - } - } - }, - "192.0.255.1": { - "peerStats": { - "": { - "status": "down", - "remoteDisc": 0, - "lastDown": 1703657258.652725, - "l3intf": "Ethernet3", - } - } - }, - }, - "ipv6Neighbors": {}, - }, - "MGMT": { - "ipv4Neighbors": { - "192.0.255.71": { - "peerStats": { - "": { - "status": "down", - "remoteDisc": 0, - "lastDown": 1703657258.652725, - "l3intf": "Ethernet12", - } - } - }, - }, - "ipv6Neighbors": {}, - }, - } - }, - { - "utcTime": 1703658481.8778424, - }, - ], - "inputs": {"last_down": 2}, - "expected": { - "result": "failure", - "messages": [ - "Following BFD peers are not up:\n" - "192.0.255.7 is down in default VRF with peer layer3 interface Ethernet2 and remote disc 0.\n" - "192.0.255.1 is down in default VRF with peer layer3 interface Ethernet3 and remote disc 
0.\n" - "192.0.255.71 is down in MGMT VRF with peer layer3 interface Ethernet12 and remote disc 0." - ], - }, - }, { "name": "failure-session-up-disc", "test": VerifyBFDPeersHealth, @@ -253,85 +186,7 @@ "inputs": {}, "expected": { "result": "failure", - "messages": [ - "Following BFD peers were down:\n" - "192.0.255.7 in default VRF has remote disc 0 with peer layer3 interface Ethernet2.\n" - "192.0.255.71 in default VRF has remote disc 0 with peer layer3 interface Ethernet2." - ], - }, - }, - { - "name": "failure-last-down-l3intf", - "test": VerifyBFDPeersHealth, - "eos_data": [ - { - "vrfs": { - "default": { - "ipv4Neighbors": { - "192.0.255.7": { - "peerStats": { - "": { - "status": "up", - "remoteDisc": 3940685114, - "lastDown": 1703657258.652725, - "l3intf": "Ethernet2", - } - } - }, - "192.0.255.70": { - "peerStats": { - "": { - "status": "up", - "remoteDisc": 3940685114, - "lastDown": 1703657258.652725, - "l3intf": "Ethernet1", - } - } - }, - }, - "ipv6Neighbors": {}, - }, - "MGMT": { - "ipv4Neighbors": { - "192.0.255.17": { - "peerStats": { - "": { - "status": "up", - "remoteDisc": 3940685114, - "lastDown": 1703657258.652725, - "l3intf": "Ethernet3", - } - } - }, - "192.0.255.27": { - "peerStats": { - "": { - "status": "up", - "remoteDisc": 3940685114, - "lastDown": 1703657258.652725, - "l3intf": "Ethernet4", - } - } - }, - }, - "ipv6Neighbors": {}, - }, - } - }, - { - "utcTime": 1703667348.111288, - }, - ], - "inputs": {"last_down": 2}, - "expected": { - "result": "failure", - "messages": [ - "Following BFD peers were down:\n" - "192.0.255.7 in default VRF was down 3 hours ago with peer layer3 interface Ethernet2.\n" - "192.0.255.70 in default VRF was down 3 hours ago with peer layer3 interface Ethernet1.\n" - "192.0.255.17 in MGMT VRF was down 3 hours ago with peer layer3 interface Ethernet3.\n" - "192.0.255.27 in MGMT VRF was down 3 hours ago with peer layer3 interface Ethernet4." - ], + "messages": ["Following BFD peers were down:\n 192.0.255.7 in default VRF has remote disc 0.\n 192.0.255.71 in default VRF has remote disc 0."], }, }, { @@ -381,7 +236,7 @@ "utcTime": 1703667348.111288, }, ], - "inputs": {"last_down": 2}, + "inputs": {"down_threshold": 2}, "expected": { "result": "failure", "messages": [ From fea2f8edf7831b4f3ff0b16a4d1bc7703cd2a059 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 1 Feb 2024 07:53:53 +0000 Subject: [PATCH 4/5] issue-503: updated tests.yaml file --- examples/tests.yaml | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/examples/tests.yaml b/examples/tests.yaml index 9cf1f6bcb..d1771b89b 100644 --- a/examples/tests.yaml +++ b/examples/tests.yaml @@ -219,22 +219,6 @@ anta.tests.security: # Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
- - VerifyIpv4ACL: - ipv4_access_lists: - - name: default-control-plane-acl - entries: - - sequence: 10 - action: permit icmp any any - - sequence: 20 - action: permit ip any any tracked - - sequence: 30 - action: permit udp any any eq bfd ttl eq 255 - - name: LabTest - entries: - - sequence: 10 - action: permit icmp any any - - sequence: 20 - action: permit tcp any any range 5900 5910 anta.tests.snmp: - VerifySnmpStatus: From 35c8e3b558d137b01717d67b756fddc1eb57d528 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Fri, 23 Feb 2024 03:24:22 -0800 Subject: [PATCH 5/5] issue-503: updated doc string --- anta/tests/bfd.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/anta/tests/bfd.py b/anta/tests/bfd.py index 6aac6b946..aea8d07b0 100644 --- a/anta/tests/bfd.py +++ b/anta/tests/bfd.py @@ -159,8 +159,10 @@ def test(self) -> None: class VerifyBFDPeersHealth(AntaTest): """ This class verifies the health of IPv4 BFD peers across all VRFs. + It checks that no BFD peer is in the down state and that the discriminator value of the remote system is not zero. Optionally, it can also verify that BFD peers have not been down before a specified threshold of hours. + Expected results: * Success: The test will pass if all IPv4 BFD peers are up, the discriminator value of each remote system is non-zero, and the last downtime of each peer is above the defined threshold. @@ -169,10 +171,7 @@ class VerifyBFDPeersHealth(AntaTest): """ name = "VerifyBFDPeersHealth" - description = ( - "Verifies there is no IPv4 BFD peer in the down state and discriminator value of the remote system is not zero for all VRF. " - "BFD peer last down in hours is optional check which should be above the threshold for all VRF." - ) + description = "Verifies the health of all IPv4 BFD peers." categories = ["bfd"] # revision 1 as later revision introduces additional nesting for type commands = [AntaCommand(command="show bfd peers", revision=1), AntaCommand(command="show clock")]
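
Usage sketch (illustrative, not part of the patch series itself): after this series, VerifyBFDPeersHealth is enabled from an ANTA test catalog the same way the first commit wires it into examples/tests.yaml, except that the input field was renamed from last_down to down_threshold in the third commit; the catalog entry below assumes that final field name and uses an example value.

    anta.tests.bfd:
      - VerifyBFDPeersHealth:
          # optional, in hours; omit to skip the last-down check
          down_threshold: 2

With down_threshold set to 2, the unit-test data above illustrates the behaviour this series codifies: a peer whose lastDown is 1703657258.652725 checked against a show clock utcTime of 1703667348.111288 was last down roughly 2.8 hours ago, which rounds to 3 and exceeds the threshold, so it is reported under "Following BFD peers were down"; peers whose status is not up are reported under "Following BFD peers are not up" regardless of the threshold.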