From a2c35689b84bde7544826504ac5ba04b2d535447 Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 14 May 2024 14:47:44 +0200 Subject: [PATCH 01/20] Remove obsolete import --- python/nav/bin/navclean.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/nav/bin/navclean.py b/python/nav/bin/navclean.py index f32f980dcc..938eb2e36f 100755 --- a/python/nav/bin/navclean.py +++ b/python/nav/bin/navclean.py @@ -17,8 +17,6 @@ # along with NAV. If not, see . # """Deletes old data from the NAV database""" -from __future__ import print_function - import sys import argparse From 7208968b871be5dbf4a8611882daf8efebf08956 Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 14 May 2024 14:50:48 +0200 Subject: [PATCH 02/20] Improve command line help and docstrings --- python/nav/bin/navclean.py | 50 +++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/python/nav/bin/navclean.py b/python/nav/bin/navclean.py index 938eb2e36f..a847afb89f 100755 --- a/python/nav/bin/navclean.py +++ b/python/nav/bin/navclean.py @@ -3,6 +3,7 @@ # -*- testargs: --arp -*- # # Copyright (C) 2017 Uninett AS +# Copyright (C) 2024 Sikt # # This file is part of Network Administration Visualized (NAV). # @@ -16,7 +17,7 @@ # details. You should have received a copy of the GNU General Public License # along with NAV. If not, see . # -"""Deletes old data from the NAV database""" +"""Cleans old data from the NAV database""" import sys import argparse @@ -86,39 +87,50 @@ def main(): def make_argparser(): """Makes this program's ArgumentParser""" parser = argparse.ArgumentParser( - description="Deletes old data from the NAV database", - epilog="Unless options are given, the number of expired records will " - "be printed. The default expiry limit is 6 months. The -e and -E" - " options set a common expiry date for all selected tables. If " - "you want different expiry dates for each table, you need to " - "run navclean more than once. To actually delete the expired " - "records, add the -f option.", + description="Cleans old data from the NAV database", + epilog="Cleaning old data means either deleting old records, or updating " + "expired records. Use options to select which types of data to clean. " + "The -e and -E options set an expiry date that applies to all " + "selected data types. Every run is a dry-run by default, unless the " + "-f option is given, in order to avoid accidental data deletion.", ) arg = parser.add_argument - arg("-q", "--quiet", action="store_true", help="be quiet") - arg("-f", "--force", action="store_true", help="force deletion of expired records") + arg("-q", "--quiet", action="store_true", help="Be quiet") + arg("-f", "--force", action="store_true", help="Force actual database updates") arg( "-e", "--datetime", type=postgresql_datetime, - help="set an explicit expiry date on ISO format", + help="Set an explicit expiry date on ISO format", ) arg( "-E", "--interval", type=postgresql_interval, default="6 months", - help="set an expiry interval using PostgreSQL interval syntax, e.g. " - "'30 days', '4 weeks', '6 months'", + help="Set an expiry interval using PostgreSQL interval syntax, e.g. 
" + "'30 days', '4 weeks', '6 months', '30 minutes'", ) - arg("--arp", action="store_true", help="delete from ARP table") - arg("--cam", action="store_true", help="delete from CAM table") - arg("--radiusacct", action="store_true", help="delete from Radius accounting table") - arg("--radiuslog", action="store_true", help="delete from Radius error log table") - arg("--netbox", action="store_true", help="delete from NETBOX table") - arg("--websessions", action="store_true", help="delete expired web sessions") + arg("--arp", action="store_true", help="Delete old records from ARP table") + arg("--cam", action="store_true", help="Delete old records from CAM table") + arg( + "--radiusacct", + action="store_true", + help="Delete old records from Radius accounting table", + ) + arg( + "--radiuslog", + action="store_true", + help="Delete old records from Radius error log table", + ) + arg( + "--netbox", + action="store_true", + help="Delete netboxes that have been marked for deletion by Seed Database", + ) + arg("--websessions", action="store_true", help="Delete expired web sessions") return parser From 2fe8a128f9f195b9ad06df81d01dc5815358ddfe Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 14 May 2024 15:09:36 +0200 Subject: [PATCH 03/20] Clean up navclean terminology! We're about to introduce cleaning operations that do not entail actual record deletion, just record updating. For that reason, it's best to change the "deleter" concept in the code to a more generic "cleaner" concept. --- python/nav/bin/navclean.py | 82 +++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 37 deletions(-) diff --git a/python/nav/bin/navclean.py b/python/nav/bin/navclean.py index a847afb89f..a5f761ca9a 100755 --- a/python/nav/bin/navclean.py +++ b/python/nav/bin/navclean.py @@ -18,8 +18,9 @@ # along with NAV. If not, see . # """Cleans old data from the NAV database""" -import sys import argparse +import sys +from typing import List from nav.bootstrap import bootstrap_django @@ -44,19 +45,20 @@ def main(): expiry = args.datetime connection = nav.db.getConnection('default', 'manage') - deleters = get_selected_deleters(args, connection) + cleaners = get_selected_cleaners(args, connection) - deleted = False - for deleter in deleters: + cleaned = False + for cleaner in cleaners: try: - count = deleter.delete(expiry, dry_run=not args.force) + count = cleaner.clean(expiry, dry_run=not args.force) if not args.quiet: print( "Expired records in {table}: {count}".format( - table=deleter.table, count=count if count is not None else "N/A" + table=cleaner.expiry_type, + count=count if count is not None else "N/A", ) ) - deleted = True + cleaned = True except psycopg2.Error as error: print("The PostgreSQL backend produced an error", file=sys.stderr) @@ -66,15 +68,15 @@ def main(): if not args.force: connection.rollback() - deleted = False + cleaned = False else: connection.commit() if not args.quiet: - if deleted: - print("Expired records were deleted.") + if cleaned: + print("Expired records were updated/deleted.") else: - print("Nothing deleted.") + print("Nothing changed.") connection.close() @@ -160,26 +162,28 @@ def postgresql_interval(value): return value -def get_selected_deleters(args, connection): - """Returns a list of RecordDeleter instances for each of the tables +def get_selected_cleaners( + args: argparse.Namespace, connection +) -> List["RecordCleaner"]: + """Returns a list of RecordCleaner instances for each of the tables selected in the supplied ArgumentParser. 
""" return [ - deleter(connection) - for deleter in RecordDeleter.__subclasses__() - if getattr(args, deleter.table, False) + cleaner(connection) + for cleaner in RecordCleaner.__subclasses__() + if getattr(args, cleaner.expiry_type, False) ] # -# Deleter implementations +# Cleaner implementations # -class RecordDeleter(object): - """Base class for record deletion""" +class RecordCleaner: + """Base class for record cleaning""" - table = None + expiry_type = None selector = "" def __init__(self, connection): @@ -190,12 +194,16 @@ def filter(self, expiry): return self.selector.format(expiry=expiry) def sql(self, expiry): - """Returns the full DELETE statement based on the expiry date""" + """Returns the full DELETE statement based on the expiry date. Override this + method if a different kind of update statement is needed. + """ where = self.filter(expiry) - return 'DELETE FROM {table} {filter}'.format(table=self.table, filter=where) + return 'DELETE FROM {table} {filter}'.format( + table=self.expiry_type, filter=where + ) - def delete(self, expiry, dry_run=False): - """Deletes the records selected by the expiry spec""" + def clean(self, expiry: str, dry_run: bool = False): + """Cleans the records selected by the expiry spec""" cursor = self.connection.cursor() sql = self.sql(expiry) cursor.execute(sql) @@ -205,18 +213,18 @@ def delete(self, expiry, dry_run=False): # pylint: disable=missing-docstring -class ArpDeleter(RecordDeleter): - table = "arp" +class ArpDeleter(RecordCleaner): + expiry_type = "arp" selector = "WHERE end_time < {expiry}" -class CamDeleter(RecordDeleter): - table = "cam" +class CamDeleter(RecordCleaner): + expiry_type = "cam" selector = "WHERE end_time < {expiry}" -class RadiusAcctDeleter(RecordDeleter): - table = "radiusacct" +class RadiusAcctDeleter(RecordCleaner): + expiry_type = "radiusacct" selector = """ WHERE (acctstoptime < {expiry}) OR ((acctstarttime + (acctsessiontime * interval '1 sec')) < {expiry}) @@ -225,23 +233,23 @@ class RadiusAcctDeleter(RecordDeleter): """ -class RadiusLogDeleter(RecordDeleter): - table = "radiuslog" +class RadiusLogDeleter(RecordCleaner): + expiry_type = "radiuslog" selector = "WHERE time < {expiry}" -class NetboxDeleter(RecordDeleter): - table = "netbox" +class NetboxDeleter(RecordCleaner): + expiry_type = "netbox" selector = "WHERE pg_try_advisory_lock(netboxid) and deleted_at IS NOT NULL" -class SessionDeleter(RecordDeleter): +class SessionDeleter(RecordCleaner): """Special case deleter for Django Sessions""" - table = "websessions" + expiry_type = "websessions" @atomic - def delete(self, expiry, dry_run=False): + def clean(self, expiry, dry_run=False): """Deletes all expired django sessions if not a dry_run. Expiry spec is ignored, as sessions have a different expiry mechanism. From 3f4c7ad3b73db69c1c49269f5b815ecf2eb7752f Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 14 May 2024 15:15:48 +0200 Subject: [PATCH 04/20] Introduce ARP record closer to navclean This new command can be used to explicitly close ARP records from netboxes that have been down for too long. 
--- python/nav/bin/navclean.py | 42 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/python/nav/bin/navclean.py b/python/nav/bin/navclean.py index a5f761ca9a..a54238e700 100755 --- a/python/nav/bin/navclean.py +++ b/python/nav/bin/navclean.py @@ -116,6 +116,12 @@ def make_argparser(): ) arg("--arp", action="store_true", help="Delete old records from ARP table") + arg( + "--close-arp", + action="store_true", + help="Close expired ARP records. Expired records are those where the netbox " + "has been down for longer than the set expiry limit", + ) arg("--cam", action="store_true", help="Delete old records from CAM table") arg( "--radiusacct", @@ -218,6 +224,42 @@ class ArpDeleter(RecordCleaner): selector = "WHERE end_time < {expiry}" +class ArpCloser(RecordCleaner): + """Closes ARP records that have "expired", i.e. they're open, but the netbox they + were collected from has been down for too long. + """ + + expiry_type = "close_arp" + + def sql(self, expiry): + """Returns the full UPDATE statement based on the expiry date""" + return f""" + WITH downsince AS ( + SELECT + netboxid, + start_time AS downsince + FROM + alerthist + WHERE + eventtypeid = 'boxState' + AND end_time >= 'infinity' + ) + UPDATE + arp + SET + end_time = NOW() + WHERE + netboxid IN ( + SELECT + netboxid + FROM + downsince + WHERE + downsince < {expiry}) + AND end_time >= 'infinity' + """ + + class CamDeleter(RecordCleaner): expiry_type = "cam" selector = "WHERE end_time < {expiry}" From 43d5543be92494877c92e082373ceaebd24568e6 Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 14 May 2024 15:20:43 +0200 Subject: [PATCH 05/20] Drop malfunctioning ARP record closure rule This rule has unintended and bad side-effects. It will cause ARP records to be immediately closed when NAV experience intermittent ICMP packet losses from a router, specifically the kind that are so short-lived that NAV never actually declares the device as down through a new `alerthist` record. `pping` will, however, use `netbox.up` as its persistent internal up/down status, which can cause ARP records to be closed as a result of non-serious flapping. 
--- python/nav/models/sql/changes/sc.05.10.0001.sql | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 python/nav/models/sql/changes/sc.05.10.0001.sql diff --git a/python/nav/models/sql/changes/sc.05.10.0001.sql b/python/nav/models/sql/changes/sc.05.10.0001.sql new file mode 100644 index 0000000000..dae97f6cb7 --- /dev/null +++ b/python/nav/models/sql/changes/sc.05.10.0001.sql @@ -0,0 +1,2 @@ +-- Remove malfunctioning ARP record closing rule, see #2910 +DROP RULE IF EXISTS netbox_close_arp ON netbox; From 9bd59bd8354211b5716a8f889952f44e353b1b60 Mon Sep 17 00:00:00 2001 From: Simon Oliver Tveit Date: Wed, 15 May 2024 15:21:37 +0200 Subject: [PATCH 06/20] Limit concurrent parallel dns lookups --- python/nav/asyncdns.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/python/nav/asyncdns.py b/python/nav/asyncdns.py index 4edd5dcfa0..2422f10b6b 100644 --- a/python/nav/asyncdns.py +++ b/python/nav/asyncdns.py @@ -33,7 +33,7 @@ from IPy import IP from twisted.names import dns from twisted.names import client -from twisted.internet import defer +from twisted.internet import defer, task # pylint: disable=E1101 from twisted.internet import reactor @@ -46,6 +46,9 @@ from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError +BATCH_SIZE = 100 + + def reverse_lookup(addresses): """Runs parallel reverse DNS lookups for addresses. @@ -81,14 +84,19 @@ def resolve(self, names): self._finished = False self.results = defaultdict(list) - deferred_list = [] - for name in names: - for deferred in self.lookup(name): - deferred.addCallback(self._extract_records, name) - deferred.addErrback(self._errback, name) - deferred_list.append(deferred) - - deferred_list = defer.DeferredList(deferred_list) + def lookup_names(): + for name in names: + for deferred in self.lookup(name): + deferred.addCallback(self._extract_records, name) + deferred.addErrback(self._errback, name) + yield deferred + + # Limits the number of parallel requests to BATCH_SIZE + coop = task.Cooperator() + work = lookup_names() + deferred_list = defer.DeferredList( + [coop.coiterate(work) for _ in range(BATCH_SIZE)] + ) deferred_list.addCallback(self._parse_result) deferred_list.addCallback(self._finish) From ee9fcd84cb14da2636f9cc139bf08025fd98f283 Mon Sep 17 00:00:00 2001 From: Simon Oliver Tveit Date: Wed, 15 May 2024 19:38:51 +0200 Subject: [PATCH 07/20] Let each separate task save itself to the results --- python/nav/asyncdns.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/python/nav/asyncdns.py b/python/nav/asyncdns.py index 2422f10b6b..a19796faa1 100644 --- a/python/nav/asyncdns.py +++ b/python/nav/asyncdns.py @@ -89,6 +89,7 @@ def lookup_names(): for deferred in self.lookup(name): deferred.addCallback(self._extract_records, name) deferred.addErrback(self._errback, name) + deferred.addCallback(self._parse_result) yield deferred # Limits the number of parallel requests to BATCH_SIZE @@ -97,7 +98,6 @@ def lookup_names(): deferred_list = defer.DeferredList( [coop.coiterate(work) for _ in range(BATCH_SIZE)] ) - deferred_list.addCallback(self._parse_result) deferred_list.addCallback(self._finish) while not self._finished: @@ -117,12 +117,11 @@ def _extract_records(result, name): raise NotImplementedError def _parse_result(self, result): - """Parses the result to the correct format""" - for _success, (name, response) in result: - if isinstance(response, Exception): - self.results[name] = response - else: - self.results[name].extend(response) + 
name, response = result + if isinstance(response, Exception): + self.results[name] = response + else: + self.results[name].extend(response) @staticmethod def _errback(failure, host): From 65722238db7e3fe003d9362f81226152fa444b50 Mon Sep 17 00:00:00 2001 From: Simon Oliver Tveit Date: Thu, 16 May 2024 14:28:53 +0200 Subject: [PATCH 08/20] Rename function to more accurate name It doesnt really do much parsing, the saving part is the most significant function of it --- python/nav/asyncdns.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/nav/asyncdns.py b/python/nav/asyncdns.py index a19796faa1..796b7546cb 100644 --- a/python/nav/asyncdns.py +++ b/python/nav/asyncdns.py @@ -89,7 +89,7 @@ def lookup_names(): for deferred in self.lookup(name): deferred.addCallback(self._extract_records, name) deferred.addErrback(self._errback, name) - deferred.addCallback(self._parse_result) + deferred.addCallback(self._save_result) yield deferred # Limits the number of parallel requests to BATCH_SIZE @@ -116,7 +116,7 @@ def lookup(self, name): def _extract_records(result, name): raise NotImplementedError - def _parse_result(self, result): + def _save_result(self, result): name, response = result if isinstance(response, Exception): self.results[name] = response From cf294693ab266c85015f410c4e759b9764dc528b Mon Sep 17 00:00:00 2001 From: Simon Oliver Tveit Date: Thu, 16 May 2024 14:21:06 +0200 Subject: [PATCH 09/20] Add changelog --- changelog.d/+2669.fixed.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/+2669.fixed.md diff --git a/changelog.d/+2669.fixed.md b/changelog.d/+2669.fixed.md new file mode 100644 index 0000000000..9e0822e9a1 --- /dev/null +++ b/changelog.d/+2669.fixed.md @@ -0,0 +1 @@ +Fix Machine Tracker DNS search crashing from exhausting all available file descriptors \ No newline at end of file From 93fca874d0e57db1284b5ef38614ac7a1fd66d1b Mon Sep 17 00:00:00 2001 From: Simon Oliver Tveit Date: Thu, 16 May 2024 15:01:44 +0200 Subject: [PATCH 10/20] Raise first error that occurred during coiteration The reactor will by default log exceptions that occur in the coiterator, resulting in nothing being raised. 
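A rough, runnable sketch of the pattern used in the diff below (the names and the two-worker batch size are illustrative stand-ins, not the actual asyncdns code): an errback attached to each coiterate() deferred stashes the failure so the caller can inspect or re-raise it after the run, instead of the reactor merely logging it.

    from twisted.internet import defer, task

    errors = []  # failures end up here instead of only in the reactor's log

    def lookups():
        """Stand-in for the per-name lookup deferreds; one of them fails"""
        yield defer.succeed("192.0.2.1")
        yield defer.fail(RuntimeError("lookup blew up"))
        yield defer.succeed("192.0.2.2")

    def main(reactor):
        coop = task.Cooperator()
        work = lookups()
        # both workers pull from the same iterator, so at most two lookups
        # are in flight at any time; the errback stashes any failure
        workers = [
            coop.coiterate(work).addErrback(lambda f: errors.append(f.value))
            for _ in range(2)
        ]
        done = defer.DeferredList(workers)
        done.addCallback(lambda _: print("captured errors:", errors))
        return done

    task.react(main)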
Code using the asyncdns lib expects certain errors to occur when something
is wrong, so this should keep the current asyncdns interfacing intact, at
the cost of taking longer to fail
---
 python/nav/asyncdns.py | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/python/nav/asyncdns.py b/python/nav/asyncdns.py
index 796b7546cb..fb1d9d1148 100644
--- a/python/nav/asyncdns.py
+++ b/python/nav/asyncdns.py
@@ -78,11 +78,13 @@ def __init__(self):
         )
         self.results = defaultdict(list)
         self._finished = False
+        self._errors = []

     def resolve(self, names):
         """Resolves DNS names in parallel"""
-        self._finished = False
         self.results = defaultdict(list)
+        self._finished = False
+        self._errors = []

         def lookup_names():
             for name in names:
@@ -96,7 +98,10 @@ def lookup_names():
         coop = task.Cooperator()
         work = lookup_names()
         deferred_list = defer.DeferredList(
-            [coop.coiterate(work) for _ in range(BATCH_SIZE)]
+            [
+                coop.coiterate(work).addErrback(self._save_error)
+                for _ in range(BATCH_SIZE)
+            ]
         )
         deferred_list.addCallback(self._finish)

@@ -106,6 +111,10 @@ def lookup_names():
             # iteration to ensure the resolver library closes its UDP sockets
             reactor.iterate()

+        # raise first error if any occurred
+        for error in self._errors:
+            raise error
+
         return dict(self.results)

     def lookup(self, name):
@@ -128,6 +137,10 @@ def _errback(failure, host):
         """Errback"""
         return host, failure.value

+    def _save_error(self, failure):
+        """Errback for coiterator. Saves error so it can be raised later"""
+        self._errors.append(failure.value)
+
     def _finish(self, _):
         self._finished = True


From 3b8fa4838aa7e94ea2c1efacc73f67034d8d0115 Mon Sep 17 00:00:00 2001
From: Morten Brekkevold
Date: Tue, 14 May 2024 13:35:00 +0000
Subject: [PATCH 11/20] Add news fragment for 2913

---
 changelog.d/2913.fixed.md | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/2913.fixed.md

diff --git a/changelog.d/2913.fixed.md b/changelog.d/2913.fixed.md
new file mode 100644
index 0000000000..130ac7ef16
--- /dev/null
+++ b/changelog.d/2913.fixed.md
@@ -0,0 +1 @@
+ARP records of unreachable devices are now closed by `navclean` cron job at configurable expiry intervals, rather than immediately as a response to a short ICMP packet loss
\ No newline at end of file

From 1b50d1a9fc951033d2d7a2c549119ad0117894d2 Mon Sep 17 00:00:00 2001
From: Morten Brekkevold
Date: Tue, 14 May 2024 13:41:14 +0000
Subject: [PATCH 12/20] Close netbox ARP records after 30min downtime

This adds a default navclean job every five minutes to close ARP records
of devices that have been down for more than 30 minutes. Needs to be a
separate job line, since it doesn't use the built-in default of 6 month
expiry interval.
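For a concrete picture of what this job amounts to, the cleaner's SQL can be rendered without touching the database — assuming a NAV environment where the module is importable, and assuming (the exact expression navclean builds from --interval is not shown here) an expiry of NOW() - interval '30 minutes':

    from nav.bin.navclean import ArpCloser

    closer = ArpCloser(connection=None)  # no connection needed just to render SQL
    print(closer.sql("NOW() - interval '30 minutes'"))
    # -> an UPDATE that sets end_time = NOW() on open ARP rows belonging to
    #    netboxes whose boxState alert started more than 30 minutes ago

The -f flag in the cron line below is what makes navclean commit the change; without it the transaction is rolled back rather than committed.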
--- python/nav/etc/cron.d/dbclean | 1 + 1 file changed, 1 insertion(+) diff --git a/python/nav/etc/cron.d/dbclean b/python/nav/etc/cron.d/dbclean index c5165c9f2a..bbcb7817d6 100644 --- a/python/nav/etc/cron.d/dbclean +++ b/python/nav/etc/cron.d/dbclean @@ -1,2 +1,3 @@ ## info: Clean database with navclean */5 * * * * navclean -f -q --netbox --websessions +*/5 * * * * navclean -f -q --close-arp --interval '30 minutes' From 8c8f2a393614a384bc7ae7b893fe08b021812f0e Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 14 May 2024 13:42:09 +0000 Subject: [PATCH 13/20] Test run the --close-arp subcommand --- python/nav/bin/navclean.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/nav/bin/navclean.py b/python/nav/bin/navclean.py index a54238e700..74d14f13ec 100755 --- a/python/nav/bin/navclean.py +++ b/python/nav/bin/navclean.py @@ -1,6 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -*- testargs: --arp -*- +# -*- testargs: --close-arp -*- # # Copyright (C) 2017 Uninett AS # Copyright (C) 2024 Sikt From 40ef9e8d6f901141a5814feed8de3dc76b6e547b Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Wed, 15 May 2024 13:42:05 +0000 Subject: [PATCH 14/20] Document the addition to `dbclean` job --- doc/reference/backend-processes.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/reference/backend-processes.rst b/doc/reference/backend-processes.rst index 15d6078949..c9c94c944b 100644 --- a/doc/reference/backend-processes.rst +++ b/doc/reference/backend-processes.rst @@ -126,7 +126,9 @@ dbclean Regularly cleans out old data from the NAV database, using the :program:`navclean` program. The standard cleanup routine removes old web user interface sessions, and deletes IP devices that have been scheduled for -deletion through either SeedDB or the API. +deletion through either SeedDB or the API. Additionally, it closes open ARP +records that have been collected from routers that have been unreachable for +more than 30 minutes (adjustable by modifying the `dbclean` cron fragment). :Dependencies: None From d4ea5c1b873b717d1d540b2bc265515ded88e22d Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 21 May 2024 13:11:23 +0000 Subject: [PATCH 15/20] Stop referring to tables Use the expiry type name without necessarily referring to it as a table. As discussed in code review. --- python/nav/bin/navclean.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/nav/bin/navclean.py b/python/nav/bin/navclean.py index 74d14f13ec..498c924c6e 100755 --- a/python/nav/bin/navclean.py +++ b/python/nav/bin/navclean.py @@ -54,8 +54,8 @@ def main(): count = cleaner.clean(expiry, dry_run=not args.force) if not args.quiet: print( - "Expired records in {table}: {count}".format( - table=cleaner.expiry_type, + "Expired {expiry_type} records: {count}".format( + expiry_type=cleaner.expiry_type, count=count if count is not None else "N/A", ) ) From d12fcbc3c951bb03de0c9acd1673e9e70aec947d Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Tue, 21 May 2024 13:13:35 +0000 Subject: [PATCH 16/20] Clarify intermediate table name As mentioned in code review, the double downsince name is slightly confusing, so this renames the intermediate lookup table to something hopefully more understandable. 
--- python/nav/bin/navclean.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/nav/bin/navclean.py b/python/nav/bin/navclean.py index 498c924c6e..5e5f60b7bf 100755 --- a/python/nav/bin/navclean.py +++ b/python/nav/bin/navclean.py @@ -235,7 +235,7 @@ class ArpCloser(RecordCleaner): def sql(self, expiry): """Returns the full UPDATE statement based on the expiry date""" return f""" - WITH downsince AS ( + WITH unreachable_devices AS ( SELECT netboxid, start_time AS downsince @@ -254,7 +254,7 @@ def sql(self, expiry): SELECT netboxid FROM - downsince + unreachable_devices WHERE downsince < {expiry}) AND end_time >= 'infinity' From b76730131d78999f3e05ca2cb2ac6e60e9554089 Mon Sep 17 00:00:00 2001 From: Joar Heimonen Date: Thu, 23 May 2024 15:44:43 +0200 Subject: [PATCH 17/20] fix: make parse_arp position agnostic --- python/nav/ipdevpoll/plugins/paloaltoarp.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/nav/ipdevpoll/plugins/paloaltoarp.py b/python/nav/ipdevpoll/plugins/paloaltoarp.py index 5d5bd0dc06..fc5ee484df 100644 --- a/python/nav/ipdevpoll/plugins/paloaltoarp.py +++ b/python/nav/ipdevpoll/plugins/paloaltoarp.py @@ -138,11 +138,11 @@ def parse_arp(arp): arps = [] root = ET.fromstring(arp) - entries = root[0][4] + entries = root.find("result").find("entries") for entry in entries: - status = entry[0].text - ip = entry[1].text - mac = entry[2].text + status = entry.find("status").text + ip = entry.find("ip").text + mac = entry.find("mac").text if status.strip() != "i": if mac != "(incomplete)": arps.append(('ifindex', IP(ip), mac)) From 4f4e8df33f1b3d463407752f4ce0b323982db191 Mon Sep 17 00:00:00 2001 From: Joar Heimonen Date: Thu, 23 May 2024 16:02:09 +0200 Subject: [PATCH 18/20] added documentation fragment --- changelog.d/2924.fixed.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/2924.fixed.md diff --git a/changelog.d/2924.fixed.md b/changelog.d/2924.fixed.md new file mode 100644 index 0000000000..909b4702dc --- /dev/null +++ b/changelog.d/2924.fixed.md @@ -0,0 +1 @@ +Parsing paloalto XML based on keys instead of indexes \ No newline at end of file From f7c02e2a832f9b8b50714287a473c97a9f985827 Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Mon, 27 May 2024 10:51:32 +0200 Subject: [PATCH 19/20] Added Joar to contributors list --- AUTHORS.rst | 4 ++++ python/nav/web/templates/webfront/about.html | 1 + 2 files changed, 5 insertions(+) diff --git a/AUTHORS.rst b/AUTHORS.rst index e9b6bf8cfe..e8f8fef389 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -44,6 +44,10 @@ Other contributors and previous maintainers Active from 2017, until he left Uninett in 2019. An experienced Python developer who, among other things, rewrote the ipdevpoll multiprocess mode. +* Joar Heimonen + Contributed Palo Alto ARP plugin to enable ipdevpoll fetch ARP data from Palo + Alto firewalls. + * Ragnhild Bodsberg Contributed various bugfixes to NAV as an intern at Sikt, during the summer of 2022. diff --git a/python/nav/web/templates/webfront/about.html b/python/nav/web/templates/webfront/about.html index 786f313351..92ca58bcab 100644 --- a/python/nav/web/templates/webfront/about.html +++ b/python/nav/web/templates/webfront/about.html @@ -93,6 +93,7 @@

Contributors

  • Ilona Podliashanyk
  • John-Magne Bredal
  • Sigmund Augdal
+  • Joar Heimonen
  • Ragnhild Bodsberg
  • Philipp Petermann
  • Leigh Murray
  • From 4797ef54fe28ec2d19241d5c12d3188301db3d70 Mon Sep 17 00:00:00 2001 From: Morten Brekkevold Date: Mon, 27 May 2024 13:22:13 +0200 Subject: [PATCH 20/20] Update changelog for 5.10.x release --- CHANGELOG.md | 14 ++++++++++++++ changelog.d/+2669.fixed.md | 1 - changelog.d/2913.fixed.md | 1 - changelog.d/2924.fixed.md | 1 - 4 files changed, 14 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/+2669.fixed.md delete mode 100644 changelog.d/2913.fixed.md delete mode 100644 changelog.d/2924.fixed.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 59ddc5a272..88c35ed8b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,20 @@ This project uses [*towncrier*](https://towncrier.readthedocs.io/) and the chang +## [5.10.1] - 2024-05-27 + + +### Fixed + +- Fix Machine Tracker DNS search crashing from exhausting all available file + descriptors ([#2669](https://github.com/Uninett/nav/issues/2669)) +- ARP records of unreachable devices are now closed by `navclean` cron job at + configurable expiry intervals, rather than immediately as a response to a + short ICMP packet loss ([#2913](https://github.com/Uninett/nav/issues/2913)) +- Palo Alto API XML responses are now parsed based on keys instead of indexes + ([#2924](https://github.com/Uninett/nav/issues/2924)) + + ## [5.10.0] - 2024-05-16 diff --git a/changelog.d/+2669.fixed.md b/changelog.d/+2669.fixed.md deleted file mode 100644 index 9e0822e9a1..0000000000 --- a/changelog.d/+2669.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix Machine Tracker DNS search crashing from exhausting all available file descriptors \ No newline at end of file diff --git a/changelog.d/2913.fixed.md b/changelog.d/2913.fixed.md deleted file mode 100644 index 130ac7ef16..0000000000 --- a/changelog.d/2913.fixed.md +++ /dev/null @@ -1 +0,0 @@ -ARP records of unreachable devices are now closed by `navclean` cron job at configurable expiry intervals, rather than immediately as a response to a short ICMP packet loss \ No newline at end of file diff --git a/changelog.d/2924.fixed.md b/changelog.d/2924.fixed.md deleted file mode 100644 index 909b4702dc..0000000000 --- a/changelog.d/2924.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Parsing paloalto XML based on keys instead of indexes \ No newline at end of file