Skip to content

Commit

Permalink
Merge branch '5.10.x'
Browse files Browse the repository at this point in the history
  • Loading branch information
lunkwill42 committed May 27, 2024
2 parents 2206edf + 4797ef5 commit cff1ffb
Show file tree
Hide file tree
Showing 9 changed files with 188 additions and 83 deletions.
4 changes: 4 additions & 0 deletions AUTHORS.rst
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@ Other contributors and previous maintainers
Active from 2017, until he left Uninett in 2019. An experienced Python
developer who, among other things, rewrote the ipdevpoll multiprocess mode.

* Joar Heimonen <contact at joar.me>
Contributed a Palo Alto ARP plugin to enable ipdevpoll to fetch ARP data from
Palo Alto firewalls.

* Ragnhild Bodsberg
Contributed various bugfixes to NAV as an intern at Sikt, during the summer
of 2022.
Expand Down
14 changes: 14 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,20 @@ This project uses [*towncrier*](https://towncrier.readthedocs.io/) and the chang

<!-- towncrier release notes start -->

## [5.10.1] - 2024-05-27


### Fixed

- Fix Machine Tracker DNS search crashing from exhausting all available file
descriptors ([#2669](https://github.com/Uninett/nav/issues/2669))
- ARP records of unreachable devices are now closed by the `navclean` cron job
  at configurable expiry intervals, rather than immediately in response to
  brief ICMP packet loss ([#2913](https://github.com/Uninett/nav/issues/2913))
- Palo Alto API XML responses are now parsed based on keys instead of indexes
([#2924](https://github.com/Uninett/nav/issues/2924))


## [5.10.0] - 2024-05-16


Expand Down
4 changes: 3 additions & 1 deletion doc/reference/backend-processes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,9 @@ dbclean
Regularly cleans out old data from the NAV database, using the
:program:`navclean` program. The standard cleanup routine removes old web user
interface sessions, and deletes IP devices that have been scheduled for
deletion through either SeedDB or the API.
deletion through either SeedDB or the API. Additionally, it closes open ARP
records that have been collected from routers that have been unreachable for
more than 30 minutes (adjustable by modifying the `dbclean` cron fragment).

:Dependencies:
None
Expand Down
58 changes: 39 additions & 19 deletions python/nav/asyncdns.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
from IPy import IP
from twisted.names import dns
from twisted.names import client
from twisted.internet import defer
from twisted.internet import defer, task

# pylint: disable=E1101
from twisted.internet import reactor
Expand All @@ -46,6 +46,9 @@
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError


BATCH_SIZE = 100


def reverse_lookup(addresses):
"""Runs parallel reverse DNS lookups for addresses.
Expand Down Expand Up @@ -75,21 +78,31 @@ def __init__(self):
)
self.results = defaultdict(list)
self._finished = False
self._errors = []

def resolve(self, names):
"""Resolves DNS names in parallel"""
self._finished = False
self.results = defaultdict(list)

deferred_list = []
for name in names:
for deferred in self.lookup(name):
deferred.addCallback(self._extract_records, name)
deferred.addErrback(self._errback, name)
deferred_list.append(deferred)

deferred_list = defer.DeferredList(deferred_list)
deferred_list.addCallback(self._parse_result)
self._finished = False
self._errors = []

def lookup_names():
for name in names:
for deferred in self.lookup(name):
deferred.addCallback(self._extract_records, name)
deferred.addErrback(self._errback, name)
deferred.addCallback(self._save_result)
yield deferred

# Limits the number of parallel requests to BATCH_SIZE
coop = task.Cooperator()
work = lookup_names()
deferred_list = defer.DeferredList(
[
coop.coiterate(work).addErrback(self._save_error)
for _ in range(BATCH_SIZE)
]
)
deferred_list.addCallback(self._finish)

while not self._finished:
Expand All @@ -98,6 +111,10 @@ def resolve(self, names):
# iteration to ensure the resolver library closes its UDP sockets
reactor.iterate()

# raise first error if any occurred
for error in self._errors:
raise error

return dict(self.results)

def lookup(self, name):
Expand All @@ -108,19 +125,22 @@ def lookup(self, name):
def _extract_records(result, name):
    """Extracts the DNS records for *name* from a lookup *result*.

    Must be implemented by subclasses; the base implementation always
    raises NotImplementedError.
    """
    raise NotImplementedError

def _parse_result(self, result):
"""Parses the result to the correct format"""
for _success, (name, response) in result:
if isinstance(response, Exception):
self.results[name] = response
else:
self.results[name].extend(response)
def _save_result(self, result):
    """Stores a single lookup result in ``self.results``.

    ``result`` is a ``(name, response)`` tuple.  A successful response
    has its records appended to the name's record list, while an
    exception replaces the name's entry entirely.
    """
    name, response = result
    if not isinstance(response, Exception):
        self.results[name].extend(response)
    else:
        self.results[name] = response

@staticmethod
def _errback(failure, host):
    """Converts a lookup failure into a ``(host, exception)`` pair"""
    error = failure.value
    return host, error

def _save_error(self, failure):
    """Errback for the coiterator.

    Collects the underlying exception so it can be raised after the
    resolver run has finished.
    """
    self._errors.append(failure.value)

def _finish(self, _result):
    """Callback that flags the resolver run as complete.

    The result argument is ignored; setting the flag ends the wait loop
    that keeps iterating the reactor.
    """
    self._finished = True

Expand Down
Loading

0 comments on commit cff1ffb

Please sign in to comment.