From a5c995fe22cae832ae7a741cf10e124ca9c91e28 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 08:48:49 +0200 Subject: [PATCH 01/29] commit --- sqlserver/datadog_checks/sqlserver/deadlocks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sqlserver/datadog_checks/sqlserver/deadlocks.py b/sqlserver/datadog_checks/sqlserver/deadlocks.py index edc922cbd753a..fe31280697c29 100644 --- a/sqlserver/datadog_checks/sqlserver/deadlocks.py +++ b/sqlserver/datadog_checks/sqlserver/deadlocks.py @@ -42,6 +42,7 @@ def agent_check_getter(self): class Deadlocks(DBMAsyncJob): def __init__(self, check, config: SQLServerConfig): + #comment to enforce the CI self.tags = [t for t in check.tags if not t.startswith('dd.internal')] self._check = check self._log = self._check.log From 28110e0e9789cb096971136708eb59d6ae17b349 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 06:54:02 +0000 Subject: [PATCH 02/29] linter --- sqlserver/datadog_checks/sqlserver/deadlocks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqlserver/datadog_checks/sqlserver/deadlocks.py b/sqlserver/datadog_checks/sqlserver/deadlocks.py index fe31280697c29..b13c7b2d31196 100644 --- a/sqlserver/datadog_checks/sqlserver/deadlocks.py +++ b/sqlserver/datadog_checks/sqlserver/deadlocks.py @@ -42,7 +42,7 @@ def agent_check_getter(self): class Deadlocks(DBMAsyncJob): def __init__(self, check, config: SQLServerConfig): - #comment to enforce the CI + # comment to enforce the CI self.tags = [t for t in check.tags if not t.startswith('dd.internal')] self._check = check self._log = self._check.log From da4cbfcdd1c776da1bb8d4b16f5b5fb9d6991e8d Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 10:00:45 +0200 Subject: [PATCH 03/29] get compose logs --- datadog_checks_dev/datadog_checks/dev/docker.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index cf62dfd4c66aa..ecd44b865d083 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -242,8 +242,14 @@ def __init__(self, compose_file, build=False, service_name=None): self.command.append(self.service_name) def __call__(self): - return run_command(self.command, check=True) - + ret_value = None + try: + return run_command(self.command, check=True) + except Exception as SubprocessError: + compose_error = str(SubprocessError) + log_command = ['docker', 'compose', '-f', self.compose_file, 'logs'] + compose_logs = run_command(log_command, check=True) + raise SubprocessError(f"compose error: {compose_error} | compose logs: {compose_logs}") class ComposeFileLogs(LazyFunction): def __init__(self, compose_file, check=True): From fa0ff4a6077b43aef2665836b8c0ef4786a6d127 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 08:09:17 +0000 Subject: [PATCH 04/29] linter --- datadog_checks_dev/datadog_checks/dev/docker.py | 1 + sqlserver/datadog_checks/sqlserver/deadlocks.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index ecd44b865d083..5887d737810e3 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ 
b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -251,6 +251,7 @@ def __call__(self): compose_logs = run_command(log_command, check=True) raise SubprocessError(f"compose error: {compose_error} | compose logs: {compose_logs}") + class ComposeFileLogs(LazyFunction): def __init__(self, compose_file, check=True): self.compose_file = compose_file diff --git a/sqlserver/datadog_checks/sqlserver/deadlocks.py b/sqlserver/datadog_checks/sqlserver/deadlocks.py index b13c7b2d31196..117a3649a1278 100644 --- a/sqlserver/datadog_checks/sqlserver/deadlocks.py +++ b/sqlserver/datadog_checks/sqlserver/deadlocks.py @@ -42,7 +42,7 @@ def agent_check_getter(self): class Deadlocks(DBMAsyncJob): def __init__(self, check, config: SQLServerConfig): - # comment to enforce the CI + # force ci self.tags = [t for t in check.tags if not t.startswith('dd.internal')] self._check = check self._log = self._check.log From 3a7249c0ddfc59999348af16b04056a599378814 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 10:37:57 +0200 Subject: [PATCH 05/29] linter --- datadog_checks_dev/datadog_checks/dev/docker.py | 1 - 1 file changed, 1 deletion(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 5887d737810e3..bb2d6dfd4ff93 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -242,7 +242,6 @@ def __init__(self, compose_file, build=False, service_name=None): self.command.append(self.service_name) def __call__(self): - ret_value = None try: return run_command(self.command, check=True) except Exception as SubprocessError: From f597c7a37bccc4888dc55f15f4329ff4fa8290d1 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 11:50:23 +0200 Subject: [PATCH 06/29] import SubprocessError --- datadog_checks_dev/datadog_checks/dev/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index bb2d6dfd4ff93..ebc4b22e69d91 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -8,6 +8,7 @@ from .conditions import CheckDockerLogs from .env import environment_run, get_state, save_state +from .errors import SubprocessError from .fs import create_file, file_exists from .spec import load_spec from .structures import EnvVars, LazyFunction, TempDir From 185d89ab97b92faffb07baa31e7cc07522e709c8 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:03:01 +0200 Subject: [PATCH 07/29] fix SubprocessError --- datadog_checks_dev/datadog_checks/dev/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index ebc4b22e69d91..29844be4e3c32 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -245,8 +245,8 @@ def __init__(self, compose_file, build=False, service_name=None): def __call__(self): try: return run_command(self.command, check=True) - except Exception as SubprocessError: - compose_error = str(SubprocessError) + except SubprocessError as e: + compose_error = str(e) log_command = ['docker', 'compose', '-f', self.compose_file, 'logs'] compose_logs = run_command(log_command, 
check=True) raise SubprocessError(f"compose error: {compose_error} | compose logs: {compose_logs}") From 06236da7439864cf953276962e515c87a016b1df Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:29:53 +0200 Subject: [PATCH 08/29] remove -f --- datadog_checks_dev/datadog_checks/dev/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 29844be4e3c32..1caafa708e0a6 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -247,7 +247,7 @@ def __call__(self): return run_command(self.command, check=True) except SubprocessError as e: compose_error = str(e) - log_command = ['docker', 'compose', '-f', self.compose_file, 'logs'] + log_command = ['docker', 'compose', self.compose_file, 'logs'] compose_logs = run_command(log_command, check=True) raise SubprocessError(f"compose error: {compose_error} | compose logs: {compose_logs}") From ced15ffc72d22748a07174d685069f1089860e1d Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:53:47 +0200 Subject: [PATCH 09/29] capture output --- datadog_checks_dev/datadog_checks/dev/docker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 1caafa708e0a6..8910cb8739219 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -244,11 +244,11 @@ def __init__(self, compose_file, build=False, service_name=None): def __call__(self): try: - return run_command(self.command, check=True) + return run_command(self.command, check=True, capture = True) except SubprocessError as e: compose_error = str(e) - log_command = ['docker', 'compose', self.compose_file, 'logs'] - compose_logs = run_command(log_command, check=True) + log_command = ['docker', 'compose', '-f', self.compose_file, 'logs'] + compose_logs = run_command(log_command, check=True, capture = True) raise SubprocessError(f"compose error: {compose_error} | compose logs: {compose_logs}") From 7bb988b5dc6fc221a3acbdcdf5262d99280f20ec Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 11:08:32 +0000 Subject: [PATCH 10/29] linter --- datadog_checks_dev/datadog_checks/dev/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 8910cb8739219..991f9a2189b47 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -244,11 +244,11 @@ def __init__(self, compose_file, build=False, service_name=None): def __call__(self): try: - return run_command(self.command, check=True, capture = True) + return run_command(self.command, check=True, capture=True) except SubprocessError as e: compose_error = str(e) log_command = ['docker', 'compose', '-f', self.compose_file, 'logs'] - compose_logs = run_command(log_command, check=True, capture = True) + compose_logs = run_command(log_command, check=True, capture=True) raise SubprocessError(f"compose error: {compose_error} | compose logs: {compose_logs}") From c6bfffc619207edac4be28881e13a4f89563e433 Mon Sep 17 00:00:00 2001 From: Nenad 
Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:10:20 +0200 Subject: [PATCH 11/29] docker compose output --- datadog_checks_dev/datadog_checks/dev/docker.py | 8 +------- sqlserver/datadog_checks/sqlserver/deadlocks.py | 1 - 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 991f9a2189b47..9b853e7f4de31 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -243,13 +243,7 @@ def __init__(self, compose_file, build=False, service_name=None): self.command.append(self.service_name) def __call__(self): - try: - return run_command(self.command, check=True, capture=True) - except SubprocessError as e: - compose_error = str(e) - log_command = ['docker', 'compose', '-f', self.compose_file, 'logs'] - compose_logs = run_command(log_command, check=True, capture=True) - raise SubprocessError(f"compose error: {compose_error} | compose logs: {compose_logs}") + return run_command(self.command, check=True, capture=True) class ComposeFileLogs(LazyFunction): diff --git a/sqlserver/datadog_checks/sqlserver/deadlocks.py b/sqlserver/datadog_checks/sqlserver/deadlocks.py index 117a3649a1278..edc922cbd753a 100644 --- a/sqlserver/datadog_checks/sqlserver/deadlocks.py +++ b/sqlserver/datadog_checks/sqlserver/deadlocks.py @@ -42,7 +42,6 @@ def agent_check_getter(self): class Deadlocks(DBMAsyncJob): def __init__(self, check, config: SQLServerConfig): - # force ci self.tags = [t for t in check.tags if not t.startswith('dd.internal')] self._check = check self._log = self._check.log From 4ebee2c954c4eeee828c0ab0a029ca4fd4eb76ec Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:11:21 +0200 Subject: [PATCH 12/29] remove import subprocesserror --- datadog_checks_dev/datadog_checks/dev/docker.py | 1 - 1 file changed, 1 deletion(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 9b853e7f4de31..7641b40dced8c 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -8,7 +8,6 @@ from .conditions import CheckDockerLogs from .env import environment_run, get_state, save_state -from .errors import SubprocessError from .fs import create_file, file_exists from .spec import load_spec from .structures import EnvVars, LazyFunction, TempDir From 00ecfb472bbfb9724105e77f679f78a15ee52417 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 22:01:20 +0200 Subject: [PATCH 13/29] removed capture --- datadog_checks_dev/datadog_checks/dev/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 7641b40dced8c..cf62dfd4c66aa 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -242,7 +242,7 @@ def __init__(self, compose_file, build=False, service_name=None): self.command.append(self.service_name) def __call__(self): - return run_command(self.command, check=True, capture=True) + return run_command(self.command, check=True) class ComposeFileLogs(LazyFunction): From 92f634f812363381a86a0e6e05ce043816107240 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic 
<18366081+nenadnoveljic@users.noreply.github.com> Date: Thu, 17 Oct 2024 22:11:44 +0200 Subject: [PATCH 14/29] force ci --- datadog_checks_dev/datadog_checks/dev/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index cf62dfd4c66aa..6cae0515fae7b 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -242,6 +242,7 @@ def __init__(self, compose_file, build=False, service_name=None): self.command.append(self.service_name) def __call__(self): + # force ci return run_command(self.command, check=True) From 9c06709bee13e6e42519ecfa18f95ad4c32a456e Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Fri, 18 Oct 2024 08:12:55 +0200 Subject: [PATCH 15/29] capture --- datadog_checks_dev/datadog_checks/dev/docker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 6cae0515fae7b..7641b40dced8c 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -242,8 +242,7 @@ def __init__(self, compose_file, build=False, service_name=None): self.command.append(self.service_name) def __call__(self): - # force ci - return run_command(self.command, check=True) + return run_command(self.command, check=True, capture=True) class ComposeFileLogs(LazyFunction): From 51d9dac274941bf4201818826d5339a18a2f13a1 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Sat, 19 Oct 2024 09:38:49 +0000 Subject: [PATCH 16/29] propagate capture --- datadog_checks_dev/datadog_checks/dev/docker.py | 13 ++++++++++--- sqlserver/tests/conftest.py | 4 +++- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index 7641b40dced8c..ef7272be88326 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -121,6 +121,7 @@ def docker_run( wrappers=None, attempts=None, attempts_wait=1, + capture=None, ): """ A convenient context manager for safely setting up and tearing down Docker environments. 
@@ -169,7 +170,10 @@ def docker_run( if not isinstance(compose_file, str): raise TypeError('The path to the compose file is not a string: {}'.format(repr(compose_file))) - set_up = ComposeFileUp(compose_file, build=build, service_name=service_name) + composeFileArgs = {'compose_file': compose_file, 'build': build, 'service_name': service_name} + if capture is not None: + composeFileArgs['capture'] = capture + set_up = ComposeFileUp(**composeFileArgs) if down is not None: tear_down = down else: @@ -229,7 +233,7 @@ def docker_run( class ComposeFileUp(LazyFunction): - def __init__(self, compose_file, build=False, service_name=None): + def __init__(self, compose_file, build=False, service_name=None, capture=None): self.compose_file = compose_file self.build = build self.service_name = service_name @@ -242,7 +246,10 @@ def __init__(self, compose_file, build=False, service_name=None): self.command.append(self.service_name) def __call__(self): - return run_command(self.command, check=True, capture=True) + args = {'check': True} + if self.capture is not None: + args['capture'] = self.capture + return run_command(self.command, **args) class ComposeFileLogs(LazyFunction): diff --git a/sqlserver/tests/conftest.py b/sqlserver/tests/conftest.py index dc28a32ecb29b..8f7d1dc9b3090 100644 --- a/sqlserver/tests/conftest.py +++ b/sqlserver/tests/conftest.py @@ -328,5 +328,7 @@ def high_cardinality_env_is_ready(): conditions += [CheckDockerLogs(compose_file, completion_message)] - with docker_run(compose_file=compose_file, conditions=conditions, mount_logs=True, build=True, attempts=3): + with docker_run( + compose_file=compose_file, conditions=conditions, mount_logs=True, build=True, attempts=3, capture=True + ): yield full_e2e_config, E2E_METADATA From 948111f0b6ce684072eaa4572af0db49a9c59d2a Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Sat, 19 Oct 2024 09:41:07 +0000 Subject: [PATCH 17/29] propagate capture --- sqlserver/tests/conftest.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sqlserver/tests/conftest.py b/sqlserver/tests/conftest.py index 8f7d1dc9b3090..add93fee97271 100644 --- a/sqlserver/tests/conftest.py +++ b/sqlserver/tests/conftest.py @@ -328,7 +328,5 @@ def high_cardinality_env_is_ready(): conditions += [CheckDockerLogs(compose_file, completion_message)] - with docker_run( - compose_file=compose_file, conditions=conditions, mount_logs=True, build=True, attempts=3, capture=True - ): + with docker_run(compose_file=compose_file, conditions=conditions, mount_logs=True, build=True, attempts=3, capture=True): yield full_e2e_config, E2E_METADATA From ff23ae14c648b07dfa43a7831001c360dc406e03 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Sat, 19 Oct 2024 10:10:11 +0000 Subject: [PATCH 18/29] self.capture --- datadog_checks_dev/datadog_checks/dev/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/datadog_checks_dev/datadog_checks/dev/docker.py b/datadog_checks_dev/datadog_checks/dev/docker.py index ef7272be88326..8a6579683c436 100644 --- a/datadog_checks_dev/datadog_checks/dev/docker.py +++ b/datadog_checks_dev/datadog_checks/dev/docker.py @@ -237,6 +237,7 @@ def __init__(self, compose_file, build=False, service_name=None, capture=None): self.compose_file = compose_file self.build = build self.service_name = service_name + self.capture = capture self.command = ['docker', 'compose', '-f', self.compose_file, 'up', '-d', '--force-recreate'] if self.build: From 
6ab3b424d8f163073d0e03dc8ca371eb3e1134c2 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Sat, 19 Oct 2024 10:32:04 +0000 Subject: [PATCH 19/29] linter --- sqlserver/tests/conftest.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sqlserver/tests/conftest.py b/sqlserver/tests/conftest.py index add93fee97271..8f7d1dc9b3090 100644 --- a/sqlserver/tests/conftest.py +++ b/sqlserver/tests/conftest.py @@ -328,5 +328,7 @@ def high_cardinality_env_is_ready(): conditions += [CheckDockerLogs(compose_file, completion_message)] - with docker_run(compose_file=compose_file, conditions=conditions, mount_logs=True, build=True, attempts=3, capture=True): + with docker_run( + compose_file=compose_file, conditions=conditions, mount_logs=True, build=True, attempts=3, capture=True + ): yield full_e2e_config, E2E_METADATA From 4a3c5292191e5661294c5e5021c1e4f937236334 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Sat, 19 Oct 2024 20:10:14 +0000 Subject: [PATCH 20/29] test case for docker_run capture=True --- README.md | 59 ++----------------------- datadog_checks_dev/tests/test_docker.py | 14 +++++- 2 files changed, 16 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index fd081156a41a1..0cbd2845e3415 100644 --- a/README.md +++ b/README.md @@ -1,57 +1,6 @@ -# Datadog Integrations - Core +# Logs-only Integration -| | | -| --- | --- | -| CI/CD | [![CI - Test][1]][2] [![CI - Coverage][17]][18] | -| Docs | [![Docs - Release][19]][20] | -| Meta | [![Hatch project][26]][27] [![Linting - Ruff][24]][25] [![Code style - black][21]][22] [![Typing - Mypy][28]][29] [![License - BSD-3-Clause][30]][31] | +Choose this type of integration if you only need some pipelines and a configuration for collecting and processing logs through the Agent. -This repository contains open source integrations that Datadog officially develops and supports. -To add a new integration, please see the [Integrations Extras][5] repository and the -[accompanying documentation][6]. - -The [Datadog Agent][7] packages are equipped with all the Agent integrations from this -repository, so to get started using them, you can simply [install the Agent][8] -for your operating system. The [AGENT_CHANGELOG](AGENT_CHANGELOG.md) file shows -which Integrations have been updated in each Agent version. - -## Contributing - -Working with integrations is easy, the main page of the [development docs][6] -contains all the info you need to get your dev environment up and running in minutes -to run, test and build a Check. More advanced documentation can be found [here][3]. - -## Reporting Issues - -For more information on integrations, please reference our [documentation][11] and -[knowledge base][12]. You can also visit our [help page][13] to connect with us. 
- - -[1]: https://raw.githubusercontent.com/DataDog/integrations-core/badges/test-results.svg -[2]: https://github.com/DataDog/integrations-core/actions/workflows/master.yml -[3]: https://datadoghq.dev/integrations-core/ -[5]: https://github.com/DataDog/integrations-extras -[6]: https://docs.datadoghq.com/developers/integrations/ -[7]: https://github.com/DataDog/datadog-agent -[8]: https://app.datadoghq.com/account/settings/agent/latest -[9]: https://docs.pytest.org/en/latest/ -[10]: https://packaging.python.org/tutorials/distributing-packages/ -[11]: https://docs.datadoghq.com -[12]: https://help.datadoghq.com/hc/en-us -[13]: https://docs.datadoghq.com/help/ -[15]: https://github.com/DataDog/integrations-core/blob/6.2.1/requirements-integration-core.txt -[16]: https://github.com/DataDog/integrations-core/blob/ea2dfbf1e8859333af4c8db50553eb72a3b466f9/requirements-agent-release.txt -[17]: https://codecov.io/github/DataDog/integrations-core/coverage.svg?branch=master -[18]: https://codecov.io/github/DataDog/integrations-core?branch=master -[19]: https://github.com/DataDog/integrations-core/workflows/docs/badge.svg -[20]: https://github.com/DataDog/integrations-core/actions?workflow=docs -[21]: https://img.shields.io/badge/code%20style-black-000000.svg -[22]: https://github.com/ambv/black -[24]: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v0.json -[25]: https://github.com/charliermarsh/ruff -[26]: https://img.shields.io/badge/%F0%9F%A5%9A-Hatch-4051b5.svg -[27]: https://github.com/pypa/hatch -[28]: https://img.shields.io/badge/typing-Mypy-blue.svg -[29]: https://github.com/python/mypy -[30]: https://img.shields.io/badge/license-BSD--3--Clause-9400d3.svg -[31]: https://spdx.org/licenses/BSD-3-Clause.html +To help you get started with your config, this integration is turned into a Python package that can be installed in the Agent. +These integrations are released just like Agent Checks, and the changelog is managed with towncrier in integrations-core. diff --git a/datadog_checks_dev/tests/test_docker.py b/datadog_checks_dev/tests/test_docker.py index 16ca9ac76a6f3..52b4834d0c1d2 100644 --- a/datadog_checks_dev/tests/test_docker.py +++ b/datadog_checks_dev/tests/test_docker.py @@ -36,11 +36,21 @@ def test_up(self): class TestDockerRun: - def test_compose_file(self): + @pytest.mark.parametrize( + "capture", + [ + None, + True, + ], + ) + def test_compose_file(self, capture): compose_file = os.path.join(DOCKER_DIR, 'test_default.yaml') try: - with docker_run(compose_file): + args = {} + if capture is not None: + args['capture'] = capture + with docker_run(compose_file, **args): assert compose_file_active(compose_file) is True assert compose_file_active(compose_file) is False finally: From d5ef46a03a2d45fd9307a8288095858442ec87e2 Mon Sep 17 00:00:00 2001 From: Nenad Noveljic <18366081+nenadnoveljic@users.noreply.github.com> Date: Sun, 20 Oct 2024 08:32:56 +0000 Subject: [PATCH 21/29] restored README.md --- README.md | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0cbd2845e3415..fd081156a41a1 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,57 @@ -# Logs-only Integration +# Datadog Integrations - Core -Choose this type of integration if you only need some pipelines and a configuration for collecting and processing logs through the Agent. 
+| | | +| --- | --- | +| CI/CD | [![CI - Test][1]][2] [![CI - Coverage][17]][18] | +| Docs | [![Docs - Release][19]][20] | +| Meta | [![Hatch project][26]][27] [![Linting - Ruff][24]][25] [![Code style - black][21]][22] [![Typing - Mypy][28]][29] [![License - BSD-3-Clause][30]][31] | -To help you get started with your config, this integration is turned into a Python package that can be installed in the Agent. -These integrations are released just like Agent Checks, and the changelog is managed with towncrier in integrations-core. +This repository contains open source integrations that Datadog officially develops and supports. +To add a new integration, please see the [Integrations Extras][5] repository and the +[accompanying documentation][6]. + +The [Datadog Agent][7] packages are equipped with all the Agent integrations from this +repository, so to get started using them, you can simply [install the Agent][8] +for your operating system. The [AGENT_CHANGELOG](AGENT_CHANGELOG.md) file shows +which Integrations have been updated in each Agent version. + +## Contributing + +Working with integrations is easy, the main page of the [development docs][6] +contains all the info you need to get your dev environment up and running in minutes +to run, test and build a Check. More advanced documentation can be found [here][3]. + +## Reporting Issues + +For more information on integrations, please reference our [documentation][11] and +[knowledge base][12]. You can also visit our [help page][13] to connect with us. + + +[1]: https://raw.githubusercontent.com/DataDog/integrations-core/badges/test-results.svg +[2]: https://github.com/DataDog/integrations-core/actions/workflows/master.yml +[3]: https://datadoghq.dev/integrations-core/ +[5]: https://github.com/DataDog/integrations-extras +[6]: https://docs.datadoghq.com/developers/integrations/ +[7]: https://github.com/DataDog/datadog-agent +[8]: https://app.datadoghq.com/account/settings/agent/latest +[9]: https://docs.pytest.org/en/latest/ +[10]: https://packaging.python.org/tutorials/distributing-packages/ +[11]: https://docs.datadoghq.com +[12]: https://help.datadoghq.com/hc/en-us +[13]: https://docs.datadoghq.com/help/ +[15]: https://github.com/DataDog/integrations-core/blob/6.2.1/requirements-integration-core.txt +[16]: https://github.com/DataDog/integrations-core/blob/ea2dfbf1e8859333af4c8db50553eb72a3b466f9/requirements-agent-release.txt +[17]: https://codecov.io/github/DataDog/integrations-core/coverage.svg?branch=master +[18]: https://codecov.io/github/DataDog/integrations-core?branch=master +[19]: https://github.com/DataDog/integrations-core/workflows/docs/badge.svg +[20]: https://github.com/DataDog/integrations-core/actions?workflow=docs +[21]: https://img.shields.io/badge/code%20style-black-000000.svg +[22]: https://github.com/ambv/black +[24]: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v0.json +[25]: https://github.com/charliermarsh/ruff +[26]: https://img.shields.io/badge/%F0%9F%A5%9A-Hatch-4051b5.svg +[27]: https://github.com/pypa/hatch +[28]: https://img.shields.io/badge/typing-Mypy-blue.svg +[29]: https://github.com/python/mypy +[30]: https://img.shields.io/badge/license-BSD--3--Clause-9400d3.svg +[31]: https://spdx.org/licenses/BSD-3-Clause.html From 4ce344e4726f51489436a52f059f749b0722e0ae Mon Sep 17 00:00:00 2001 From: Zhengda Lu Date: Thu, 17 Oct 2024 14:37:45 -0400 Subject: [PATCH 22/29] [mongo] add mongo recommended cluster monitors (#18858) * add mongo recommended 
monitors * fix typo --- mongo/assets/monitors/high_connections.json | 6 ++-- .../assets/monitors/high_fsstorage_usage.json | 35 +++++++++++++++++++ .../assets/monitors/high_replication_lag.json | 35 +++++++++++++++++++ mongo/assets/monitors/low_oplog_window.json | 35 +++++++++++++++++++ .../monitors/unhealthy_repliset_member.json | 34 ++++++++++++++++++ mongo/manifest.json | 6 +++- 6 files changed, 147 insertions(+), 4 deletions(-) create mode 100644 mongo/assets/monitors/high_fsstorage_usage.json create mode 100644 mongo/assets/monitors/high_replication_lag.json create mode 100644 mongo/assets/monitors/low_oplog_window.json create mode 100644 mongo/assets/monitors/unhealthy_repliset_member.json diff --git a/mongo/assets/monitors/high_connections.json b/mongo/assets/monitors/high_connections.json index 24afc5a88117e..cf0f2d40db26e 100644 --- a/mongo/assets/monitors/high_connections.json +++ b/mongo/assets/monitors/high_connections.json @@ -1,14 +1,14 @@ { "version": 2, "created_at": "2020-08-05", - "last_updated_at": "2021-01-11", + "last_updated_at": "2024-10-16", "title": "Connection pool is reaching saturation", "tags": [ "integration:mongodb" ], "description": "A connection pool helps reduce application latency and the number of times new connections are created. This monitor tracks the number of incoming connections to alert when the connection pool is near the saturation point.", "definition": { - "message": "The number of incoming connections is reaching the maximum. {{value}} % of the available connections have been used on {{replset_name.name}}", + "message": "The number of incoming connections is reaching the maximum. {{value}} % of the available connections have been used on MongoDB Cluster {{clustername.name}} Replica Set {{replset_name.name}}", "name": "[MongoDB] High incoming connections", "options": { "escalation_message": "", @@ -26,7 +26,7 @@ }, "timeout_h": 0 }, - "query": "avg(last_5m):100 * sum:mongodb.connections.current{*} by {replset_name} / ( sum:mongodb.connections.current{*} by {replset_name} + sum:mongodb.connections.available{*} by {replset_name} ) > 90", + "query": "avg(last_5m):100 * sum:mongodb.connections.current{*} by {clustername,replset_name} / ( sum:mongodb.connections.current{*} by {clustername,replset_name} + sum:mongodb.connections.available{*} by {clustername,replset_name} ) > 90", "tags": [ "integration:mongodb" ], diff --git a/mongo/assets/monitors/high_fsstorage_usage.json b/mongo/assets/monitors/high_fsstorage_usage.json new file mode 100644 index 0000000000000..f0db9505e9cfc --- /dev/null +++ b/mongo/assets/monitors/high_fsstorage_usage.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "created_at": "2024-10-16", + "last_updated_at": "2024-10-16", + "title": "Used file system storage is reaching capacity", + "tags": [ + "integration:mongodb" + ], + "description": "This monitor tracks the used file system storage on a MongoDB server to alert when it is reaching capacity.", + "definition": { + "message": "The used file system storage is reaching capacity for database host {{database_instance.name}} on MongoDB Cluster {{clustername.name}}. 
{{value}} % of the total storage has been used.", + "name": "[MongoDB] High file system storage usage", + "options": { + "escalation_message": "", + "include_tags": true, + "locked": false, + "new_host_delay": 300, + "no_data_timeframe": null, + "notify_audit": false, + "notify_no_data": false, + "renotify_interval": "0", + "require_full_window": true, + "thresholds": { + "critical": 80, + "warning": 70 + }, + "timeout_h": 0 + }, + "query": "avg(last_60m):100 * avg:mongodb.stats.fsusedsize{*} by {clustername,database_instance} / avg:mongodb.stats.fstotalsize{*} by {clustername,database_instance} > 80", + "tags": [ + "integration:mongodb" + ], + "type": "query alert" + } + } \ No newline at end of file diff --git a/mongo/assets/monitors/high_replication_lag.json b/mongo/assets/monitors/high_replication_lag.json new file mode 100644 index 0000000000000..1428bbc6c966f --- /dev/null +++ b/mongo/assets/monitors/high_replication_lag.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "created_at": "2024-10-16", + "last_updated_at": "2024-10-16", + "title": "High replication lag", + "tags": [ + "integration:mongodb" + ], + "description": "This monitor tracks the replication lag on a MongoDB replica set to alert when it is high.", + "definition": { + "message": "MongoDB Cluster {{clustername.name}} member {{member.name}} replication lag is high. The replication lag is {{value}} seconds.", + "name": "[MongoDB] High replication lag", + "options": { + "escalation_message": "", + "include_tags": true, + "locked": false, + "new_host_delay": 300, + "no_data_timeframe": null, + "notify_audit": false, + "notify_no_data": false, + "renotify_interval": "0", + "require_full_window": true, + "thresholds": { + "critical": 120, + "warning": 60 + }, + "timeout_h": 0 + }, + "query": "avg(last_5m):100 * avg:mongodb.replset.optime_lag{*} by {clustername,member} > 120", + "tags": [ + "integration:mongodb" + ], + "type": "query alert" + } + } \ No newline at end of file diff --git a/mongo/assets/monitors/low_oplog_window.json b/mongo/assets/monitors/low_oplog_window.json new file mode 100644 index 0000000000000..020fbb6408143 --- /dev/null +++ b/mongo/assets/monitors/low_oplog_window.json @@ -0,0 +1,35 @@ +{ + "version": 2, + "created_at": "2024-10-16", + "last_updated_at": "2024-10-16", + "title": "Low oplog window", + "tags": [ + "integration:mongodb" + ], + "description": "This monitor tracks the oplog window on a MongoDB replica set to alert when it is insufficient.", + "definition": { + "message": "Oplog window for database host {{database_instance.name}} on MongoDB Cluster {{clustername.name}} is below the threshold. 
The oplog window is {{value}} seconds.", + "name": "[MongoDB] Low oplog window", + "options": { + "escalation_message": "", + "include_tags": true, + "locked": false, + "new_host_delay": 300, + "no_data_timeframe": null, + "notify_audit": false, + "notify_no_data": false, + "renotify_interval": "0", + "require_full_window": true, + "thresholds": { + "critical": 3600, + "warning": 7200 + }, + "timeout_h": 0 + }, + "query": "avg(last_60m):100 * avg:mongodb.oplog.timediff{*} by {clustername,database_instance} < 3600", + "tags": [ + "integration:mongodb" + ], + "type": "query alert" + } + } \ No newline at end of file diff --git a/mongo/assets/monitors/unhealthy_repliset_member.json b/mongo/assets/monitors/unhealthy_repliset_member.json new file mode 100644 index 0000000000000..9e9342872418f --- /dev/null +++ b/mongo/assets/monitors/unhealthy_repliset_member.json @@ -0,0 +1,34 @@ +{ + "version": 2, + "created_at": "2024-10-16", + "last_updated_at": "2024-10-16", + "title": "Unhealthy replica set member", + "tags": [ + "integration:mongodb" + ], + "description": "This monitor tracks the health of a MongoDB replica set member to alert when it is unhealthy.", + "definition": { + "message": "MongoDB Cluster {{clustername.name}} replica set member {{database_instance.name}} is unhealthy.", + "name": "[MongoDB] Unhealthy replica set member", + "options": { + "escalation_message": "", + "include_tags": true, + "locked": false, + "new_host_delay": 300, + "no_data_timeframe": null, + "notify_audit": false, + "notify_no_data": false, + "renotify_interval": "0", + "require_full_window": true, + "thresholds": { + "critical": 1 + }, + "timeout_h": 0 + }, + "query": "max(last_5m):avg:mongodb.replset.health{*} by {clustername,database_instance} != 1", + "tags": [ + "integration:mongodb" + ], + "type": "query alert" + } + } \ No newline at end of file diff --git a/mongo/manifest.json b/mongo/manifest.json index bce35ffdf47a5..1cd6af62dd959 100644 --- a/mongo/manifest.json +++ b/mongo/manifest.json @@ -63,7 +63,11 @@ "mongodb": "assets/dashboards/overview.json" }, "monitors": { - "Connection pool is reaching saturation": "assets/monitors/high_connections.json" + "Connection pool is reaching saturation": "assets/monitors/high_connections.json", + "High replication lag": "assets/monitors/high_replication_lag.json", + "Low oplog window": "assets/monitors/low_oplog_window.json", + "Unhealthy replica set member": "assets/monitors/unhealthy_repliset_member.json", + "Used file system storage is reaching capacity": "assets/monitors/high_fsstorage_usage.json" }, "saved_views": { "operations_by_type_overview": "assets/saved_views/operations_by_type_overview.json", From 32cf609cba0300c1454eb5b5849fe7de2ed9159f Mon Sep 17 00:00:00 2001 From: Enrico Donnici Date: Fri, 18 Oct 2024 10:21:46 +0200 Subject: [PATCH 23/29] * Fix test results for .NET Core. 
(#18802) * Remove leading periods from trace captures for consistency --- .github/workflows/test-target.yml | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-target.yml b/.github/workflows/test-target.yml index 74e84462ed859..52e5a5c399927 100644 --- a/.github/workflows/test-target.yml +++ b/.github/workflows/test-target.yml @@ -102,18 +102,25 @@ jobs: DDEV_E2E_AGENT_PY2: "${{ inputs.platform == 'windows' && (inputs.agent-image-windows-py2 || 'datadog/agent-dev:master-py2-win-servercore') || inputs.agent-image-py2 }}" # Test results for later processing TEST_RESULTS_BASE_DIR: "test-results" - TEST_RESULTS_DIR: "test-results/${{ inputs.job-name }}" # Tracing to monitor our test suite DD_ENV: "ci" DD_SERVICE: "ddev-integrations-${{ inputs.repo }}" DD_TAGS: "team:agent-integrations,platform:${{ inputs.platform }},integration:${{ inputs.target }}" DD_TRACE_ANALYTICS_ENABLED: "true" # Capture traces for a separate job to do the submission - TRACE_CAPTURE_BASE_DIR: ".trace-captures" - TRACE_CAPTURE_FILE: ".trace-captures/${{ inputs.job-name }}" - TRACE_CAPTURE_LOG: ".trace-captures/output.log" + TRACE_CAPTURE_BASE_DIR: "trace-captures" + TRACE_CAPTURE_LOG: "trace-captures/output.log" steps: + + - name: Set environment variables with sanitized paths + run: | + # We want to replace leading dots as they will make directories hidden, which will cause them to be ignored by upload-artifact and EnricoMi/publish-unit-test-result-action + JOB_NAME=$(echo "${{ inputs.job-name }}" | sed 's/^\./Dot/') + + echo "TEST_RESULTS_DIR=$TEST_RESULTS_BASE_DIR/$JOB_NAME" >> $GITHUB_ENV + echo "TRACE_CAPTURE_FILE=$TRACE_CAPTURE_BASE_DIR/$JOB_NAME" >> $GITHUB_ENV + - name: Set up Windows if: runner.os == 'Windows' run: |- From 3567d3aa7a8a599ed91b21f422036b02ef8bc2d0 Mon Sep 17 00:00:00 2001 From: HadhemiDD <43783545+HadhemiDD@users.noreply.github.com> Date: Fri, 18 Oct 2024 12:22:55 +0200 Subject: [PATCH 24/29] [Release] Bumped postgres version to 22.0.2 and tibco_ems version to 2.1.0 (#18872) * [Release] Bumped tibco_ems version to 2.1.0 * [Release] Bumped postgres version to 22.0.2 --- postgres/CHANGELOG.md | 6 ++++++ postgres/changelog.d/18866.fixed | 1 - postgres/datadog_checks/postgres/__about__.py | 2 +- requirements-agent-release.txt | 4 ++-- tibco_ems/CHANGELOG.md | 6 ++++++ tibco_ems/changelog.d/18840.added | 1 - tibco_ems/datadog_checks/tibco_ems/__about__.py | 2 +- 7 files changed, 16 insertions(+), 6 deletions(-) delete mode 100644 postgres/changelog.d/18866.fixed delete mode 100644 tibco_ems/changelog.d/18840.added diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 77c8d3f0a0687..05107ba22b3e2 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -2,6 +2,12 @@ +## 22.0.2 / 2024-10-18 + +***Fixed***: + +* Revert "Parse each row of pg_stat_activity separately inside a try/catch (#18762)" ([#18866](https://github.com/DataDog/integrations-core/pull/18866)) + ## 22.0.1 / 2024-10-14 ***Fixed***: diff --git a/postgres/changelog.d/18866.fixed b/postgres/changelog.d/18866.fixed deleted file mode 100644 index d4ff9d0caecc9..0000000000000 --- a/postgres/changelog.d/18866.fixed +++ /dev/null @@ -1 +0,0 @@ -Revert "Parse each row of pg_stat_activity separately inside a try/catch (#18762)" diff --git a/postgres/datadog_checks/postgres/__about__.py b/postgres/datadog_checks/postgres/__about__.py index d63d7a9be6019..882bf87b60564 100644 --- a/postgres/datadog_checks/postgres/__about__.py +++ b/postgres/datadog_checks/postgres/__about__.py 
@@ -2,4 +2,4 @@ # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = "22.0.1" +__version__ = "22.0.2" diff --git a/requirements-agent-release.txt b/requirements-agent-release.txt index 50c585038707f..ec257c0233140 100644 --- a/requirements-agent-release.txt +++ b/requirements-agent-release.txt @@ -141,7 +141,7 @@ datadog-pgbouncer==8.0.0; sys_platform != 'win32' datadog-php-fpm==5.0.0 datadog-ping-federate==2.0.0 datadog-postfix==3.0.0; sys_platform != 'win32' -datadog-postgres==22.0.1 +datadog-postgres==22.0.2 datadog-powerdns-recursor==4.0.0 datadog-presto==3.1.0 datadog-process==5.0.0 @@ -180,7 +180,7 @@ datadog-teleport==2.1.0 datadog-temporal==3.1.0 datadog-tenable==3.0.0 datadog-teradata==4.0.0; sys_platform != 'darwin' -datadog-tibco-ems==2.0.1; sys_platform != 'win32' +datadog-tibco-ems==2.1.0; sys_platform != 'win32' datadog-tls==4.0.0 datadog-tokumx==3.2.0 datadog-tomcat==4.0.0 diff --git a/tibco_ems/CHANGELOG.md b/tibco_ems/CHANGELOG.md index e6598d144cdea..36c78722ea863 100644 --- a/tibco_ems/CHANGELOG.md +++ b/tibco_ems/CHANGELOG.md @@ -2,6 +2,12 @@ +## 2.1.0 / 2024-10-18 + +***Added***: + +* Fix metric size unit parsing ([#18840](https://github.com/DataDog/integrations-core/pull/18840)) + ## 2.0.1 / 2024-10-04 ***Fixed***: diff --git a/tibco_ems/changelog.d/18840.added b/tibco_ems/changelog.d/18840.added deleted file mode 100644 index 27f3994548c56..0000000000000 --- a/tibco_ems/changelog.d/18840.added +++ /dev/null @@ -1 +0,0 @@ -Fix metric size unit parsing \ No newline at end of file diff --git a/tibco_ems/datadog_checks/tibco_ems/__about__.py b/tibco_ems/datadog_checks/tibco_ems/__about__.py index 4e2ee3360463e..f8405f9a7a8ae 100644 --- a/tibco_ems/datadog_checks/tibco_ems/__about__.py +++ b/tibco_ems/datadog_checks/tibco_ems/__about__.py @@ -1,4 +1,4 @@ # (C) Datadog, Inc. 
2024-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) -__version__ = '2.0.1' +__version__ = '2.1.0' From b31c2891cf7be4c42fa8699905ff6df950a7e711 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?zoe=C2=A0=E2=9C=A8?= <9274242+zoedt@users.noreply.github.com> Date: Fri, 18 Oct 2024 09:16:54 -0400 Subject: [PATCH 25/29] [NDM] [Cisco ACI] Utilize raw ID for interface metadata (#18842) * Utilize raw ID for interface metadata * Add changelog * Interface ID tags to utilize the interface name not ID --- cisco_aci/changelog.d/18842.added | 1 + cisco_aci/datadog_checks/cisco_aci/models.py | 3 +++ cisco_aci/datadog_checks/cisco_aci/ndm.py | 2 ++ cisco_aci/tests/fixtures/metadata.py | 18 ++++++++++++++++++ 4 files changed, 24 insertions(+) create mode 100644 cisco_aci/changelog.d/18842.added diff --git a/cisco_aci/changelog.d/18842.added b/cisco_aci/changelog.d/18842.added new file mode 100644 index 0000000000000..59e3a59c27851 --- /dev/null +++ b/cisco_aci/changelog.d/18842.added @@ -0,0 +1 @@ +[NDM] [Cisco ACI] Utilize raw ID for interface metadata diff --git a/cisco_aci/datadog_checks/cisco_aci/models.py b/cisco_aci/datadog_checks/cisco_aci/models.py index 3081d8ffd685c..8031978213f3f 100644 --- a/cisco_aci/datadog_checks/cisco_aci/models.py +++ b/cisco_aci/datadog_checks/cisco_aci/models.py @@ -135,8 +135,11 @@ class Status(StrEnum): class InterfaceMetadata(BaseModel): device_id: Optional[str] = Field(default=None) id_tags: list = Field(default_factory=list) + raw_id: Optional[str] = Field(default=None) + raw_id_type: Optional[str] = Field(default='cisco_aci') index: Optional[int] = Field(default=None) name: Optional[str] = Field(default=None) + alias: Optional[str] = Field(default=None) description: Optional[str] = Field(default=None) mac_address: Optional[str] = Field(default=None) admin_status: Optional[AdminStatus] = Field(default=None) diff --git a/cisco_aci/datadog_checks/cisco_aci/ndm.py b/cisco_aci/datadog_checks/cisco_aci/ndm.py index 5681350d6b39f..7e9c066a72708 100644 --- a/cisco_aci/datadog_checks/cisco_aci/ndm.py +++ b/cisco_aci/datadog_checks/cisco_aci/ndm.py @@ -48,9 +48,11 @@ def create_interface_metadata(phys_if, address, namespace): eth = PhysIf(**phys_if.get('l1PhysIf', {})) interface = InterfaceMetadata( device_id='{}:{}'.format(namespace, address), + raw_id=eth.attributes.id, id_tags=['interface:{}'.format(eth.attributes.name)], index=eth.attributes.id, name=eth.attributes.name, + alias=eth.attributes.id, description=eth.attributes.desc, mac_address=eth.attributes.router_mac, admin_status=eth.attributes.admin_st, diff --git a/cisco_aci/tests/fixtures/metadata.py b/cisco_aci/tests/fixtures/metadata.py index 71a5a21fedcb8..e3b1236b34d8a 100644 --- a/cisco_aci/tests/fixtures/metadata.py +++ b/cisco_aci/tests/fixtures/metadata.py @@ -137,6 +137,8 @@ INTERFACE_METADATA = [ { 'admin_status': 1, + 'alias': 'eth1/1', + 'raw_id': 'eth1/1', 'device_id': 'default:10.0.200.0', 'id_tags': [ 'interface:eth1/1', @@ -150,6 +152,8 @@ }, { 'admin_status': 1, + 'alias': 'eth1/2', + 'raw_id': 'eth1/2', 'device_id': 'default:10.0.200.0', 'id_tags': [ 'interface:eth1/2', @@ -163,6 +167,8 @@ }, { 'admin_status': 1, + 'alias': 'eth1/3', + 'raw_id': 'eth1/3', 'device_id': 'default:10.0.200.0', 'id_tags': [ 'interface:eth1/3', @@ -176,6 +182,8 @@ }, { 'admin_status': 1, + 'alias': 'eth1/1', + 'raw_id': 'eth1/1', 'device_id': 'default:10.0.200.1', 'id_tags': [ 'interface:eth1/1', @@ -189,6 +197,8 @@ }, { 'admin_status': 1, + 'alias': 'eth1/2', + 'raw_id': 'eth1/2', 
'device_id': 'default:10.0.200.1', 'id_tags': [ 'interface:eth1/2', @@ -202,6 +212,8 @@ }, { 'admin_status': 1, + 'alias': 'eth1/3', + 'raw_id': 'eth1/3', 'device_id': 'default:10.0.200.1', 'id_tags': [ 'interface:eth1/3', @@ -215,6 +227,8 @@ }, { 'admin_status': 1, + 'alias': 'eth5/1', + 'raw_id': 'eth5/1', 'device_id': 'default:10.0.200.5', 'id_tags': [ 'interface:eth5/1', @@ -228,6 +242,8 @@ }, { 'admin_status': 1, + 'alias': 'eth5/2', + 'raw_id': 'eth5/2', 'device_id': 'default:10.0.200.5', 'id_tags': [ 'interface:eth5/2', @@ -241,6 +257,8 @@ }, { 'admin_status': 1, + 'alias': 'eth7/1', + 'raw_id': 'eth7/1', 'device_id': 'default:10.0.200.5', 'id_tags': [ 'interface:eth7/1', From f05753ec5e97c2bfd24c038b9b719791ee287c30 Mon Sep 17 00:00:00 2001 From: Zhengda Lu Date: Fri, 18 Oct 2024 10:12:12 -0400 Subject: [PATCH 26/29] [postgres] Fix UnboundLocalError caused by referencing local variable start_time before assigned (#18870) * fix unboundlocal error * add test * add changelog --- postgres/changelog.d/18870.fixed | 1 + postgres/datadog_checks/postgres/metadata.py | 177 ++++++++++--------- postgres/tests/test_metadata.py | 33 ++++ 3 files changed, 126 insertions(+), 85 deletions(-) create mode 100644 postgres/changelog.d/18870.fixed diff --git a/postgres/changelog.d/18870.fixed b/postgres/changelog.d/18870.fixed new file mode 100644 index 0000000000000..7773fc1f3dd71 --- /dev/null +++ b/postgres/changelog.d/18870.fixed @@ -0,0 +1 @@ +Fix `UnboundLocalError` in postgres schema collection, ensuring proper reset of `_is_schemas_collection_in_progress` to allow consecutive collections. diff --git a/postgres/datadog_checks/postgres/metadata.py b/postgres/datadog_checks/postgres/metadata.py index bcf4179fa0cb5..09fd16752b992 100644 --- a/postgres/datadog_checks/postgres/metadata.py +++ b/postgres/datadog_checks/postgres/metadata.py @@ -275,98 +275,105 @@ def report_postgres_metadata(self): } self._check.database_monitoring_metadata(json.dumps(event, default=default_json_event_encoding)) - elapsed_s_schemas = time.time() - self._last_schemas_query_time - if ( - self._collect_schemas_enabled - and not self._is_schemas_collection_in_progress - and elapsed_s_schemas >= self.schemas_collection_interval - ): - self._is_schemas_collection_in_progress = True - status = "success" - try: - schema_metadata = self._collect_schema_info() - # We emit an event for each batch of tables to reduce total data in memory - # and keep event size reasonable - base_event = { - "host": self._check.resolved_hostname, - "agent_version": datadog_agent.get_version(), - "dbms": "postgres", - "kind": "pg_databases", - "collection_interval": self.schemas_collection_interval, - "dbms_version": self._payload_pg_version(), - "tags": self._tags_no_db, - "cloud_metadata": self._config.cloud_metadata, - } - - # Tuned from experiments on staging, we may want to make this dynamic based on schema size in the future - chunk_size = 50 - total_tables = 0 - start_time = time.time() - - for database in schema_metadata: - dbname = database["name"] - if not self._should_collect_metadata(dbname, "database"): - continue - - with self.db_pool.get_connection(dbname, self._config.idle_connection_timeout) as conn: - with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor: - for schema in database["schemas"]: - if not self._should_collect_metadata(schema["name"], "schema"): - continue - - tables = self._query_tables_for_schema(cursor, schema["id"], dbname) - self._log.debug( - "Tables found for schema '{schema}' in database 
'{database}':" - "{tables}".format( - schema=database["schemas"], - database=dbname, - tables=[table["name"] for table in tables], - ) - ) - table_chunks = list(get_list_chunks(tables, chunk_size)) + if not self._collect_schemas_enabled: + self._log.debug("Skipping schema collection because it is disabled") + return + if self._is_schemas_collection_in_progress: + self._log.debug("Skipping schema collection because it is in progress") + return + if time.time() - self._last_schemas_query_time < self.schemas_collection_interval: + self._log.debug("Skipping schema collection because it was recently collected") + return + + self._collect_postgres_schemas() - buffer_column_count = 0 - tables_buffer = [] + @tracked_method(agent_check_getter=agent_check_getter) + def _collect_postgres_schemas(self): + self._is_schemas_collection_in_progress = True + status = "success" + start_time = time.time() + total_tables = 0 + try: + schema_metadata = self._collect_schema_info() + # We emit an event for each batch of tables to reduce total data in memory + # and keep event size reasonable + base_event = { + "host": self._check.resolved_hostname, + "agent_version": datadog_agent.get_version(), + "dbms": "postgres", + "kind": "pg_databases", + "collection_interval": self.schemas_collection_interval, + "dbms_version": self._payload_pg_version(), + "tags": self._tags_no_db, + "cloud_metadata": self._config.cloud_metadata, + } + + # Tuned from experiments on staging, we may want to make this dynamic based on schema size in the future + chunk_size = 50 + + for database in schema_metadata: + dbname = database["name"] + if not self._should_collect_metadata(dbname, "database"): + continue + + with self.db_pool.get_connection(dbname, self._config.idle_connection_timeout) as conn: + with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cursor: + for schema in database["schemas"]: + if not self._should_collect_metadata(schema["name"], "schema"): + continue + + tables = self._query_tables_for_schema(cursor, schema["id"], dbname) + self._log.debug( + "Tables found for schema '{schema}' in database '{database}':" + "{tables}".format( + schema=database["schemas"], + database=dbname, + tables=[table["name"] for table in tables], + ) + ) + table_chunks = list(get_list_chunks(tables, chunk_size)) - for tables in table_chunks: - table_info = self._query_table_information(cursor, schema['name'], tables) + buffer_column_count = 0 + tables_buffer = [] - tables_buffer = [*tables_buffer, *table_info] - for t in table_info: - buffer_column_count += len(t.get("columns", [])) + for tables in table_chunks: + table_info = self._query_table_information(cursor, schema['name'], tables) - if buffer_column_count >= 100_000: - self._flush_schema(base_event, database, schema, tables_buffer) - total_tables += len(tables_buffer) - tables_buffer = [] - buffer_column_count = 0 + tables_buffer = [*tables_buffer, *table_info] + for t in table_info: + buffer_column_count += len(t.get("columns", [])) - if len(tables_buffer) > 0: + if buffer_column_count >= 100_000: self._flush_schema(base_event, database, schema, tables_buffer) total_tables += len(tables_buffer) - except Exception as e: - self._log.error("Error collecting schema metadata: %s", e) - status = "error" - finally: - elapsed_ms = (time.time() - start_time) * 1000 - self._check.histogram( - "dd.postgres.schema.time", - elapsed_ms, - tags=self._check.tags + ["status:" + status], - hostname=self._check.resolved_hostname, - raw=True, - ) - self._check.gauge( - 
"dd.postgres.schema.tables_count", - total_tables, - tags=self._check.tags + ["status:" + status], - hostname=self._check.resolved_hostname, - raw=True, - ) - datadog_agent.emit_agent_telemetry("postgres", "schema_tables_elapsed_ms", elapsed_ms, "gauge") - datadog_agent.emit_agent_telemetry("postgres", "schema_tables_count", total_tables, "gauge") - - self._is_schemas_collection_in_progress = False + tables_buffer = [] + buffer_column_count = 0 + + if len(tables_buffer) > 0: + self._flush_schema(base_event, database, schema, tables_buffer) + total_tables += len(tables_buffer) + except Exception as e: + self._log.error("Error collecting schema metadata: %s", e) + status = "error" + finally: + self._is_schemas_collection_in_progress = False + elapsed_ms = (time.time() - start_time) * 1000 + self._check.histogram( + "dd.postgres.schema.time", + elapsed_ms, + tags=self._check.tags + ["status:" + status], + hostname=self._check.resolved_hostname, + raw=True, + ) + self._check.gauge( + "dd.postgres.schema.tables_count", + total_tables, + tags=self._check.tags + ["status:" + status], + hostname=self._check.resolved_hostname, + raw=True, + ) + datadog_agent.emit_agent_telemetry("postgres", "schema_tables_elapsed_ms", elapsed_ms, "gauge") + datadog_agent.emit_agent_telemetry("postgres", "schema_tables_count", total_tables, "gauge") def _should_collect_metadata(self, name, metadata_type): for re_str in self._config.schemas_metadata_config.get( diff --git a/postgres/tests/test_metadata.py b/postgres/tests/test_metadata.py index 3a7978142b6af..c85a2ccafb6a2 100644 --- a/postgres/tests/test_metadata.py +++ b/postgres/tests/test_metadata.py @@ -4,6 +4,7 @@ from concurrent.futures.thread import ThreadPoolExecutor from typing import List +import mock import pytest from datadog_checks.base.utils.db.utils import DBMAsyncJob @@ -371,6 +372,38 @@ def test_collect_schemas_max_tables(integration_check, dbm_instance, aggregator) assert len(database_metadata[0]['schemas'][0]['tables']) == 1 +def test_collect_schemas_interrupted(integration_check, dbm_instance, aggregator): + dbm_instance["collect_schemas"] = {'enabled': True, 'collection_interval': 0.5, 'max_tables': 1} + dbm_instance['relations'] = [] + dbm_instance["database_autodiscovery"] = {"enabled": True, "include": ["datadog"]} + del dbm_instance['dbname'] + check = integration_check(dbm_instance) + with mock.patch('datadog_checks.postgres.metadata.PostgresMetadata._collect_schema_info', side_effect=Exception): + run_one_check(check, dbm_instance) + # ensures _is_schemas_collection_in_progress is reset to False after an exception + assert check.metadata_samples._is_schemas_collection_in_progress is False + dbm_metadata = aggregator.get_event_platform_events("dbm-metadata") + assert [e for e in dbm_metadata if e['kind'] == 'pg_databases'] == [] + + # next run should succeed + run_one_check(check, dbm_instance) + dbm_metadata = aggregator.get_event_platform_events("dbm-metadata") + + for schema_event in (e for e in dbm_metadata if e['kind'] == 'pg_databases'): + database_metadata = schema_event['metadata'] + assert len(database_metadata[0]['schemas'][0]['tables']) == 1 + + # Rerun check with relations enabled + dbm_instance['relations'] = [{'relation_regex': '.*'}] + check = integration_check(dbm_instance) + run_one_check(check, dbm_instance) + dbm_metadata = aggregator.get_event_platform_events("dbm-metadata") + + for schema_event in (e for e in dbm_metadata if e['kind'] == 'pg_databases'): + database_metadata = schema_event['metadata'] + assert 

From f929f0ab1af70af03e5798b231f50f4dca83fe89 Mon Sep 17 00:00:00 2001
From: Gabriel Dos Santos <91925154+gabedos@users.noreply.github.com>
Date: Fri, 18 Oct 2024 13:18:55 -0400
Subject: [PATCH 27/29] [CONTP-382] Add Validation Webhook telemetry metrics
 (#18867)

* Add Validation Webhook telemetry

* Updating webhooks_received description
---
 datadog_cluster_agent/changelog.d/18867.added | 1 +
 .../datadog_checks/datadog_cluster_agent/check.py | 1 +
 datadog_cluster_agent/metadata.csv | 3 ++-
 datadog_cluster_agent/tests/fixtures/metrics.txt | 5 ++++-
 datadog_cluster_agent/tests/test_datadog_cluster_agent.py | 1 +
 5 files changed, 9 insertions(+), 2 deletions(-)
 create mode 100644 datadog_cluster_agent/changelog.d/18867.added

diff --git a/datadog_cluster_agent/changelog.d/18867.added b/datadog_cluster_agent/changelog.d/18867.added
new file mode 100644
index 0000000000000..8374264cecc15
--- /dev/null
+++ b/datadog_cluster_agent/changelog.d/18867.added
@@ -0,0 +1 @@
+Add telemetry scraping for Validation AdmissionController
diff --git a/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/check.py b/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/check.py
index 1a1ece2ed9d95..e88ac2f4acdf6 100644
--- a/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/check.py
+++ b/datadog_cluster_agent/datadog_checks/datadog_cluster_agent/check.py
@@ -11,6 +11,7 @@
     'admission_webhooks_library_injection_attempts': 'admission_webhooks.library_injection_attempts',
     'admission_webhooks_library_injection_errors': 'admission_webhooks.library_injection_errors',
     'admission_webhooks_mutation_attempts': 'admission_webhooks.mutation_attempts',
+    'admission_webhooks_validation_attempts': 'admission_webhooks.validation_attempts',
     'admission_webhooks_patcher_attempts': 'admission_webhooks.patcher.attempts',
     'admission_webhooks_patcher_completed': 'admission_webhooks.patcher.completed',
     'admission_webhooks_patcher_errors': 'admission_webhooks.patcher.errors',
diff --git a/datadog_cluster_agent/metadata.csv b/datadog_cluster_agent/metadata.csv
index 979d12072772d..00b14ceb90bf7 100644
--- a/datadog_cluster_agent/metadata.csv
+++ b/datadog_cluster_agent/metadata.csv
@@ -17,7 +17,8 @@ datadog.cluster_agent.admission_webhooks.reconcile_errors,gauge,,,,Number of rec
 datadog.cluster_agent.admission_webhooks.reconcile_success,gauge,,success,,Number of reconcile successes per controller,0,datadog_cluster_agent,admission webhooks reconcile success,
 datadog.cluster_agent.admission_webhooks.response_duration.count,count,,,,Webhook response duration count,0,datadog_cluster_agent,webhook response duration count,
 datadog.cluster_agent.admission_webhooks.response_duration.sum,count,,second,,Webhook response duration sum,0,datadog_cluster_agent,webhook response duration sum,
-datadog.cluster_agent.admission_webhooks.webhooks_received,gauge,,,,Number of mutation webhook requests received,0,datadog_cluster_agent,admission webhooks received,
+datadog.cluster_agent.admission_webhooks.validation_attempts,gauge,,,,Number of pod validation attempts by validation type,0,datadog_cluster_agent,admission webhooks validation attempts,
+datadog.cluster_agent.admission_webhooks.webhooks_received,gauge,,,,Number of webhook requests received,0,datadog_cluster_agent,admission webhooks received,
 datadog.cluster_agent.aggregator.flush,count,,,,"Number of metrics/service checks/events flushed by (data_type, state)",0,datadog_cluster_agent,aggregator flush,
 datadog.cluster_agent.aggregator.processed,count,,,,Amount of metrics/services_checks/events processed by the aggregator by data_type,0,datadog_cluster_agent,aggregator processed,
 datadog.cluster_agent.api_requests,count,,request,,"Requests made to the cluster agent API by (handler, status)",0,datadog_cluster_agent,api requests,
diff --git a/datadog_cluster_agent/tests/fixtures/metrics.txt b/datadog_cluster_agent/tests/fixtures/metrics.txt
index dc1ee15bee468..018ba1b59fbf8 100644
--- a/datadog_cluster_agent/tests/fixtures/metrics.txt
+++ b/datadog_cluster_agent/tests/fixtures/metrics.txt
@@ -11,6 +11,9 @@ admission_webhooks_mutation_attempts{error="",injected="true",mutation_type="age
 admission_webhooks_mutation_attempts{error="",injected="true",mutation_type="agent_sidecar",status="success"} 1
 admission_webhooks_mutation_attempts{error="",injected="true",mutation_type="cws_pod_instrumentation",status="success"} 2
 admission_webhooks_mutation_attempts{error="",injected="true",mutation_type="lib_injection",status="success"} 1
+# HELP admission_webhooks_validation_attempts Number of pod validation attempts by validation type
+# TYPE admission_webhooks_validation_attempts gauge
+admission_webhooks_validation_attempts{error="",validated="true",webhook_name="kubernetes_audit",status="success"} 1
 # HELP admission_webhooks_reconcile_errors Number of reconcile errors per controller.
 # TYPE admission_webhooks_reconcile_errors gauge
 admission_webhooks_reconcile_errors{controller="secrets"} 5
@@ -34,7 +37,7 @@ admission_webhooks_response_duration_bucket{le="10"} 108
 admission_webhooks_response_duration_bucket{le="+Inf"} 108
 admission_webhooks_response_duration_sum 0.4897835529999999
 admission_webhooks_response_duration_count 108
-# HELP admission_webhooks_webhooks_received Number of mutation webhook requests received.
+# HELP admission_webhooks_webhooks_received Number of webhook requests received.
 # TYPE admission_webhooks_webhooks_received gauge
 admission_webhooks_webhooks_received 300
 # HELP aggregator__dogstatsd_contexts Count the number of dogstatsd contexts in the aggregator
diff --git a/datadog_cluster_agent/tests/test_datadog_cluster_agent.py b/datadog_cluster_agent/tests/test_datadog_cluster_agent.py
index 0bae2b520a5f9..d8c915e4ab465 100644
--- a/datadog_cluster_agent/tests/test_datadog_cluster_agent.py
+++ b/datadog_cluster_agent/tests/test_datadog_cluster_agent.py
@@ -21,6 +21,7 @@
     'admission_webhooks.library_injection_attempts',
     'admission_webhooks.library_injection_errors',
     'admission_webhooks.mutation_attempts',
+    'admission_webhooks.validation_attempts',
     'admission_webhooks.patcher.attempts',
     'admission_webhooks.patcher.completed',
     'admission_webhooks.patcher.errors',
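
Note: wiring a new cluster-agent metric takes four coordinated edits, as the
patch above shows: the raw-to-namespaced mapping in check.py, the catalog row
in metadata.csv, the Prometheus exposition fixture, and the expected-metrics
list in the test. A small sketch of the renaming step; METRICS here is a
trimmed stand-in for the real dict in check.py, and resolve_metric is an
illustrative helper, not part of the check:

    METRICS = {
        'admission_webhooks_validation_attempts': 'admission_webhooks.validation_attempts',
        'admission_webhooks_mutation_attempts': 'admission_webhooks.mutation_attempts',
    }

    NAMESPACE = 'datadog.cluster_agent'

    def resolve_metric(raw_name):
        # Map a scraped Prometheus name to its submitted Datadog name,
        # or None if the check does not collect it.
        mapped = METRICS.get(raw_name)
        return '{}.{}'.format(NAMESPACE, mapped) if mapped else None

    assert resolve_metric('admission_webhooks_validation_attempts') == (
        'datadog.cluster_agent.admission_webhooks.validation_attempts'
    )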

From 53a751a01146e277e05accfdb667d8a565a45d73 Mon Sep 17 00:00:00 2001
From: Zhengda Lu
Date: Sun, 20 Oct 2024 17:57:12 +0000
Subject: [PATCH 28/29] test windows


From f8b0ec65f2e3c6a1616bbbcf9dab65716dd9551b Mon Sep 17 00:00:00 2001
From: Zhengda Lu
Date: Sun, 20 Oct 2024 18:02:06 +0000
Subject: [PATCH 29/29] trigger test

---
 sqlserver/tests/common.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/sqlserver/tests/common.py b/sqlserver/tests/common.py
index 611da0461bb4a..3b4cbfca21c08 100644
--- a/sqlserver/tests/common.py
+++ b/sqlserver/tests/common.py
@@ -41,6 +41,7 @@ def get_local_driver():
     elif ON_WINDOWS:
         return '{ODBC Driver 18 for SQL Server}'
     else:
+        # default for Linux
         return '{ODBC Driver 18 for SQL Server}'
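
Note: the final hunk only adds a comment, but it documents the platform
dispatch in get_local_driver(). A sketch of that dispatch pattern; the
ON_MACOS branch and its return value are hypothetical, since that case is
not shown in the hunk, and the ON_* flags are assumed to come from
sys.platform checks elsewhere in common.py:

    import sys

    ON_MACOS = sys.platform == 'darwin'
    ON_WINDOWS = sys.platform == 'win32'

    def get_local_driver():
        if ON_MACOS:
            # hypothetical macOS driver path, not taken from this patch
            return '/opt/microsoft/msodbcsql18/lib/libmsodbcsql.18.dylib'
        elif ON_WINDOWS:
            return '{ODBC Driver 18 for SQL Server}'
        else:
            # default for Linux
            return '{ODBC Driver 18 for SQL Server}'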