From 3b21080b34cfb5a505e02be98195b45b80e917d2 Mon Sep 17 00:00:00 2001 From: mbushkov Date: Mon, 23 Oct 2023 14:42:58 +0200 Subject: [PATCH] Syncing recent changes. (#1042) * devenv fixes and cleanups. Support for background processes added. * Dummy client action and flow added as a demo. * EficheckCollectHashes, EficheckDumpImage client actions removed. * Uninstall client action removed. * SendFile client action removed. * Refactoring the Interrogate flow to not rely on the Forensic Artifacts "provides" section. * Removed "checks" functionality. * Removed a number of artifact parsers (eventual goal is to remove all of them). * In GRR Fleetspeak Frontend, optimised message processing to handle spikes of messages from the same client. * Deprecated no-op "keep_client_alive" attribute in ApiCreateClientApprovalArgs. * Deprecated ListClientActionRequests API call (was no-op after Fleetspeak migration). * Removed KeepAlive, FingerprintFile, FindFiles, SendFile, Uninstall, UpdateClient, CollectEfiHashes, DumpEfiImage flows. * Added GetCrowdStrikeAgentID flow. * Deprecated GetFile flow in favor of MultiGetFile. * Made FileFinder an alias to ClientFileFinder, using ClientFileFinder by default everywhere. Legacy FileFinder is still available as LegacyFileFinder. Fixed several inconsistencies in ClientFileFinder client action. Same for RegistryFinder. * Added StatMultipleFiles and HashMultipleFiles flows to be used in UIv2. * Collecting more detailed stats about flow errors. * Newly interrogated clients now pick up active hunts automatically. * UI work: hunts table, Stat/Hash multiple files flows, hunt graphs. --- CHANGELOG | 59 -- CHANGELOG.md | 210 ++++ api_client/python/grr_api_client/client.py | 13 +- api_client/python/grr_api_client/utils.py | 11 +- colab/grr_colab/errors.py | 2 +- colab/grr_colab/fs.py | 31 +- devenv/README.md | 30 +- devenv/src/config.py | 2 +- devenv/src/mypy.ini | 3 - devenv/src/pylintrc | 8 - devenv/src/pytest.ini | 1 - devenv/src/reslib.py | 257 ++++- devenv/src/util/__init__.py | 50 + devenv/src/util/term.py | 6 + grr/client/grr_response_client/actions.py | 4 - .../client_actions/action_test.py | 26 +- .../client_actions/dummy.py | 24 + .../client_actions/dummy_test.py | 53 + .../client_actions/file_finder.py | 25 +- .../file_finder_utils/globbing.py | 16 +- .../client_actions/linux/linux.py | 12 - .../client_actions/memory.py | 21 +- .../client_actions/operating_system.py | 1 - .../client_actions/osx/firmware.py | 153 --- .../client_actions/osx/firmware_test.py | 127 --- .../client_actions/osx/osx.py | 41 - .../client_actions/registry_init.py | 15 +- .../client_actions/standard.py | 58 -- .../client_actions/tempfiles_test.py | 3 +- .../client_actions/vfs_file_finder.py | 12 +- .../client_actions/windows/dummy.py | 24 + .../client_actions/windows/dummy_test.py | 54 + .../client_actions/windows/windows.py | 23 - .../grr_response_client/client_utils.py | 1 - .../client_utils_common.py | 2 + .../grr_response_client/client_utils_linux.py | 5 - .../grr_response_client/client_utils_osx.py | 5 - .../client_utils_windows.py | 8 - .../build_helpers.py | 4 +- .../build_helpers_test.py | 18 +- .../grr_response_client_builder/pkg_utils.py | 4 +- grr/core/grr_response_core/config/__init__.py | 1 - .../grr_response_core/config/artifacts.py | 34 +- grr/core/grr_response_core/config/checks.py | 14 - grr/core/grr_response_core/config/server.py | 9 + grr/core/grr_response_core/lib/parsers/all.py | 5 - .../lib/parsers/eficheck_parser.py | 39 - .../lib/parsers/eficheck_parser_test.py | 41 -
.../lib/parsers/linux_release_parser.py | 26 +- .../lib/parsers/linux_release_parser_test.py | 42 +- .../lib/parsers/osx_file_parser.py | 5 +- .../lib/parsers/wmi_parser.py | 80 -- .../lib/parsers/wmi_parser_test.py | 54 - grr/core/grr_response_core/lib/rdfvalue.py | 23 + .../grr_response_core/lib/rdfvalue_test.py | 24 + .../lib/rdfvalues/apple_firmware.py | 48 - .../lib/rdfvalues/artifacts.py | 2 +- .../lib/rdfvalues/client_action.py | 16 - .../grr_response_core/lib/rdfvalues/crypto.py | 15 - .../lib/rdfvalues/crypto_test.py | 22 +- .../grr_response_core/lib/rdfvalues/dummy.py | 19 + .../grr_response_core/lib/rdfvalues/flows.py | 19 - .../lib/rdfvalues/standard.py | 2 +- .../lib/rdfvalues/structs.py | 13 +- grr/core/grr_response_core/lib/util/cache.py | 219 +++- .../grr_response_core/lib/util/cache_test.py | 253 ++++- .../grr_response_core/lib/util/filesystem.py | 26 +- .../stats/stats_test_utils.py | 25 + .../grr_response_core/stats/stats_utils.py | 7 +- grr/proto/grr_response_proto/api/client.proto | 32 - grr/proto/grr_response_proto/api/user.proto | 6 +- .../grr_response_proto/apple_firmware.proto | 57 -- .../grr_response_proto/crowdstrike.proto | 12 + grr/proto/grr_response_proto/deprecated.proto | 162 +++ grr/proto/grr_response_proto/dummy.proto | 23 + grr/proto/grr_response_proto/export.proto | 6 +- grr/proto/grr_response_proto/flows.proto | 76 +- grr/proto/grr_response_proto/jobs.proto | 36 +- .../grr_response_proto/knowledge_base.proto | 2 + grr/proto/grr_response_proto/rrg.proto | 9 +- .../rrg/action/list_connections.proto | 14 + grr/proto/grr_response_proto/rrg/net.proto | 93 ++ .../grr_response_server/action_registry.py | 5 +- grr/server/grr_response_server/artifact.py | 968 +++++++++++++++++- .../grr_response_server/artifact_registry.py | 17 + .../artifact_registry_test.py | 11 + .../grr_response_server/artifact_test.py | 80 +- .../bin/fleetspeak_frontend_server.py | 38 +- grr/server/grr_response_server/blob_store.py | 16 + .../blob_store_test_mixin.py | 21 + .../grr_response_server/client_fixture.py | 34 + .../grr_response_server/databases/db.py | 120 +-- .../databases/db_clients_test.py | 106 +- .../databases/db_flows_test.py | 231 +---- .../databases/db_foreman_rules_test.py | 16 +- .../databases/db_test_utils_test.py | 15 + .../grr_response_server/databases/db_utils.py | 12 +- .../grr_response_server/databases/mem.py | 2 - .../databases/mem_clients.py | 27 +- .../databases/mem_flows.py | 96 -- .../databases/mysql_clients.py | 98 +- .../databases/mysql_flows.py | 141 +-- .../databases/mysql_migrations/0021.sql | 2 + .../export_converters/memory.py | 3 +- grr/server/grr_response_server/flow_base.py | 55 +- .../grr_response_server/flow_base_test.py | 135 ++- grr/server/grr_response_server/flow_test.py | 11 +- grr/server/grr_response_server/flows/file.py | 10 +- .../grr_response_server/flows/file_test.py | 72 +- .../flows/general/administrative.py | 166 +-- .../flows/general/administrative_test.py | 89 +- .../flows/general/apple_firmware.py | 120 --- .../flows/general/apple_firmware_test.py | 178 ---- .../flows/general/collectors_test.py | 54 +- .../flows/general/crowdstrike.py | 149 +++ .../flows/general/crowdstrike_test.py | 203 ++++ .../flows/general/discovery.py | 40 +- .../flows/general/discovery_test.py | 125 ++- .../flows/general/dummy.py | 78 ++ .../flows/general/dummy_test.py | 104 ++ .../flows/general/file_finder.py | 63 +- .../flows/general/file_finder_test.py | 451 +++++--- .../flows/general/filesystem.py | 42 + .../grr_response_server/flows/general/find.py | 95 -- 
.../flows/general/find_test.py | 163 --- .../flows/general/fingerprint.py | 45 +- .../flows/general/fingerprint_test.py | 75 -- .../flows/general/osquery.py | 2 +- .../flows/general/registry.py | 22 +- .../flows/general/registry_finder_test.py | 59 +- .../flows/general/registry_init.py | 5 +- .../flows/general/registry_test.py | 14 +- .../flows/general/transfer.py | 39 +- .../flows/general/webhistory_test.py | 2 +- grr/server/grr_response_server/foreman.py | 58 +- .../grr_response_server/frontend_lib.py | 16 +- .../grr_response_server/frontend_lib_test.py | 25 + .../gui/api_call_router.py | 9 - .../api_call_router_with_approval_checks.py | 41 +- ...i_call_router_with_approval_checks_test.py | 44 +- .../gui/api_call_router_without_checks.py | 3 - .../gui/api_integration_tests/hunt_test.py | 3 +- .../gui/api_plugins/client.py | 74 +- .../gui/api_plugins/client_regression_test.py | 70 -- .../gui/api_plugins/client_test.py | 19 + .../gui/api_plugins/flow.py | 4 +- .../gui/api_plugins/flow_regression_test.py | 13 +- .../gui/api_plugins/hunt.py | 2 +- .../gui/api_plugins/user.py | 13 - .../gui/api_plugins/user_test.py | 10 - .../gui/api_plugins/yara.py | 2 +- .../gui/api_regression_http.py | 17 - .../grr_response_server/gui/gui_test_lib.py | 23 +- .../gui/selenium_tests/flow_copy_test.py | 17 +- .../selenium_tests/flow_management_test.py | 53 +- .../gui/selenium_tests/hunt_archive_test.py | 2 +- .../gui/selenium_tests/hunt_view_test.py | 29 +- .../gui/selenium_tests/notifications_test.py | 4 +- .../gui/selenium_tests/v2/file_test.py | 415 ++++++++ .../acl/request-approval-dialog-directive.js | 6 - .../request-approval-dialog-directive_test.js | 44 - .../acl/request-approval-dialog.html | 10 - .../docs/api-docs-examples.json | 888 ++++++++++------ .../docs/api-v2-docs-examples.json | 320 +++--- .../components/approval_card/approval_card.ts | 4 +- .../components/approval_chip/approval_chip.ts | 2 +- .../entry_history_button.ts | 2 +- .../data_renderers/process/process_view.scss | 1 + .../data_renderers/table/flow_table.ts | 54 + .../data_renderers/table/flow_table_test.ts | 152 +++ .../data_renderers/table/table.scss | 39 + .../components/data_renderers/table/table.ts | 19 +- .../expandable_hash/expandable_hash.ts | 2 +- .../components/file_details/file_details.ts | 2 +- .../flow_args_form/flow_args_form.ts | 2 +- .../flow_args_form/flow_args_form_test.ts | 2 +- .../osquery_query_helper/table_info_item.ts | 2 +- .../ui/components/flow_args_form/sub_forms.ts | 4 +- .../components/flow_details/flow_details.ts | 2 +- .../helpers/osquery_results_table.ts | 2 +- .../flow_details/plugin_registry.ts | 4 + .../hash_multiple_files_details.ng.html | 8 + .../plugins/hash_multiple_files_details.ts | 71 ++ .../hash_multiple_files_details_test.ts | 53 + .../components/flow_details/plugins/module.ts | 4 + .../components/flow_details/plugins/plugin.ts | 2 +- .../stat_multiple_files_details.ng.html | 8 + .../plugins/stat_multiple_files_details.ts | 73 ++ .../stat_multiple_files_details_test.ts | 51 + .../ui/components/flow_list/flow_list_test.ts | 22 +- .../components/flow_picker/flow_list_item.ts | 126 ++- .../flow_picker/flow_picker.ng.html | 1 + .../components/flow_picker/flows_overview.ts | 2 +- .../glob_expression_input.ts | 2 +- .../recent_client_flows.ts | 2 +- .../recent_client_flows_test.ts | 8 +- .../hunt/hunt_arguments/hunt_arguments.ts | 2 +- .../hunt_flow_arguments.ts | 2 +- .../hunt_original_reference.ts | 2 +- .../hunt_progress/hunt_progress.ng.html | 40 +- .../hunt_progress/hunt_progress.scss | 15 
+- .../hunt_page/hunt_progress/hunt_progress.ts | 101 +- .../hunt_progress/hunt_progress_test.ts | 691 +++++++++++-- .../hunt/hunt_page/hunt_progress/module.ts | 6 + .../hunt_progress_chart.ng.html | 7 + .../hunt_progress_chart.scss | 9 + .../hunt_progress_chart.ts | 192 ++++ .../hunt_progress_chart_test.ts | 163 +++ .../hunt/hunt_status_chip/hunt_status_chip.ts | 2 +- .../components/hunt/new_hunt/new_hunt_test.ts | 26 +- .../gui/ui/lib/api/api_interfaces.ts | 138 +-- .../gui/ui/lib/api_translation/client.ts | 4 +- .../gui/ui/lib/api_translation/flow_test.ts | 6 +- .../gui/ui/lib/api_translation/result.ts | 6 + .../gui/ui/lib/dataviz/chart_legend.ts | 103 ++ .../gui/ui/lib/dataviz/chart_legend_test.ts | 133 +++ .../gui/ui/lib/dataviz/line_chart.ts | 568 ++++++++++ .../gui/ui/lib/dataviz/line_chart_test.ts | 580 +++++++++++ .../gui/ui/lib/dataviz/padding.ts | 18 + .../gui/ui/lib/markdown.ts | 2 +- .../gui/ui/lib/models/flow.ts | 16 +- .../gui/ui/package-lock.json | 555 +++++++++- .../grr_response_server/gui/ui/package.json | 2 + .../ui/store/client_page_global_store_test.ts | 22 +- .../gui/ui/store/config_global_store_test.ts | 9 +- .../gui/ui/store/new_hunt_local_store_test.ts | 12 +- grr/server/grr_response_server/hunt_test.py | 143 ++- .../output_plugins/email_plugin.py | 5 +- .../rdfvalues/flow_objects.py | 19 - .../grr_response_server/rdfvalues/objects.py | 21 +- .../rdfvalues/objects_test.py | 55 +- .../grr_response_server/server_logging.py | 31 + .../server_logging_test.py | 51 +- .../grr_response_server/server_stubs.py | 36 +- .../end_to_end_tests/tests/__init__.py | 1 - .../end_to_end_tests/tests/dummy.py | 58 ++ .../end_to_end_tests/tests/filesystem.py | 62 -- .../end_to_end_tests/tests/fingerprint.py | 44 - .../end_to_end_tests/tests/registry.py | 14 - .../grr_response_test/run_self_update_test.py | 194 ---- .../test_data/parser_test/redhat-release | 1 + .../test_data/parser_test/rocky-release | 1 + grr/test_lib/action_mocks.py | 14 +- grr/test_lib/flow_test_lib.py | 41 +- grr/test_lib/hunt_test_lib.py | 83 +- grr/test_lib/test_lib.py | 3 +- grr/test_lib/vfs_test_lib.py | 6 +- 247 files changed, 10161 insertions(+), 4874 deletions(-) delete mode 100644 CHANGELOG create mode 100644 CHANGELOG.md delete mode 100644 devenv/src/mypy.ini delete mode 100644 devenv/src/pylintrc delete mode 100644 devenv/src/pytest.ini create mode 100644 grr/client/grr_response_client/client_actions/dummy.py create mode 100644 grr/client/grr_response_client/client_actions/dummy_test.py delete mode 100644 grr/client/grr_response_client/client_actions/osx/firmware.py delete mode 100644 grr/client/grr_response_client/client_actions/osx/firmware_test.py create mode 100644 grr/client/grr_response_client/client_actions/windows/dummy.py create mode 100644 grr/client/grr_response_client/client_actions/windows/dummy_test.py delete mode 100644 grr/core/grr_response_core/config/checks.py delete mode 100644 grr/core/grr_response_core/lib/parsers/eficheck_parser.py delete mode 100644 grr/core/grr_response_core/lib/parsers/eficheck_parser_test.py delete mode 100644 grr/core/grr_response_core/lib/rdfvalues/apple_firmware.py create mode 100644 grr/core/grr_response_core/lib/rdfvalues/dummy.py delete mode 100644 grr/proto/grr_response_proto/apple_firmware.proto create mode 100644 grr/proto/grr_response_proto/crowdstrike.proto create mode 100644 grr/proto/grr_response_proto/dummy.proto create mode 100644 grr/proto/grr_response_proto/rrg/action/list_connections.proto create mode 100644 
grr/proto/grr_response_proto/rrg/net.proto create mode 100644 grr/server/grr_response_server/databases/mysql_migrations/0021.sql delete mode 100644 grr/server/grr_response_server/flows/general/apple_firmware.py delete mode 100644 grr/server/grr_response_server/flows/general/apple_firmware_test.py create mode 100644 grr/server/grr_response_server/flows/general/crowdstrike.py create mode 100644 grr/server/grr_response_server/flows/general/crowdstrike_test.py create mode 100644 grr/server/grr_response_server/flows/general/dummy.py create mode 100644 grr/server/grr_response_server/flows/general/dummy_test.py delete mode 100644 grr/server/grr_response_server/flows/general/find.py delete mode 100644 grr/server/grr_response_server/flows/general/find_test.py delete mode 100644 grr/server/grr_response_server/flows/general/fingerprint_test.py create mode 100644 grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table.ts create mode 100644 grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table_test.ts create mode 100644 grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ng.html create mode 100644 grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ts create mode 100644 grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details_test.ts create mode 100644 grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ng.html create mode 100644 grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ts create mode 100644 grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details_test.ts create mode 100644 grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ng.html create mode 100644 grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.scss create mode 100644 grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ts create mode 100644 grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart_test.ts create mode 100644 grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend.ts create mode 100644 grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend_test.ts create mode 100644 grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart.ts create mode 100644 grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart_test.ts create mode 100644 grr/server/grr_response_server/gui/ui/lib/dataviz/padding.ts create mode 100644 grr/test/grr_response_test/end_to_end_tests/tests/dummy.py delete mode 100644 grr/test/grr_response_test/end_to_end_tests/tests/fingerprint.py delete mode 100644 grr/test/grr_response_test/run_self_update_test.py create mode 100644 grr/test/grr_response_test/test_data/parser_test/redhat-release create mode 100644 grr/test/grr_response_test/test_data/parser_test/rocky-release diff --git a/CHANGELOG b/CHANGELOG deleted file mode 100644 index cde49adfc6..0000000000 --- a/CHANGELOG +++ /dev/null @@ -1,59 +0,0 @@ -# Changelog (important and/or breaking changes). - -## Upcoming release - -* Renamed AdminUI.new_hunt_wizard.default_output_plugin to - AdminUI.new_hunt_wizard.default_output_plugins (note the "s" in the end). - The new option accepts a comma-separated list of names. 
-* Fully removed deprecated use_tsk flag. -* Removed deprecated plugin_args field from OutputPluginDescriptor. - -## 3.4.6.7 - -* Introduced Server.grr_binaries_readonly configuration option (set to False - by default). When set to True, binaries and python hacks can't be overriden - or deleted. -* Added configuration option Monitoring.http_address to specify server address - of stats server. Default value will remain 127.0.0.1. -* Updates elasticsearch output plugin post request to _bulk in the - elasticsearch api. Adds a terminating \n and content type headers for - application/json. - -## 3.4.3.1 - -* Introduced Hunt.default_client_rate configuration option. - -## 3.4.2.4 - -* The server YAML configuration options path_globs_blacklist and - path_globs_whitelist in get_flow_files_archive of router_params of - ApiCallRobotRouter have been renamed to exclude_path_globs and - include_only_path_globs. -* The server YAML configuration option Artifacts.netgroup_user_blacklist has - been renamed to Artifacts.netgroup_ignore_users. -* The server YAML configuration options labels_whitelist and - labels_owners_whitelist in router_params of ApiLabelsRestrictedCallRouter - have been renamed to allow_labels and allow_labels_owners. -* The server YAML configuration option artifacts_whitelist of - artifact_collector_flow of router_params of ApiCallRobotRouter has been - renamed to allow_artifacts. -* The `ExecutePythonHack` flow returns a `ExecutePythonHackResponse` message - rather than raw string object as a response. -* ApiHunt.hunt_type was introduced and should be used instead of - a now-deprecated ApiHunt.name. -* Variable hunts now have their arguments filled in the ApiHunt.flow_args - attribute. -* JSON representation of `st_ino`, `st_dev`, `st_nlink`, `st_blocks`, - `st_blksize`, `st_rdev` fields of `StatEntry` now use strings rather than - integers. This is a consequence of increasing the supported integer size of - these values which might be out of bounds for JSON numbers. -* The `st_crtime` field of `StatEntry` has been renamed to `st_btime`. -* ArtifactCollectorFlowArgs, ArtifactFilesDownloaderFlowArgs: - * use_tsk is replaced with use_raw_filesystem_access - * use_tsk is kept for compatibility until 2021-04-01 - * please migrate away from use_tsk to use_raw_filesystem_access until then - * ValueError is raised if both fields are set -* WinUserActivityInvestigationArgs: - * This message is obsolete, removing it. -* ClientArtifactCollectorArgs - * Removing use_tsk, since it hasn't been used on the client side diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..d8f5f20343 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,210 @@ +# Changelog (important and/or breaking changes). + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +* Created a flow for collecting an identifier of the CrowdStrike agent. + +### Changed + +* Renamed AdminUI.new_hunt_wizard.default_output_plugin to + AdminUI.new_hunt_wizard.default_output_plugins (note the "s" at the end). + The new option accepts a comma-separated list of names. + +### Removed + +* Fully removed deprecated use_tsk flag. +* Removed deprecated plugin_args field from OutputPluginDescriptor. +* Removed deprecated flows: FingerprintFile + +## [3.4.6.7] - 2023-03-22 + +### API removed + +* Removed the `labels` field from the `Artifact` message.
This change has been + done in anticipation of the removal of the same field from the official spec + of [Forensic Artifacts](https://artifacts.readthedocs.io/en/latest/). + +### Added + +* Introduced Server.grr_binaries_readonly configuration option (set to False + by default). When set to True, binaries and python hacks can't be overridden + or deleted. +* Added configuration option Monitoring.http_address to specify server address + of stats server. Default value will remain 127.0.0.1. + + +### Changed + +* Updated the elasticsearch output plugin to post requests to _bulk in the + Elasticsearch API, adding a terminating \n and content type headers for + application/json. + +## [3.4.3.1] - 2021-05-19 + +### API added + +* Introduced KillFleetspeak, RestartFleetspeakGrrService, + DeleteFleetspeakPendingMessages, GetFleetspeakPendingMessages, + GetFleetspeakPendingMessageCount API methods to provide Fleetspeak-specific + capabilities for Fleetspeak-enabled clients. +* Introduced ListParsedFlowResults and ListFlowApplicableParsers API methods + for on-demand artifact parsing. + +### Added + +* Introduced Hunt.default_client_rate configuration option. + +## [3.4.2.4] - 2020-10-15 + +### API added + +* `GetVersion` method was introduced. It returns information about the version + of the GRR server. +* The API shell now validates the GRR server version and, if the server is + newer than the API client, fails on startup. One can bypass this behaviour + by using the `--no-check-version` flag. + +### API removed + +* ListAff4AttributeDescriptors API method (/api/reflection/aff4/attributes) + was removed. +* Support for exporting binary data in the BigQuery output plugin has been + removed. + +### API changed + +* `GetFileDetails` now raises if called on non-existing paths instead of + returning a dummy result. +* `GetVfsFilesArchive` now raises if called on non-existing paths instead of + returning an empty archive. +* All GRR Protocol Buffers messages now have proper package declarations. This + means that the type URLs of all messages have changed. The Python API client + is able to handle legacy type URLs, but if you use raw API calls, make sure + this does not break your workflow. + +### Changed + +* The server YAML configuration options path_globs_blacklist and + path_globs_whitelist in get_flow_files_archive of router_params of + ApiCallRobotRouter have been renamed to exclude_path_globs and + include_only_path_globs. +* The server YAML configuration option Artifacts.netgroup_user_blacklist has + been renamed to Artifacts.netgroup_ignore_users. +* The server YAML configuration options labels_whitelist and + labels_owners_whitelist in router_params of ApiLabelsRestrictedCallRouter + have been renamed to allow_labels and allow_labels_owners. +* The server YAML configuration option artifacts_whitelist of + artifact_collector_flow of router_params of ApiCallRobotRouter has been + renamed to allow_artifacts. +* The `ExecutePythonHack` flow returns an `ExecutePythonHackResponse` message + rather than a raw string object as a response. +* ApiHunt.hunt_type was introduced and should be used instead of + a now-deprecated ApiHunt.name. +* Variable hunts now have their arguments filled in the ApiHunt.flow_args + attribute. +* JSON representation of `st_ino`, `st_dev`, `st_nlink`, `st_blocks`, + `st_blksize`, `st_rdev` fields of `StatEntry` now use strings rather than + integers. This is a consequence of increasing the supported integer size of + these values which might be out of bounds for JSON numbers.
+* The `st_crtime` field of `StatEntry` has been renamed to `st_btime`. +* ArtifactCollectorFlowArgs, ArtifactFilesDownloaderFlowArgs: + * use_tsk is replaced with use_raw_filesystem_access + * use_tsk is kept for compatibility until 2021-04-01 + * please migrate away from use_tsk to use_raw_filesystem_access until then + * ValueError is raised if both fields are set + +### Removed + +* WinUserActivityInvestigationArgs: + * This message is obsolete, removing it. +* ClientArtifactCollectorArgs: + * Removing use_tsk, since it hasn't been used on the client side. + +## [3.3.0.0] - 2019-05-22 + +### API changed + +* ListFlows no longer includes "args" attributes in the returned flows. +* ListFlowOutputPluginsLogs, ListFlowOutputPluginErrors, + ListHuntOutputPluginLogs and ListHuntOutputPluginErrors API calls now always + report batch_index and batch_size as 0 and no longer include PluginDescriptor + into the reply. + +### API removed + +* ListHuntCrashes method no longer accepts the "filter" argument. +* ListHunts no longer fills "total_count" attribute of ApiListHuntsResult. +* `ApiHunt` no longer has an `expires` field. Instead, `duration` field has + been added which can be used to calculate expiry date: + `start_time + duration`. Note that if the hunt hasn't been started, it does + not have `start_time` and, in consequence, it does not have an expiry time + either. +* `ApiModifyHuntArgs` no longer has an `expires` field. Instead, `duration` + field has been added. +* `artifact` field of `ApiUploadArtifactArgs` no longer accepts arbitrary byte + stream. Instead, only proper strings are accepted. Since this field ought + to be the artifact description in the YAML format and YAML is required to be + UTF-8 encoded, it makes no sense to accept non-unicode objects. + +## [3.2.4.6] - 2018-12-20 + +### API changed + +* Renamed the task_eta field of the ApiClientActionRequest object to + leased_until. +* Got rid of ListCronJobFlows and GetCronJobFlow in favor of ListCronJobRuns + and GetCronJobRun. ListCronJobRuns/GetCronJobRun return ApiCronJobRun protos + instead of ApiFlow returned by deleted ListCronJobFlows/GetCronJobFlow. +* Changed CreateCronJob API call to accept newly introduced + ApiCreateCronJobArgs instead of an ApiCronJob. ApiCreateCronJobArgs only + allows creating hunt-based cron jobs. + +### API removed + +* All ApiFlowRequest responses do not fill the AFF4 specific + request_state.request field anymore. Similarly, the task_id and payload + fields in ApiFlowRequest.responses objects are not populated anymore starting + from this release. +* Flow log results returned by ApiListFlowLogsHandler do not contain the name + of the flow the logs are for anymore. +* The `ListPendingGlobalNotifications` and `DeletePendingGlobalNotification` + API methods have been deleted, since GRR no longer supports + global notifications. The corresponding protos + `ApiListPendingGlobalNotificationsResult` and + `ApiDeletePendingGlobalNotificationArgs` have been deprecated. + +## [3.2.3.2] - 2018-06-28 + +### API changed + +* GetGrrBinary API method result type has changed. It was changed to return + ApiGrrBinary object instead of a binary stream. The old behavior is + preserved in a newly introduced GetGrrBinaryBlob method. + +## [3.2.2.0] - 2018-03-12 + +### API added + +* Introduced ApiHuntLog, ApiHuntError and ApiFlowLog that are used in + ApiListHuntLogsResult, ApiListHuntErrorsResult and ApiListFlowLogsResult + respectively instead of jobs_pb2.FlowLog and jobs_pb2.HuntError.
New + structures are partially backwards compatible with the old ones when used + via JSON (in protobuf format the field indices are not compatible): + "log_message", "flow_name" and "backtrace" fields didn't change. "client_id" + field doesn't have an AFF4 prefix anymore. "urn" field was removed and + replaced with "flow_id". "timestamp" field was added. +* Added "cron_job_id" attribute to ApiCronJob. + +### API removed + +* Removed default "age" attribute from the legacy HTTP API JSON. Every value + rendered in legacy API responses will be a dictionary of {value: ..., type: + ...} instead of {value: ..., type: ..., age: ...}. +* GetClientVersions API call (/api/clients/<client_id>/versions) does not + include metadata (last ping, last clock, last boot time, last crash time) + anymore. diff --git a/api_client/python/grr_api_client/client.py b/api_client/python/grr_api_client/client.py index 657cbd8bfc..aab06d6f78 100644 --- a/api_client/python/grr_api_client/client.py +++ b/api_client/python/grr_api_client/client.py @@ -183,11 +183,12 @@ def Approval(self, username, approval_id): approval_id=approval_id, context=self._context) - def CreateApproval(self, - reason=None, - notified_users=None, - email_cc_addresses=None, - keep_client_alive=False): + def CreateApproval( + self, + reason=None, + notified_users=None, + email_cc_addresses=None, + ): """Create a new approval for the current user to access this client.""" if not reason: @@ -203,7 +204,7 @@ def CreateApproval(self, args = user_pb2.ApiCreateClientApprovalArgs( client_id=self.client_id, approval=approval, - keep_client_alive=keep_client_alive) + ) data = self._context.SendRequest("CreateClientApproval", args) return ClientApproval( diff --git a/api_client/python/grr_api_client/utils.py b/api_client/python/grr_api_client/utils.py index 6cdb35cc12..0122681bcf 100644 --- a/api_client/python/grr_api_client/utils.py +++ b/api_client/python/grr_api_client/utils.py @@ -12,15 +12,13 @@ from google.protobuf import any_pb2 from google.protobuf import wrappers_pb2 - from google.protobuf import descriptor from google.protobuf import message from google.protobuf import symbol_database - from grr_api_client import errors - -from grr_response_proto import apple_firmware_pb2 +from grr_response_proto import crowdstrike_pb2 from grr_response_proto import deprecated_pb2 +from grr_response_proto import dummy_pb2 from grr_response_proto import flows_pb2 from grr_response_proto import jobs_pb2 from grr_response_proto import large_file_pb2 @@ -28,7 +26,6 @@ from grr_response_proto import pipes_pb2 from grr_response_proto import read_low_level_pb2 from grr_response_proto import timeline_pb2 - from grr_response_proto.api import artifact_pb2 from grr_response_proto.api import client_pb2 from grr_response_proto.api import config_pb2 @@ -276,7 +273,6 @@ def RegisterProtoDescriptors( *additional_descriptors: descriptor.FileDescriptor, ) -> None: """Registers all API-related descriptors in a given symbol DB.""" - db.RegisterFileDescriptor(apple_firmware_pb2.DESCRIPTOR) db.RegisterFileDescriptor(artifact_pb2.DESCRIPTOR) db.RegisterFileDescriptor(client_pb2.DESCRIPTOR) db.RegisterFileDescriptor(config_pb2.DESCRIPTOR) @@ -291,7 +287,7 @@ def RegisterProtoDescriptors( db.RegisterFileDescriptor(user_pb2.DESCRIPTOR) db.RegisterFileDescriptor(vfs_pb2.DESCRIPTOR) db.RegisterFileDescriptor(yara_pb2.DESCRIPTOR) - + db.RegisterFileDescriptor(crowdstrike_pb2.DESCRIPTOR) db.RegisterFileDescriptor(deprecated_pb2.DESCRIPTOR) db.RegisterFileDescriptor(flows_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(jobs_pb2.DESCRIPTOR) @@ -299,6 +295,7 @@ db.RegisterFileDescriptor(osquery_pb2.DESCRIPTOR) db.RegisterFileDescriptor(pipes_pb2.DESCRIPTOR) db.RegisterFileDescriptor(timeline_pb2.DESCRIPTOR) + db.RegisterFileDescriptor(dummy_pb2.DESCRIPTOR) db.RegisterFileDescriptor( wrappers_pb2.DESCRIPTOR) # type: ignore[attr-defined] diff --git a/colab/grr_colab/errors.py b/colab/grr_colab/errors.py index e9bf44b52b..91e1146d87 100644 --- a/colab/grr_colab/errors.py +++ b/colab/grr_colab/errors.py @@ -71,7 +71,7 @@ def __init__(self, def _build_path_to_ui(self) -> Optional[Text]: if not FLAGS.grr_admin_ui_url: return None - url = '{}/#/clients/{}/flows/{}' + url = '{}/v2/clients/{}/flows/{}' return url.format(FLAGS.grr_admin_ui_url, self.client_id, self.flow_id) diff --git a/colab/grr_colab/fs.py b/colab/grr_colab/fs.py index d13ef75a12..9f1fa4d6b8 100644 --- a/colab/grr_colab/fs.py +++ b/colab/grr_colab/fs.py @@ -1,7 +1,8 @@ #!/usr/bin/env python """Module that contains API to perform filesystem operations on a GRR client.""" import io -from typing import Text, Sequence +import itertools +from typing import Sequence, Text from google.protobuf import message from grr_api_client import client @@ -12,6 +13,7 @@ from grr_colab import vfs from grr_response_proto import flows_pb2 from grr_response_proto import jobs_pb2 +from grr_response_server.flows.general import file_finder class FileSystem(object): @@ -124,7 +126,9 @@ def grep(self, path: Text, _timeout.await_flow(ff) - results = [_first_match(result.payload) for result in ff.ListResults()] + results = itertools.chain.from_iterable( + _all_matches(result.payload) for result in ff.ListResults() + ) return representer.BufferReferenceList(results) def fgrep(self, path: Text, @@ -158,7 +162,9 @@ def fgrep(self, path: Text, _timeout.await_flow(ff) - results = [_first_match(result.payload) for result in ff.ListResults()] + results = itertools.chain.from_iterable( + _all_matches(result.payload) for result in ff.ListResults() + ) return representer.BufferReferenceList(results) def wget(self, path: Text) -> Text: @@ -196,20 +202,23 @@ def _collect_file(self, path: Text) -> None: Returns: Nothing. """ - args = flows_pb2.GetFileArgs() - args.pathspec.path = path - args.pathspec.pathtype = self._path_type + args = flows_pb2.FileFinderArgs() + args.paths.append(path) + args.pathtype = self._path_type + args.action.action_type = flows_pb2.FileFinderAction.Action.DOWNLOAD try: - gf = self._client.CreateFlow(name='GetFile', args=args) + cff = self._client.CreateFlow( + name=file_finder.ClientFileFinder.__name__, args=args + ) except api_errors.AccessForbiddenError as e: raise errors.ApprovalMissingError(self.id, e) - _timeout.await_flow(gf) + _timeout.await_flow(cff) -def _first_match(result: message.Message) -> jobs_pb2.BufferReference: - """Returns first match of a file finder result. +def _all_matches(result: message.Message) -> list[jobs_pb2.BufferReference]: + """Returns all matches of a file finder result. Args: result: A file finder result message.
@@ -221,4 +230,4 @@ def _first_match(result: message.Message) -> jobs_pb2.BufferReference: if not isinstance(result, flows_pb2.FileFinderResult): raise TypeError(f'Unexpected flow result type: {type(result)}') - return result.matches[0] + return result.matches diff --git a/devenv/README.md b/devenv/README.md index dd10ef66b6..88fb1819a5 100644 --- a/devenv/README.md +++ b/devenv/README.md @@ -71,21 +71,21 @@ directory is also available, at runtime, to all GRR components. ## Development Flow Example -1. check that the dev environment can run on the host system: `bash - devenv/devenv.sh check_deps` -2. start the dev environment: `bash devenv/devenv.sh start` -3. check that everything is up and running: `bash devenv/devenv.sh status` -4. find the generated GRR client ID: `bash curl -su admin:admin - http://localhost:4280/api/clients \ | sed 1d \ | jq -r - ".items[].value.client_id.value"` Note: the above assumes the default values - in `devenv/config.py` (such as Admin UI port number and admin user details) - have not been changed. It also assumes `curl`, `sed`, and `jq` are available - on the host system. -5. open a browser and go to the Admin UI client info page: - `http://localhost:4280/#/clients/{CLIENT_ID}/host-info` -6. edit the GRR worker python code; -7. restart the `grr-worker` container so that code changes are picked up: - `devenv/devenv.sh restart grr-worker` +1. check that the dev environment can run on the host system: `bash + devenv/devenv.sh check_deps` +2. start the dev environment: `bash devenv/devenv.sh start` +3. check that everything is up and running: `bash devenv/devenv.sh status` +4. find the generated GRR client ID: `bash curl -su admin:admin + http://localhost:4280/api/clients \ | sed 1d \ | jq -r + ".items[].value.client_id.value"` Note: the above assumes the default values + in `devenv/config.py` (such as Admin UI port number and admin user details) + have not been changed. It also assumes `curl`, `sed`, and `jq` are available + on the host system. +5. open a browser and go to the Admin UI client info page: + `http://localhost:4280/v2/clients/{CLIENT_ID}` +6. edit the GRR worker python code; +7. 
restart the `grr-worker` container so that code changes are picked up: + `devenv/devenv.sh restart grr-worker` ### Debugging diff --git a/devenv/src/config.py b/devenv/src/config.py index 37acd950d3..99bbe52664 100644 --- a/devenv/src/config.py +++ b/devenv/src/config.py @@ -18,7 +18,7 @@ CONFIG["ui.admin_user"] = "admin" CONFIG["ui.admin_password"] = "admin" CONFIG["build.nodejs_version"] = "16.13.0" -CONFIG["cli.container_detach_keys"] = "ctrl-p,ctrl-q" +CONFIG["cli.container_detach_keys"] = "ctrl-p,ctrl-d" def get(key: str) -> Any: diff --git a/devenv/src/mypy.ini b/devenv/src/mypy.ini deleted file mode 100644 index 685c02599f..0000000000 --- a/devenv/src/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -disallow_untyped_defs = True -check_untyped_defs = True diff --git a/devenv/src/pylintrc b/devenv/src/pylintrc deleted file mode 100644 index 2f4ecb6a1e..0000000000 --- a/devenv/src/pylintrc +++ /dev/null @@ -1,8 +0,0 @@ -[FORMAT] - -indent-string=' ' - - -[MESSAGES CONTROL] - -disable=missing-class-docstring, missing-function-docstring diff --git a/devenv/src/pytest.ini b/devenv/src/pytest.ini deleted file mode 100644 index 2864d8308d..0000000000 --- a/devenv/src/pytest.ini +++ /dev/null @@ -1 +0,0 @@ -# devenv pytest.ini diff --git a/devenv/src/reslib.py b/devenv/src/reslib.py index 5e61c89592..565998c600 100644 --- a/devenv/src/reslib.py +++ b/devenv/src/reslib.py @@ -3,11 +3,17 @@ import abc import contextlib +import os import pathlib +import pty +import select +import shutil +import socket import subprocess import sys +import time import traceback -from typing import Any, Dict, Iterable, Iterator, List, Optional +from typing import Any, Dict, Iterable, Iterator, List, Optional, Union from . import config from . import util @@ -120,7 +126,7 @@ class HostPathVolume(Volume): """Container volume backed by a host directory.""" def is_up(self) -> bool: - return self.host_path.is_dir() + return self.host_path.exists() def create(self) -> None: raise ResourceError("Attempted to use HostPathVolume without a host path.") @@ -389,3 +395,250 @@ def create(self) -> None: def destroy(self) -> None: pass + + +class BackgroundProcess(Resource): + """A user-specified background process. + + Creating this resource will spawn a background process attached to a pseudo + TTY. Access to this PTY is managed via a control process, itself accessed via + a Unix socket. That is, the user can attach to the PTY and this way interact + with the background process. The resource is considered up/active as long as + the PTY is kept open by the background process. + + This is (very) roughly equivalent to running a process in a screen or tmux + session. + + The control process can be inspected via three files that it will create in + the devenv state dir (see CONFIG["path.state_dir"]): + - the Unix socket it will listen to for commands / attaches; + - the PID file to which it will write its PID; + - the log file, to which the control stdout/stderr are sent, together with all + output from the background/target process; this should make it easier to + debug unexpected issues with both the resource definition and the code in + this class. 
+ """ + + def __init__( + self, + name: str, + command: List[str], + deps: Optional[List[Resource]] = None, + ): + super().__init__(name, deps) + if command[0].startswith("/"): + path = command[0] + else: + maybe_path = shutil.which(command[0]) + if not maybe_path: + raise ResourceError(f"Bad BackgroundProcess command: {command}") + path = maybe_path + self._target_path: str = path + self._target_args: List[str] = command[1:] + self._ctl_sock_path = config.get("path.state_dir").joinpath( + f"{self.name}.sock" + ) + self._ctl_pid_path = config.get("path.state_dir").joinpath( + f"{self.name}.pid" + ) + self._ctl_log_path = config.get("path.state_dir").joinpath( + f"{self.name}.log" + ) + + def is_up(self) -> bool: + return self._ctl_sock_path.exists() + + def create(self) -> None: + """Create the background process, managed by a daemonized control loop.""" + + # Fork the management / control process + mgr_pid = os.fork() + if not mgr_pid: + # This is the management process. Fork again, this time with a pseudo TTY + # allocation for the child process. + pid, pty_fd = pty.fork() + if not pid: + # This is the child process which will be used to exec into the actual + # target process that this resource is intended to run in the + # background. Note that `os.exec*` never returns, but replaces the + # current process entirely. + os.execv(self._target_path, [self._target_path] + self._target_args) + else: + # On the management/control side, we daemonize and call into the main + # control loop. + os.setsid() + self._manage(pid, pty_fd) + sys.exit(0) + + # This is only reached by the main process that called `create()`. + # Having created the (background) control process, return now to other + # devenv duties. + + def destroy(self) -> None: + """Kill the background process.""" + + try: + with open(self._ctl_pid_path, "r") as pid_file: + mgr_pid: int = int(pid_file.read(32)) + sock = self._connect() + sock.send(b"EXIT\n") + sock.close() + time.sleep(1) + finally: + if self._ctl_sock_path.exists(): + util.kill_process(mgr_pid) + self._ctl_sock_path.unlink() + self._ctl_pid_path.unlink() + + def restart(self) -> None: + """Restart the background process.""" + + if self.is_up(): + self.destroy() + self.create() + + def attach(self) -> None: + """Attach to a previously created background process' pseudo TTY.""" + + util.say(f"Attaching to {self.__class__.__name__}.{self.name} ...") + sock = self._connect() + sock.send(b"ATTACH\n") + expect: bytes = b"OK\n" + if sock.recv(len(expect)) != expect: + raise ResourceError(f"Error attaching to background process {self.name}") + util.say("Attached. Detach with ,.") + + subprocess.run(["stty", "-echo", "cbreak"], check=True) + try: + while True: + try: + ready_list, _, _ = select.select([sock, sys.stdin], [], [], 10) + if sock in ready_list: + buf = sock.recv(4096) + if not buf: + util.say_warn("Background process connection reset") + break + os.write(sys.stdout.fileno(), buf) + if sys.stdin in ready_list: + buf = os.read(sys.stdin.fileno(), 1) + if buf == b"\x10": + # Received ctrl-p (ASCII 0x10). This is the first keystroke in + # the detach sequence. Wait for the next one for 1 second, and + # detach if it completes the sequence. + ready, _, _ = select.select([sys.stdin], [], [], 1) + if sys.stdin in ready: + buf2 = os.read(sys.stdin.fileno(), 1) + if buf2 == b"\x04": + # Got ctrl-d (ASCII 0x04), so the detach sequence is complete. + print("") + util.say("Detached") + break + else: + # Not the detach sequence we were looking for. 
Send everything + # to the attached PTY. + buf += buf2 + sock.send(buf) + except KeyboardInterrupt: + # Send ctrl-c to the background process + sock.send(b"\x03") + finally: + subprocess.run(["stty", "echo", "-cbreak"], check=True) + + def _connect(self) -> socket.socket: + """Connect to the background process control socket.""" + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(str(self._ctl_sock_path)) + return sock + + def _manage(self, target_pid: int, pty_fd: int) -> None: + """Background process control loop.""" + + # This is executed only in the context of the daemonized control process. It + # listens on a Unix socket for commands, most important of which is ATTACH. + # This forwards the connected Unix socket to the pseudo TTY of the target + # background process, giving the user terminal access to it. + + # Set up logging for stdout/stderr + if not self._ctl_log_path.parent.exists(): + self._ctl_log_path.parent.mkdir(parents=True) + with open(self._ctl_log_path, "w") as log_file: + os.dup2(log_file.fileno(), 1) + os.dup2(log_file.fileno(), 2) + now: str = time.strftime("%Y-%m-%d %H:%M:%S") + sys.stdout.write( + f"\n{now} {self.__class__.__name__}.{self.name} starting ...\n" + ) + + # Write PID file + if not self._ctl_pid_path.parent.exists(): + self._ctl_pid_path.parent.mkdir(parents=True) + with open(self._ctl_pid_path, "w") as pid_file: + pid_file.write(f"{os.getpid()}") + + # Open the control socket + ctl_sock: socket.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + if not self._ctl_sock_path.parent.exists(): + self._ctl_sock_path.parent.mkdir(parents=True) + ctl_sock.bind(str(self._ctl_sock_path)) + ctl_sock.listen(1) + + client_sock: Optional[socket.socket] = None + term_buf: util.RollingLineBuffer = util.RollingLineBuffer(50) + + # Main control loop + while True: + rlist: List[Union[socket.socket, int]] = ( + [client_sock] if client_sock else [ctl_sock] + ) + rlist.append(pty_fd) + ready_list, _, _ = select.select(rlist, [], [], 10) + + # Check for new clients + if ctl_sock in ready_list: + client_sock, _ = ctl_sock.accept() + cmd = client_sock.recv(32) + if cmd == b"EXIT\n": + break + elif cmd == b"CHECK\n": + client_sock.send(b"OK\n") + elif cmd == b"ATTACH\n": + client_sock.send(b"OK\n") + client_sock.send(term_buf.get().encode("utf-8")) + else: + client_sock.close() + client_sock = None + + # Check for incoming client data + if client_sock and client_sock in ready_list: + buf = client_sock.recv(4096) + if not buf: + client_sock = None + continue + try: + os.write(pty_fd, buf) + except OSError: + client_sock.close() + break + + # Check for target process pty output + if pty_fd in ready_list: + try: + buf = os.read(pty_fd, 4096) + except OSError: + if client_sock: + client_sock.close() + break + # Send target output to rolling buffer + term_buf.add(buf.decode("utf-8")) + # Send target output to log + sys.stdout.write(util.term.strip_control_chars(buf.decode("utf-8"))) + sys.stdout.flush() + # Send target output to client, if any is connected + if client_sock: + client_sock.send(buf) + + util.kill_process(target_pid) + ctl_sock.close() + self._ctl_sock_path.unlink() + self._ctl_pid_path.unlink() diff --git a/devenv/src/util/__init__.py b/devenv/src/util/__init__.py index 57914fc724..e8769fe3e1 100644 --- a/devenv/src/util/__init__.py +++ b/devenv/src/util/__init__.py @@ -1,7 +1,11 @@ #!/usr/bin/env python """Misc utils (aka #include ).""" +import collections +import os +import signal import sys +import time from .. import config from . 
import term @@ -29,3 +33,49 @@ def say_warn(msg: str) -> None: def str_mid_pad(s: str, width: int, fill: str) -> str: pad = fill * int((width - len(s)) / (2 * len(fill))) return f"{pad}{s}{pad}" + + +def kill_process(pid: int) -> None: + """Kill a process and make sure it's dead.""" + + try: + os.kill(pid, signal.SIGTERM) + except OSError: + # pid already dead + return + dead: bool = False + for _ in range(10): + try: + os.kill(pid, 0) + except OSError: + dead = True + break + time.sleep(1) + if not dead: + os.kill(pid, signal.SIGKILL) + + +class RollingLineBuffer: + """A (very naive) rolling text line buffer. + + The buffer only keeps track of the last N lines of text. + """ + + def __init__(self, capacity: int) -> None: + self._lines: collections.deque[str] = collections.deque() + self._capacity: int = capacity + + def add(self, buf: str) -> None: + """Add text to the buffer.""" + + if self._lines: + buf = self._lines.pop() + buf + for line in buf.split("\n"): + self._lines.append(line) + if len(self._lines) > self._capacity: + self._lines.popleft() + + def get(self) -> str: + """Get the full buffer contents as text.""" + + return "\n".join(list(self._lines)) diff --git a/devenv/src/util/term.py b/devenv/src/util/term.py index f1201171e8..29858350b5 100644 --- a/devenv/src/util/term.py +++ b/devenv/src/util/term.py @@ -1,6 +1,7 @@ #!/usr/bin/env python """Terminal pretty stuffs.""" +import re import sys from typing import Callable @@ -25,3 +26,8 @@ def _colorize(buf: str, color_code: str) -> str: ok: Callable[[str], str] = lambda buf: _colorize(buf, _GREEN_FG) meh: Callable[[str], str] = lambda buf: _colorize(buf, _GRAY_FG) attn: Callable[[str], str] = lambda buf: _colorize(buf, _WHITE_FG) + + +def strip_control_chars(buf: str) -> str: + """Strips terminal control characters from a given string.""" + return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", buf) diff --git a/grr/client/grr_response_client/actions.py b/grr/client/grr_response_client/actions.py index 29b4554e14..7cdb79f5a3 100644 --- a/grr/client/grr_response_client/actions.py +++ b/grr/client/grr_response_client/actions.py @@ -11,7 +11,6 @@ import psutil -from grr_response_client import client_utils from grr_response_client.unprivileged import communication from grr_response_core import config from grr_response_core.lib import rdfvalue @@ -333,9 +332,6 @@ def Progress(self): ActionPlugin.last_progress_time = now - # Prevent the machine from sleeping while the action is running. 
- client_utils.KeepAlive() - self.grr_worker.Heartbeat() used_cpu = self.cpu_times.total_cpu_used diff --git a/grr/client/grr_response_client/client_actions/action_test.py b/grr/client/grr_response_client/client_actions/action_test.py index a3c4b2632e..7b5c3ce8ba 100644 --- a/grr/client/grr_response_client/client_actions/action_test.py +++ b/grr/client/grr_response_client/client_actions/action_test.py @@ -13,7 +13,6 @@ import psutil from grr_response_client import actions -from grr_response_client import client_utils from grr_response_client.client_actions import standard from grr_response_client.unprivileged import communication from grr_response_core.lib import rdfvalue @@ -307,18 +306,27 @@ def testProgressThrottling(self): class MockWorker(object): + def __init__(self): + self.heartbeat_count = 0 + def Heartbeat(self): - pass + self.heartbeat_count += 1 worker = MockWorker() - with test_lib.Instrument(client_utils, "KeepAlive") as instrument: - for time, expected_count in [(100, 1), (101, 1), (102, 1), (103, 2), - (104, 2), (105, 2), (106, 3)]: - with test_lib.FakeTime(time): - action = ProgressAction(grr_worker=worker) - action.Progress() - self.assertEqual(instrument.call_count, expected_count) + for time, expected_count in [ + (100, 1), + (101, 1), + (102, 1), + (103, 2), + (104, 2), + (105, 2), + (106, 3), + ]: + with test_lib.FakeTime(time): + action = ProgressAction(grr_worker=worker) + action.Progress() + self.assertEqual(worker.heartbeat_count, expected_count) def main(argv): diff --git a/grr/client/grr_response_client/client_actions/dummy.py b/grr/client/grr_response_client/client_actions/dummy.py new file mode 100644 index 0000000000..fb0f284970 --- /dev/null +++ b/grr/client/grr_response_client/client_actions/dummy.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +"""The Dummy client action.""" + +from grr_response_client import actions +from grr_response_core.lib.rdfvalues import dummy as rdf_dummy + + +class Dummy(actions.ActionPlugin): + """Returns the received string.""" + + in_rdfvalue = rdf_dummy.DummyRequest + out_rdfvalues = [rdf_dummy.DummyResult] + + def Run(self, args: rdf_dummy.DummyRequest) -> None: + """Returns received input back to the server.""" + + if not args.action_input: + raise RuntimeError("args.action_input is empty, cannot proceed!") + + self.SendReply( + rdf_dummy.DummyResult( + action_output=f"args.action_input: '{args.action_input}'" + ) + ) diff --git a/grr/client/grr_response_client/client_actions/dummy_test.py b/grr/client/grr_response_client/client_actions/dummy_test.py new file mode 100644 index 0000000000..a4418c4ae0 --- /dev/null +++ b/grr/client/grr_response_client/client_actions/dummy_test.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +"""Tests for dummy.""" + +from absl import app + +from grr_response_client.client_actions import dummy +from grr_response_core.lib.rdfvalues import dummy as rdf_dummy +from grr_response_core.lib.rdfvalues import flows as rdf_flows +from grr.test_lib import client_test_lib +from grr.test_lib import test_lib + + +class DummyTest(client_test_lib.EmptyActionTest): + """Test Dummy action.""" + + def testDummyReceived(self): + action_request = rdf_dummy.DummyRequest(action_input="banana") + + # We use `ExecuteAction` instead of `RunAction` to test `status` result too. + results = self.ExecuteAction(dummy.Dummy, action_request) + + # One result, and one status message. 
+ self.assertLen(results, 2) + + self.assertIsInstance(results[0], rdf_dummy.DummyResult) + self.assertIn("banana", results[0].action_output) + + self.assertIsInstance(results[1], rdf_flows.GrrStatus) + self.assertEqual(rdf_flows.GrrStatus.ReturnedStatus.OK, results[1].status) + self.assertEmpty(results[1].error_message) + + def testErrorsOnEmptyInput(self): + action_request = rdf_dummy.DummyRequest() + + # We use `ExecuteAction` instead of `RunAction` to test `status` result too. + results = self.ExecuteAction(dummy.Dummy, action_request) + + # One status message. + self.assertLen(results, 1) + + self.assertIsInstance(results[0], rdf_flows.GrrStatus) + self.assertEqual( + rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR, results[0].status + ) + self.assertIn("empty", results[0].error_message) + + +def main(argv): + test_lib.main(argv) + + +if __name__ == "__main__": + app.run(main) diff --git a/grr/client/grr_response_client/client_actions/file_finder.py b/grr/client/grr_response_client/client_actions/file_finder.py index 6465c9ad79..47dbbce24f 100644 --- a/grr/client/grr_response_client/client_actions/file_finder.py +++ b/grr/client/grr_response_client/client_actions/file_finder.py @@ -2,8 +2,7 @@ """The file finder client action.""" import io - -from typing import Callable, Iterator, Text, List +from typing import Callable, Iterator, List, Text from grr_response_client import actions from grr_response_client import client_utils @@ -121,13 +120,33 @@ def _ValidateRegularity(self, stat, args, filepath): raise _SkipFileException() def _ValidateMetadata(self, stat, filepath): + # This check ensures consistent behavior between the legacy file finder and + # the client file finder. The legacy file finder was automatically + # following symlinks to regular files. + if stat.IsSymlink(): + target_stat = filesystem.Stat.FromPath( + stat.GetPath(), follow_symlink=True + ) + if target_stat.IsRegular(): + stat = target_stat + for metadata_condition in self._metadata_conditions: if not metadata_condition.Check(stat): raise _SkipFileException() def _ValidateContent(self, stat, filepath, matches): if self._content_conditions and not stat.IsRegular(): - raise _SkipFileException() + # This check ensures consistent behavior between the legacy file finder + # and the client file finder. The legacy file finder was automatically + # following symlinks to regular files. + if stat.IsSymlink(): + target_stat = filesystem.Stat.FromPath( + stat.GetPath(), follow_symlink=True + ) + if not target_stat.IsRegular(): + raise _SkipFileException() + else: + raise _SkipFileException() for content_condition in self._content_conditions: with io.open(filepath, "rb") as fd: diff --git a/grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py b/grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py index 2a1dda0ea4..90cfc6eb9c 100644 --- a/grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py +++ b/grr/client/grr_response_client/client_actions/file_finder_utils/globbing.py @@ -133,8 +133,20 @@ def _Recurse(self, path, depth): except IOError: return # Skip inaccessible Registry parts (e.g. HKLM\SAM\SAM) silently. else: - raise AssertionError("Pathtype {} is not supported for recursion".format( - self.opts.pathtype)) + # We allow recursive TSK/NTFS searches with a depth level up to 2. 
+ if depth > 2: + raise AssertionError( + f"Pathtype {self.opts.pathtype} is not supported for recursion with" + f" depth {depth} (max is 2)" + ) + + pathspec = rdf_paths.PathSpec(path=path, pathtype=self.opts.pathtype) + try: + with vfs.VFSOpen(pathspec) as filedesc: + if not filedesc.IsDirectory(): + return + except IOError: + return # Skip inaccessible paths silently. for childpath in self._Generate(path, depth + 1): yield childpath diff --git a/grr/client/grr_response_client/client_actions/linux/linux.py b/grr/client/grr_response_client/client_actions/linux/linux.py index 425a7348d0..3615c11411 100644 --- a/grr/client/grr_response_client/client_actions/linux/linux.py +++ b/grr/client/grr_response_client/client_actions/linux/linux.py @@ -321,18 +321,6 @@ def Run(self, unused_arg): raise NotImplementedError("Not implemented") -class Uninstall(actions.ActionPlugin): - """Uninstall GRR. Place holder, does nothing. - - Note this needs to handle the different distributions separately, e.g. Redhat - vs Debian. - """ - out_rdfvalues = [rdf_protodict.DataBlob] - - def Run(self, unused_arg): - raise NotImplementedError("Not implemented") - - class UpdateAgent(standard.ExecuteBinaryCommand): """Updates the GRR agent to a new version.""" diff --git a/grr/client/grr_response_client/client_actions/memory.py b/grr/client/grr_response_client/client_actions/memory.py index 65e00aba26..89e79844b0 100644 --- a/grr/client/grr_response_client/client_actions/memory.py +++ b/grr/client/grr_response_client/client_actions/memory.py @@ -5,6 +5,7 @@ import collections import contextlib import io +import logging import os import platform import re @@ -82,10 +83,26 @@ def ProcessIterator(pids, process_regex_string, cmdline_regex_string, process_iterator = psutil.process_iter() for p in process_iterator: + + try: + process_name = p.name() + except psutil.AccessDenied as error: + # Catch AccessDenied errors in case psutil can't get the process name. + logging.error("failed to obtain process name: %s", error) + process_name = "" + + if process_regex and not process_regex.search(process_name): continue - if cmdline_regex and not cmdline_regex.search(" ".join(p.cmdline())): + try: + cmdline = p.cmdline() + except psutil.AccessDenied as error: + # psutil raises AccessDenied when getting the cmdline for special + # processes like Registry or System on Windows.
+ logging.error("failed to obtain process command line: %s", error) + cmdline = [] + + if cmdline_regex and not cmdline_regex.search(" ".join(cmdline)): continue if p.pid == grr_pid: diff --git a/grr/client/grr_response_client/client_actions/operating_system.py b/grr/client/grr_response_client/client_actions/operating_system.py index 8b0842124f..9cbbf44030 100644 --- a/grr/client/grr_response_client/client_actions/operating_system.py +++ b/grr/client/grr_response_client/client_actions/operating_system.py @@ -41,5 +41,4 @@ else: OSXEnumerateRunningServices = None EnumerateRunningServices = None -Uninstall = submodule.Uninstall UpdateAgent = submodule.UpdateAgent diff --git a/grr/client/grr_response_client/client_actions/osx/firmware.py b/grr/client/grr_response_client/client_actions/osx/firmware.py deleted file mode 100644 index fc01db5a00..0000000000 --- a/grr/client/grr_response_client/client_actions/osx/firmware.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python -"""Execute eficheck on the client.""" - -import glob -import os -import re - -from grr_response_client import actions -from grr_response_client import client_utils_common -from grr_response_client.client_actions import tempfiles -from grr_response_core.lib.rdfvalues import apple_firmware as rdf_apple_firmware -from grr_response_core.lib.rdfvalues import client_action as rdf_client_action -from grr_response_core.lib.rdfvalues import paths as rdf_paths - - -class EficheckActionPlugin(actions.ActionPlugin): - """Base class for Eficheck Client Action. - - Generic method(s) to be used by eficheck-related client actions. - """ - - def _GetVersion(self, args): - """Call eficheck to find out its version.""" - res = client_utils_common.Execute(args.cmd_path, ["--version"]) - stdout, stderr, exit_status, time_used = res - - # If something went wrong, forward the output directly. - if exit_status: - binary_response = rdf_client_action.ExecuteBinaryResponse( - stdout=stdout, - stderr=stderr, - exit_status=exit_status, - time_used=time_used) - self.SendReply(self.out_rdfvalues[0](response=binary_response)) - return - return stdout - - -class EficheckCollectHashes(EficheckActionPlugin): - """A client action to collect the EFI hashes via Apple eficheck.""" - - in_rdfvalue = rdf_apple_firmware.EficheckConfig - out_rdfvalues = [rdf_apple_firmware.CollectEfiHashesResponse] - - # The filename of the generated allowlist is passed as argument to the next - # command. Make sure it matches a specific format to avoid any command - # injection. - _FILENAME_RE = re.compile(r"^[a-zA-Z0-9_.]+$") - - def Run(self, args): - """Use eficheck to extract hash files in plaintext. - - Args: - args: EficheckConfig - Returns: - CollectEfiHashesResponse - - This action executes eficheck multiple times: - * First to get the binary version, using --version. - * Then with the --generate-hashes option. This will create one or more - .ealf files. Each file contains a binary representation of the hashes - extracted from a part of the flash image (e.g, EFI, SEC). - * For each file generated, we use the --show-hashes option to get a - plaintext representation of the hashes. This raw output is sent to the - server which will perform further parsing. 
- """ - - eficheck_version = self._GetVersion(args) - if not eficheck_version: - return False - - with tempfiles.TemporaryDirectory() as tmp_dir: - res = client_utils_common.Execute( - args.cmd_path, ["--generate-hashes"], cwd=tmp_dir.path) - stdout, stderr, exit_status, time_used = res - # If something went wrong, forward the output directly. - if exit_status: - binary_response = rdf_client_action.ExecuteBinaryResponse( - stdout=stdout, - stderr=stderr, - exit_status=exit_status, - time_used=time_used) - self.SendReply( - rdf_apple_firmware.CollectEfiHashesResponse( - response=binary_response)) - return - # Otherwise, convert all the files generated and forward the output. - - for filename in glob.glob(os.path.join(tmp_dir.path, "*.ealf")): - cmd_args = ["--show-hashes", "-h", filename] - # Get the boot rom version from the filename. - basename = os.path.basename(filename) - if not self._FILENAME_RE.match(basename): - continue - boot_rom_version, _ = os.path.splitext(basename) - stdout, stderr, exit_status, time_used = client_utils_common.Execute( - args.cmd_path, cmd_args, bypass_allowlist=True) - - binary_response = rdf_client_action.ExecuteBinaryResponse( - stdout=stdout, - stderr=stderr, - exit_status=exit_status, - time_used=time_used) - self.SendReply( - rdf_apple_firmware.CollectEfiHashesResponse( - eficheck_version=eficheck_version, - boot_rom_version=boot_rom_version, - response=binary_response)) - - tempfiles.DeleteGRRTempFile(filename) - - -class EficheckDumpImage(EficheckActionPlugin): - """A client action to collect the full EFI image via Apple eficheck.""" - - in_rdfvalue = rdf_apple_firmware.EficheckConfig - out_rdfvalues = [rdf_apple_firmware.DumpEfiImageResponse] - - def Run(self, args): - """Use eficheck to extract the binary image of the flash. - - Args: - args: EficheckConfig - Returns: - DumpEfiImageResponse - - This action executes eficheck multiple times: - * First to get the binary version, using --version. - * Use --save -b firmware.bin to save the image. 
- """ - - eficheck_version = self._GetVersion(args) - if not eficheck_version: - return False - - with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir: - res = client_utils_common.Execute( - args.cmd_path, ["--save", "-b", "firmware.bin"], cwd=tmp_dir.path) - stdout, stderr, exit_status, time_used = res - binary_response = rdf_client_action.ExecuteBinaryResponse( - stdout=stdout, - stderr=stderr, - exit_status=exit_status, - time_used=time_used) - response = rdf_apple_firmware.DumpEfiImageResponse( - eficheck_version=eficheck_version, response=binary_response) - if exit_status: - tmp_dir.cleanup = True - else: - response.path = rdf_paths.PathSpec( - path=os.path.join(tmp_dir.path, "firmware.bin"), - pathtype=rdf_paths.PathSpec.PathType.TMPFILE) - self.SendReply(response) diff --git a/grr/client/grr_response_client/client_actions/osx/firmware_test.py b/grr/client/grr_response_client/client_actions/osx/firmware_test.py deleted file mode 100644 index 65b7e822c1..0000000000 --- a/grr/client/grr_response_client/client_actions/osx/firmware_test.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -"""Test Eficheck client actions.""" - -import os -from unittest import mock - -from absl import app - -from grr_response_client.client_actions import tempfiles -from grr_response_client.client_actions.osx import firmware -from grr_response_core.lib.rdfvalues import apple_firmware as rdf_apple_firmware -from grr.test_lib import client_test_lib -from grr.test_lib import test_lib - - -def MockExecute(unused_cmd, args, **unused_kwds): - if "--version" in args: - return (b"v1.14", b"", 0, 5) - elif "--generate-hashes" in args: - return (b"Successfully wrote hashes.", b"", 0, 5) - elif args == ["--save", "-b", "firmware.bin"]: - return (b"Successfully wrote the image.", b"", 0, 15) - elif "--show-hashes" in args: - return (b"00:01:02:12345:abcd-12345", b"", 0, 5) - - -def FailedMockExecute(unused_cmd, unused_args, **unused_kwds): - return (b"", b"Unable to find the eficheck binary", -1, 10) - - -def FailedDumpMockExecute(unused_cmd, args, **unused_kwds): - if "--version" in args: - return (b"v1.14", b"", 0, 5) - else: - return (b"", b"Unable to dump the binary image", -1, 10) - - -@mock.patch.multiple( - "grr_response_client.client_actions.osx" - ".firmware", - glob=mock.DEFAULT, - client_utils_common=mock.DEFAULT) -class TestEficheckCollect(client_test_lib.EmptyActionTest): - """Test class for GRR-eficheck actions.""" - - def testEficheckCollectHashes(self, glob, client_utils_common): - """Test the basic hash collection action.""" - - client_utils_common.Execute = MockExecute - glob.glob.return_value = ["./MBP142.88Z.F000.B00.123.0.ealf"] - - args = rdf_apple_firmware.EficheckConfig() - with mock.patch.object(tempfiles, "DeleteGRRTempFile", - lambda filename: None): - result = self.RunAction(firmware.EficheckCollectHashes, args)[0] - - self.assertEqual(result.boot_rom_version, "MBP142.88Z.F000.B00.123.0") - self.assertEqual(result.eficheck_version, "v1.14") - self.assertEqual(result.response.stdout, b"00:01:02:12345:abcd-12345") - self.assertEqual(result.response.stderr, b"") - - def testFailedEficheckCollectHashes(self, glob, client_utils_common): - - client_utils_common.Execute = FailedMockExecute - glob.glob.return_value = [] - args = rdf_apple_firmware.EficheckConfig() - result = self.RunAction(firmware.EficheckCollectHashes, args)[0] - - self.assertEqual(result.response.stderr, - b"Unable to find the eficheck binary") - - def testEficheckCollectHashesWithExtra(self, glob, 
client_utils_common): - """Test the hash collection action when extra unknown files are present.""" - - client_utils_common.Execute = MockExecute - glob.glob.return_value = ["./MBP61.ealf", "$(id).ealf", "`id`.ealf"] - - args = rdf_apple_firmware.EficheckConfig() - with mock.patch.object(tempfiles, "DeleteGRRTempFile", - lambda filename: None): - results = self.RunAction(firmware.EficheckCollectHashes, args) - self.assertLen(results, 1) - - def testEficheckDumpImage(self, glob, client_utils_common): - """Test the basic dump action.""" - - client_utils_common.Execute = MockExecute - - args = rdf_apple_firmware.EficheckConfig() - with mock.patch.object(tempfiles, "GetDefaultGRRTempDirectory", - lambda **kw: os.path.abspath(self.temp_dir)): - result = self.RunAction(firmware.EficheckDumpImage, args)[0] - - self.assertEqual(result.eficheck_version, "v1.14") - self.assertEqual(result.response.stderr, b"") - self.assertStartsWith(result.path.path, self.temp_dir) - self.assertEndsWith(result.path.path, "/firmware.bin") - - def testFailedEficheckDumpImageVersion(self, glob, client_utils_common): - """Test for failure of the dump action when reading the version.""" - - client_utils_common.Execute = FailedMockExecute - - args = rdf_apple_firmware.EficheckConfig() - result = self.RunAction(firmware.EficheckDumpImage, args)[0] - - self.assertEqual(result.response.stderr, - b"Unable to find the eficheck binary") - - def testFailedEficheckDumpImage(self, glob, client_utils_common): - """Test for failure of the basic dump action.""" - - client_utils_common.Execute = FailedDumpMockExecute - - args = rdf_apple_firmware.EficheckConfig() - result = self.RunAction(firmware.EficheckDumpImage, args)[0] - - self.assertEqual(result.eficheck_version, "v1.14") - self.assertEqual(result.response.stderr, b"Unable to dump the binary image") - - -def main(argv): - test_lib.main(argv) - - -if __name__ == "__main__": - app.run(main) diff --git a/grr/client/grr_response_client/client_actions/osx/osx.py b/grr/client/grr_response_client/client_actions/osx/osx.py index dfd80cdde8..15e957bea1 100644 --- a/grr/client/grr_response_client/client_actions/osx/osx.py +++ b/grr/client/grr_response_client/client_actions/osx/osx.py @@ -7,13 +7,10 @@ """ import ctypes -import logging import os import re -import shutil import socket import struct -import sys import pytsk3 @@ -22,7 +19,6 @@ from grr_response_client import client_utils_osx from grr_response_client.client_actions import standard from grr_response_client.osx import objc -from grr_response_core import config from grr_response_core.lib import rdfvalue from grr_response_core.lib.parsers import osx_launchd from grr_response_core.lib.rdfvalues import client as rdf_client @@ -424,43 +420,6 @@ def Run(self, args): self.SendReply(res) -class Uninstall(actions.ActionPlugin): - """Remove the service that starts us at startup.""" - out_rdfvalues = [rdf_protodict.DataBlob] - - def Run(self, unused_arg): - """This kills us with no cleanups.""" - logging.debug("Disabling service") - - msg = "Service disabled." - if hasattr(sys, "frozen"): - grr_binary = os.path.abspath(sys.executable) - elif __file__: - grr_binary = os.path.abspath(__file__) - - try: - os.remove(grr_binary) - except OSError: - msg = "Could not remove binary." - - try: - os.remove(config.CONFIG["Client.plist_path"]) - except OSError: - if "Could not" in msg: - msg += " Could not remove plist file." - else: - msg = "Could not remove plist file." - - # Get the directory we are running in from pyinstaller. 
This is either the - # GRR directory which we should delete (onedir mode) or a generated temp - # directory which we can delete without problems in onefile mode. - directory = getattr(sys, "_MEIPASS", None) - if directory: - shutil.rmtree(directory, ignore_errors=True) - - self.SendReply(rdf_protodict.DataBlob(string=msg)) - - class UpdateAgent(standard.ExecuteBinaryCommand): """Updates the GRR agent to a new version.""" diff --git a/grr/client/grr_response_client/client_actions/registry_init.py b/grr/client/grr_response_client/client_actions/registry_init.py index d54666600b..53c6a9b87a 100644 --- a/grr/client/grr_response_client/client_actions/registry_init.py +++ b/grr/client/grr_response_client/client_actions/registry_init.py @@ -8,6 +8,7 @@ from grr_response_client.client_actions import admin from grr_response_client.client_actions import artifact_collector from grr_response_client.client_actions import cloud +from grr_response_client.client_actions import dummy from grr_response_client.client_actions import file_finder from grr_response_client.client_actions import file_fingerprint from grr_response_client.client_actions import large_file @@ -20,6 +21,7 @@ from grr_response_client.client_actions import tempfiles from grr_response_client.client_actions import timeline from grr_response_client.client_actions import vfs_file_finder +from grr_response_client.client_actions.windows import dummy as win_dummy from grr_response_client.client_actions.windows import pipes @@ -60,7 +62,6 @@ def RegisterClientActions(): client_actions.Register("ReadBuffer", standard.ReadBuffer) client_actions.Register("ReadLowLevel", read_low_level.ReadLowLevel) client_actions.Register("Segfault", standard.Segfault) - client_actions.Register("SendFile", standard.SendFile) client_actions.Register("SendStartupInfo", admin.SendStartupInfo) client_actions.Register("StatFS", standard.StatFS) client_actions.Register("Timeline", timeline.Timeline) @@ -72,13 +73,14 @@ def RegisterClientActions(): if platform.system() == "Linux": from grr_response_client.client_actions.linux import linux # pylint: disable=g-import-not-at-top + + client_actions.Register("Dummy", dummy.Dummy) client_actions.Register("EnumerateFilesystems", linux.EnumerateFilesystems) client_actions.Register("EnumerateInterfaces", linux.EnumerateInterfaces) client_actions.Register("EnumerateRunningServices", linux.EnumerateRunningServices) client_actions.Register("EnumerateUsers", linux.EnumerateUsers) client_actions.Register("GetInstallDate", linux.GetInstallDate) - client_actions.Register("Uninstall", linux.Uninstall) client_actions.Register("UpdateAgent", linux.UpdateAgent) if hasattr(sys, "frozen"): @@ -88,12 +90,13 @@ def RegisterClientActions(): elif platform.system() == "Windows": from grr_response_client.client_actions.windows import windows # pylint: disable=g-import-not-at-top + + client_actions.Register("Dummy", win_dummy.Dummy) client_actions.Register("EnumerateFilesystems", windows.EnumerateFilesystems) client_actions.Register("EnumerateInterfaces", windows.EnumerateInterfaces) client_actions.Register("GetInstallDate", windows.GetInstallDate) client_actions.Register("WmiQuery", windows.WmiQuery) - client_actions.Register("Uninstall", windows.Uninstall) client_actions.Register("UpdateAgent", windows.UpdateAgent) client_actions.Register("ListNamedPipes", pipes.ListNamedPipesAction) @@ -104,10 +107,4 @@ def RegisterClientActions(): client_actions.Register("GetInstallDate", osx.GetInstallDate) client_actions.Register("OSXEnumerateRunningServices", 
osx.OSXEnumerateRunningServices) - client_actions.Register("Uninstall", osx.Uninstall) client_actions.Register("UpdateAgent", osx.UpdateAgent) - - from grr_response_client.client_actions.osx import firmware # pylint: disable=g-import-not-at-top - client_actions.Register("EficheckCollectHashes", - firmware.EficheckCollectHashes) - client_actions.Register("EficheckDumpImage", firmware.EficheckDumpImage) diff --git a/grr/client/grr_response_client/client_actions/standard.py b/grr/client/grr_response_client/client_actions/standard.py index ba4c802a08..de9d89c72c 100644 --- a/grr/client/grr_response_client/client_actions/standard.py +++ b/grr/client/grr_response_client/client_actions/standard.py @@ -7,7 +7,6 @@ import logging import os import platform -import socket import sys from typing import Text from unittest import mock @@ -26,8 +25,6 @@ from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import client_action as rdf_client_action from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs -from grr_response_core.lib.rdfvalues import client_network as rdf_client_network -from grr_response_core.lib.rdfvalues import crypto as rdf_crypto from grr_response_core.lib.rdfvalues import flows as rdf_flows from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_core.lib.rdfvalues import protodict as rdf_protodict @@ -416,61 +413,6 @@ def Run(self, args): self.Progress() -class SendFile(actions.ActionPlugin): - """This action encrypts and sends a file to a remote listener.""" - in_rdfvalue = rdf_client_action.SendFileRequest - out_rdfvalues = [rdf_client_fs.StatEntry] - - # 10 MB. - BLOCK_SIZE = 1024 * 1024 * 10 - - def Send(self, sock, msg): - totalsent = 0 - n = len(msg) - while totalsent < n: - sent = sock.send(msg[totalsent:]) - if sent == 0: - raise RuntimeError("socket connection broken") - totalsent += sent - - def Run(self, args): - """Run.""" - - # Open the file. - fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) - - if args.address_family == rdf_client_network.NetworkAddress.Family.INET: - family = socket.AF_INET - elif args.address_family == rdf_client_network.NetworkAddress.Family.INET6: - family = socket.AF_INET6 - else: - raise RuntimeError("Socket address family not supported.") - - s = socket.socket(family, socket.SOCK_STREAM) - - try: - s.connect((args.host, args.port)) - except socket.error as e: - raise RuntimeError(str(e)) - - cipher = rdf_crypto.AES128CBCCipher(args.key, args.iv) - streaming_encryptor = rdf_crypto.StreamingCBCEncryptor(cipher) - - while True: - data = fd.read(self.BLOCK_SIZE) - if not data: - break - - self.Send(s, streaming_encryptor.Update(data)) - # Send heartbeats for long files. - self.Progress() - - self.Send(s, streaming_encryptor.Finalize()) - s.close() - - self.SendReply(fd.Stat()) - - def StatFSFromClient(args): """Call os.statvfs for a given list of paths. diff --git a/grr/client/grr_response_client/client_actions/tempfiles_test.py b/grr/client/grr_response_client/client_actions/tempfiles_test.py index 4cf14f06bc..87f8aee1d4 100644 --- a/grr/client/grr_response_client/client_actions/tempfiles_test.py +++ b/grr/client/grr_response_client/client_actions/tempfiles_test.py @@ -61,7 +61,8 @@ def testWrongOwnerGetsFixed(self): lstat = os.lstat - def mystat(filename): + def mystat(filename, *, dir_fd=None): + del dir_fd # Unused. stat_info = lstat(filename) stat_list = list(stat_info) # Adjust the UID. 
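A note on the tempfiles_test.py fix above: os.lstat accepts a keyword-only
dir_fd argument, and callers inside the standard library may pass it, so any
test double standing in for os.lstat has to accept (and may ignore) it. A
minimal, self-contained sketch of the pattern; fake_lstat is a hypothetical
name, not part of the test:

    import os

    real_lstat = os.lstat

    def fake_lstat(filename, *, dir_fd=None):
      # Accept dir_fd so the signature matches os.lstat(); this fake does not
      # need it, so it is discarded before delegating to the real call.
      del dir_fd  # Unused.
      return real_lstat(filename)

    print(fake_lstat(".").st_mode)  # Behaves like os.lstat for plain paths.
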
diff --git a/grr/client/grr_response_client/client_actions/vfs_file_finder.py b/grr/client/grr_response_client/client_actions/vfs_file_finder.py
index 9f150b9fc6..1d6318618d 100644
--- a/grr/client/grr_response_client/client_actions/vfs_file_finder.py
+++ b/grr/client/grr_response_client/client_actions/vfs_file_finder.py
@@ -70,12 +70,16 @@ def _CheckConditionsShortCircuit(content_conditions, pathspec):
   """Checks all `content_conditions` until one yields no matches."""
   matches = []
   for cond in content_conditions:
+    cur_matches = []
     with vfs.VFSOpen(pathspec) as vfs_file:
-      if vfs_file.size == 0 or vfs_file.size is None:
-        # Skip directories.
-        cur_matches = []
-      else:
+      is_registry = (
+          vfs_file.supported_pathtype == rdf_paths.PathSpec.PathType.REGISTRY
+      )
+      # Do the actual matching for registry files or for files with a
+      # well-defined size.
+      if is_registry or (vfs_file.size is not None and vfs_file.size > 0):
         cur_matches = list(cond.Search(vfs_file))
+
     if cur_matches:
       matches.extend(cur_matches)
     else:
       # As soon as one condition does not match, we skip the file.
diff --git a/grr/client/grr_response_client/client_actions/windows/dummy.py b/grr/client/grr_response_client/client_actions/windows/dummy.py
new file mode 100644
index 0000000000..2a4daf4dbc
--- /dev/null
+++ b/grr/client/grr_response_client/client_actions/windows/dummy.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+"""The Dummy client action."""
+
+from grr_response_client import actions
+from grr_response_core.lib.rdfvalues import dummy as rdf_dummy
+
+
+class Dummy(actions.ActionPlugin):
+  """Returns the received string."""
+
+  in_rdfvalue = rdf_dummy.DummyRequest
+  out_rdfvalues = [rdf_dummy.DummyResult]
+
+  def Run(self, args: rdf_dummy.DummyRequest) -> None:
+    """Returns the received input back to the server, but on Windows."""
+
+    if not args.action_input:
+      raise RuntimeError("WIN args.action_input is empty, cannot proceed!")
+
+    self.SendReply(
+        rdf_dummy.DummyResult(
+            action_output=f"WIN args.action_input: '{args.action_input}'"
+        )
+    )
diff --git a/grr/client/grr_response_client/client_actions/windows/dummy_test.py b/grr/client/grr_response_client/client_actions/windows/dummy_test.py
new file mode 100644
index 0000000000..6579b3e9f3
--- /dev/null
+++ b/grr/client/grr_response_client/client_actions/windows/dummy_test.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""Tests for dummy."""
+
+from absl import app
+
+from grr_response_client.client_actions.windows import dummy
+from grr_response_core.lib.rdfvalues import dummy as rdf_dummy
+from grr_response_core.lib.rdfvalues import flows as rdf_flows
+from grr.test_lib import client_test_lib
+from grr.test_lib import test_lib
+
+
+class DummyTest(client_test_lib.EmptyActionTest):
+  """Test Dummy action."""
+
+  def testDummyReceived(self):
+    action_request = rdf_dummy.DummyRequest(action_input="banana")
+
+    # We use `ExecuteAction` instead of `RunAction` to test `status` result too.
+    results = self.ExecuteAction(dummy.Dummy, action_request)
+
+    # One result, and one status message.
+ self.assertLen(results, 2) + + self.assertIsInstance(results[0], rdf_dummy.DummyResult) + self.assertIn("banana", results[0].action_output) + self.assertIn("WIN", results[0].action_output) + + self.assertIsInstance(results[1], rdf_flows.GrrStatus) + self.assertEqual(rdf_flows.GrrStatus.ReturnedStatus.OK, results[1].status) + self.assertEmpty(results[1].error_message) + + def testErrorsOnEmptyInput(self): + action_request = rdf_dummy.DummyRequest() + + # We use `ExecuteAction` instead of `RunAction` to test `status` result too. + results = self.ExecuteAction(dummy.Dummy, action_request) + + # One status message. + self.assertLen(results, 1) + + self.assertIsInstance(results[0], rdf_flows.GrrStatus) + self.assertEqual( + rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR, results[0].status + ) + self.assertIn("empty", results[0].error_message) + + +def main(argv): + test_lib.main(argv) + + +if __name__ == "__main__": + app.run(main) diff --git a/grr/client/grr_response_client/client_actions/windows/windows.py b/grr/client/grr_response_client/client_actions/windows/windows.py index b8dedbb4fa..34ac6eb173 100644 --- a/grr/client/grr_response_client/client_actions/windows/windows.py +++ b/grr/client/grr_response_client/client_actions/windows/windows.py @@ -7,7 +7,6 @@ """ import binascii -import logging import pythoncom import win32api @@ -20,7 +19,6 @@ from grr_response_client import actions from grr_response_client.client_actions import standard -from grr_response_core import config from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client_action as rdf_client_action from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs @@ -120,27 +118,6 @@ def Run(self, args): self.SendReply(res) -class Uninstall(actions.ActionPlugin): - """Remove the service that starts us at startup.""" - out_rdfvalues = [rdf_protodict.DataBlob] - - def Run(self, unused_arg): - """This kills us with no cleanups.""" - logging.debug("Disabling service") - - win32serviceutil.ChangeServiceConfig( - None, - config.CONFIG["Nanny.service_name"], - startType=win32service.SERVICE_DISABLED) - svc_config = QueryService(config.CONFIG["Nanny.service_name"]) - if svc_config[1] == win32service.SERVICE_DISABLED: - logging.info("Disabled service successfully") - self.SendReply(rdf_protodict.DataBlob(string="Service disabled.")) - else: - self.SendReply( - rdf_protodict.DataBlob(string="Service failed to disable.")) - - def QueryService(svc_name): """Query service and get its config.""" hscm = win32service.OpenSCManager(None, None, diff --git a/grr/client/grr_response_client/client_utils.py b/grr/client/grr_response_client/client_utils.py index a5bc162d75..06b08b4cda 100644 --- a/grr/client/grr_response_client/client_utils.py +++ b/grr/client/grr_response_client/client_utils.py @@ -25,7 +25,6 @@ FindProxies = _client_utils.FindProxies GetExtAttrs = _client_utils.GetExtAttrs GetRawDevice = _client_utils.GetRawDevice -KeepAlive = _client_utils.KeepAlive LocalPathToCanonicalPath = _client_utils.LocalPathToCanonicalPath MemoryRegions = _client_utils.MemoryRegions OpenProcessForMemoryAccess = _client_utils.OpenProcessForMemoryAccess diff --git a/grr/client/grr_response_client/client_utils_common.py b/grr/client/grr_response_client/client_utils_common.py index a825d11376..8fe78ec9cb 100644 --- a/grr/client/grr_response_client/client_utils_common.py +++ b/grr/client/grr_response_client/client_utils_common.py @@ -164,6 +164,7 @@ def IsExecutionAllowed(cmd, args): allowlist = [ ("/bin/df", []), 
("/bin/echo", ["1"]), + ("/bin/mount", []), ("/bin/rpm", ["-qa"]), ("/bin/sleep", ["10"]), ("/sbin/auditctl", ["-l"]), @@ -178,6 +179,7 @@ def IsExecutionAllowed(cmd, args): ("/usr/sbin/arp", ["-a"]), ("/usr/sbin/dmidecode", ["-q"]), ("/usr/sbin/sshd", ["-T"]), + ("/opt/CrowdStrike/falconctl", ["-g", "--cid", "--aid"]), ] elif platform.system() == "Darwin": allowlist = [ diff --git a/grr/client/grr_response_client/client_utils_linux.py b/grr/client/grr_response_client/client_utils_linux.py index 945248c545..4374a41703 100644 --- a/grr/client/grr_response_client/client_utils_linux.py +++ b/grr/client/grr_response_client/client_utils_linux.py @@ -101,11 +101,6 @@ def GetRawDevice(path): mount_point = os.path.dirname(mount_point) -def KeepAlive(): - # Not yet supported for Linux. - pass - - def OpenProcessForMemoryAccess(pid=None): return process.Process(pid=pid) diff --git a/grr/client/grr_response_client/client_utils_osx.py b/grr/client/grr_response_client/client_utils_osx.py index ba3b54c3c0..1e177e6a94 100644 --- a/grr/client/grr_response_client/client_utils_osx.py +++ b/grr/client/grr_response_client/client_utils_osx.py @@ -246,11 +246,6 @@ def VersionString(self): return self.version -def KeepAlive(): - # Not yet supported for OSX. - pass - - def OpenProcessForMemoryAccess(pid=None): return process.Process(pid=pid) diff --git a/grr/client/grr_response_client/client_utils_windows.py b/grr/client/grr_response_client/client_utils_windows.py index 794d928c18..f1c9220ba8 100644 --- a/grr/client/grr_response_client/client_utils_windows.py +++ b/grr/client/grr_response_client/client_utils_windows.py @@ -317,14 +317,6 @@ def Get(self): return -def KeepAlive(): - - es_system_required = 0x00000001 - - kernel32 = Kernel32().kernel32 - kernel32.SetThreadExecutionState(ctypes.c_int(es_system_required)) - - def RtlGetVersion(os_version_info_struct): """Wraps the lowlevel RtlGetVersion routine. 
diff --git a/grr/client_builder/grr_response_client_builder/build_helpers.py b/grr/client_builder/grr_response_client_builder/build_helpers.py index 0695d3078c..5c7d8d1b41 100644 --- a/grr/client_builder/grr_response_client_builder/build_helpers.py +++ b/grr/client_builder/grr_response_client_builder/build_helpers.py @@ -2,6 +2,7 @@ """Helper functions used by client building/repacking process.""" +import datetime import io import logging import os @@ -203,7 +204,8 @@ def WriteBuildYaml(fd, build_timestamp=True, context=None): yaml_keys = set(build.REQUIRED_BUILD_YAML_KEYS) if build_timestamp: - output["Client.build_time"] = rdfvalue.RDFDatetime.Now() + now = datetime.datetime.now(datetime.timezone.utc) + output["Client.build_time"] = now.isoformat() else: yaml_keys.remove("Client.build_time") diff --git a/grr/client_builder/grr_response_client_builder/build_helpers_test.py b/grr/client_builder/grr_response_client_builder/build_helpers_test.py index 0ec79dd95d..bb1d2f7e2b 100644 --- a/grr/client_builder/grr_response_client_builder/build_helpers_test.py +++ b/grr/client_builder/grr_response_client_builder/build_helpers_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python """Tests for building and repacking clients.""" - +import datetime import io import os from unittest import mock @@ -27,8 +27,6 @@ def testWriteBuildYaml(self): expected = { "Client.build_environment": "cp27-cp27mu-linux_x86_64", - "Client.build_time": - "2016-05-24 20:04:25", "Template.build_type": "Release", "Template.build_context": @@ -49,10 +47,18 @@ def testWriteBuildYaml(self): with mock.patch.object(rdf_client.Uname, "FromCurrentSystem") as fcs: fcs.return_value.signature.return_value = "cp27-cp27mu-linux_x86_64" - with test_lib.FakeTime(1464120265): - build_helpers.WriteBuildYaml(fd, context=context) - self.assertEqual(yaml.safe_load(fd.getvalue()), expected) + before_time = datetime.datetime.now(datetime.timezone.utc) + build_helpers.WriteBuildYaml(fd, context=context) + after_time = datetime.datetime.now(datetime.timezone.utc) + + result = yaml.safe_load(fd.getvalue()) + + build_time = datetime.datetime.fromisoformat(result["Client.build_time"]) + self.assertBetween(build_time, before_time, after_time) + del result["Client.build_time"] + + self.assertEqual(result, expected) def testGenClientConfig(self): with test_lib.ConfigOverrider({"Client.build_environment": "test_env"}): diff --git a/grr/client_builder/grr_response_client_builder/pkg_utils.py b/grr/client_builder/grr_response_client_builder/pkg_utils.py index c54f1da693..5361936573 100644 --- a/grr/client_builder/grr_response_client_builder/pkg_utils.py +++ b/grr/client_builder/grr_response_client_builder/pkg_utils.py @@ -223,8 +223,8 @@ def _BuildToc(src_toc_path: str, files_dir: str) -> _BuildTocResult: file_order = [] dom = xml.dom.minidom.parse(src_toc_path) - _SetXmlChildAttribute(dom, "checksum", "style", "sha1") - checksum_elem = _XmlChild(dom, "checksum") + _SetXmlChildAttribute(dom, "checksum", "style", "sha1") # pytype: disable=wrong-arg-types + checksum_elem = _XmlChild(dom, "checksum") # pytype: disable=wrong-arg-types _SetXmlChildValue(checksum_elem, "offset", 0) _SetXmlChildValue(checksum_elem, "size", hashlib.sha1().digest_size) diff --git a/grr/core/grr_response_core/config/__init__.py b/grr/core/grr_response_core/config/__init__.py index e0ecec04cb..419b9f7b52 100644 --- a/grr/core/grr_response_core/config/__init__.py +++ b/grr/core/grr_response_core/config/__init__.py @@ -6,7 +6,6 @@ from grr_response_core.config import api from 
grr_response_core.config import artifacts from grr_response_core.config import build -from grr_response_core.config import checks from grr_response_core.config import client from grr_response_core.config import config from grr_response_core.config import contexts diff --git a/grr/core/grr_response_core/config/artifacts.py b/grr/core/grr_response_core/config/artifacts.py index ffc737661d..cd2a193965 100644 --- a/grr/core/grr_response_core/config/artifacts.py +++ b/grr/core/grr_response_core/config/artifacts.py @@ -14,30 +14,7 @@ "Artifacts.knowledge_base", [ "LinuxReleaseInfo", "LinuxUserProfiles", - "UsersDirectory", - "WindowsCodePage", - "WindowsDomainName", - "WindowsEnvironmentVariableAllUsersAppData", - "WindowsEnvironmentVariableAllUsersProfile", - "WindowsEnvironmentVariableCommonProgramFiles", - "WindowsEnvironmentVariableCommonProgramFilesX86", - "WindowsEnvironmentVariableComSpec", - "WindowsEnvironmentVariableDriverData", - "WindowsEnvironmentVariablePath", - "WindowsEnvironmentVariableProfilesDirectory", - "WindowsEnvironmentVariableProgramData", - "WindowsEnvironmentVariableProgramFiles", - "WindowsEnvironmentVariableProgramFilesX86", - "WindowsEnvironmentVariableSystemDrive", - "WindowsEnvironmentVariableSystemRoot", - "WindowsEnvironmentVariableTemp", - "WindowsEnvironmentVariableWinDir", - "WindowsRegistryCurrentControlSet", - "WindowsRegistryProfiles", - "WindowsUserShellFolders", "WindowsTimezone", - "WMIAccountUsersDomain", - "WMIProfileUsersHomeDir", ], "List of artifacts that are collected regularly by" " interrogate and used for interpolation of client-side" " variables. Includes artifacts for all supported OSes. " @@ -64,10 +41,12 @@ " whole list.") config_lib.DEFINE_list( - "Artifacts.knowledge_base_heavyweight", ["WMIAccountUsersDomain"], + "Artifacts.knowledge_base_heavyweight", + [], "Artifacts to skip when the 'lightweight' option is" " set on interrogate. These artifacts are too expensive" - " or slow to collect regularly from all machines.") + " or slow to collect regularly from all machines.", +) config_lib.DEFINE_list( "Artifacts.netgroup_filter_regexes", [], @@ -78,8 +57,3 @@ "Artifacts.netgroup_ignore_users", [], help="Exclude these users when parsing /etc/netgroup " "files.") - -config_lib.DEFINE_list( - name="Artifacts.edr_agents", - default=[], - help="Artifacts used for collecting metadata about EDR agents.") diff --git a/grr/core/grr_response_core/config/checks.py b/grr/core/grr_response_core/config/checks.py deleted file mode 100644 index 3f71afa017..0000000000 --- a/grr/core/grr_response_core/config/checks.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -"""Configuration parameters for the check subsystem.""" - -from grr_response_core.lib import config_lib - -config_lib.DEFINE_list("Checks.config_dir", [ - "%(grr_response_server/checks@grr-response-server|resource)", -], "A list of directories to load checks from.") - -config_lib.DEFINE_list("Checks.config_files", [], - "Paths of check configurations to load at start up.") - -config_lib.DEFINE_integer("Checks.max_results", 50, - "Maximum items to include as check results.") diff --git a/grr/core/grr_response_core/config/server.py b/grr/core/grr_response_core/config/server.py index 7c22275eae..7168fe7009 100644 --- a/grr/core/grr_response_core/config/server.py +++ b/grr/core/grr_response_core/config/server.py @@ -343,3 +343,12 @@ "will be used instead. 
" "NB: internal option, subject to change without notice.", ) + +config_lib.DEFINE_boolean( + name="Interrogate.collect_crowdstrike_agent_id", + default=False, + help=( + "Whether the interrogate flow should collect identifier of the " + "endpoint's CrowdStrike agent." + ), +) diff --git a/grr/core/grr_response_core/lib/parsers/all.py b/grr/core/grr_response_core/lib/parsers/all.py index d29d7a94f3..94228a06a6 100644 --- a/grr/core/grr_response_core/lib/parsers/all.py +++ b/grr/core/grr_response_core/lib/parsers/all.py @@ -5,7 +5,6 @@ from grr_response_core.lib.parsers import chrome_history from grr_response_core.lib.parsers import config_file from grr_response_core.lib.parsers import cron_file_parser -from grr_response_core.lib.parsers import eficheck_parser from grr_response_core.lib.parsers import firefox3_history from grr_response_core.lib.parsers import ie_history from grr_response_core.lib.parsers import linux_cmd_parser @@ -31,8 +30,6 @@ def Register(): "Dpkg", linux_cmd_parser.DpkgCmdParser) parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register( "Dmidecode", linux_cmd_parser.DmidecodeCmdParser) - parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register( - "Eficheck", eficheck_parser.EficheckCmdParser) parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register( "Mount", config_file.MountCmdParser) parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register( @@ -69,8 +66,6 @@ def Register(): "WmiLogicalDisks", wmi_parser.WMILogicalDisksParser) parsers.MULTI_RESPONSE_PARSER_FACTORY.Register( "WmiCsp", wmi_parser.WMIComputerSystemProductParser) - parsers.MULTI_RESPONSE_PARSER_FACTORY.Register( - "WmiInterfaces", wmi_parser.WMIInterfacesParser) # Registry value parsers. parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register( diff --git a/grr/core/grr_response_core/lib/parsers/eficheck_parser.py b/grr/core/grr_response_core/lib/parsers/eficheck_parser.py deleted file mode 100644 index 5c576a5a53..0000000000 --- a/grr/core/grr_response_core/lib/parsers/eficheck_parser.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python -"""Parser for eficheck output.""" - -from grr_response_core.lib import parser -from grr_response_core.lib.rdfvalues import apple_firmware as rdf_apple_firmware - - -class EficheckCmdParser(parser.CommandParser): - """Parser for eficheck --show-hashes.""" - - output_types = [rdf_apple_firmware.EfiCollection] - # TODO(user): Add default artifact for this parser. - supported_artifacts = [] - - def Parse(self, cmd, args, stdout, stderr, return_val, knowledge_base): - """Parse the eficheck output.""" - _ = stderr, args, knowledge_base # Unused. - self.CheckReturn(cmd, return_val) - - collection = rdf_apple_firmware.EfiCollection() - # The exact number of header lines may change. So try to parse and continue - # if that fails. 
- for line in stdout.decode("utf-8").splitlines(): - cols = line.split(":") - try: - volume_type, flags, index, address, size, guid, hash_value = cols - entry = rdf_apple_firmware.EfiEntry( - volume_type=int(volume_type), - flags=int(flags, 16), - index=int(index, 16), - address=int(address, 16), - size=int(size, 16), - guid=guid, - hash=hash_value) - collection.entries.append(entry) - except ValueError: - pass - - yield collection diff --git a/grr/core/grr_response_core/lib/parsers/eficheck_parser_test.py b/grr/core/grr_response_core/lib/parsers/eficheck_parser_test.py deleted file mode 100644 index 4214cecf4c..0000000000 --- a/grr/core/grr_response_core/lib/parsers/eficheck_parser_test.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -"""Tests for grr.parsers.eficheck_parser.""" - - -import io -import os - -from absl import app - -from grr_response_core.lib.parsers import eficheck_parser -from grr.test_lib import test_lib - - -class TestEficheckParsing(test_lib.GRRBaseTest): - """Test parsing of OSX files.""" - - def testEficheckShowHashes(self): - parser = eficheck_parser.EficheckCmdParser() - test_data_path = os.path.join(self.base_path, "eficheck_show_hashes.txt") - content = io.open(test_data_path, mode="rb").read() - result = list( - parser.Parse("/usr/sbin/eficheck", ["--show-hashes"], content, b"", 0, - None)) - - self.assertLen(result, 1) - self.assertLen(result[0].entries, 6) - self.assertEqual(result[0].entries[0].size, 8192) - self.assertEqual(result[0].entries[0].guid, - "7a9354d9-0468-444a-81ce-0bf617d890df") - self.assertEqual(result[0].entries[0].hash, - ("6ba638dfa7c9a7ccf75016e98d2074c5" - "3e38f5ae90edfa06672aafb6c7d1c4f7")) - - -def main(argv): - # Run the full test suite - test_lib.main(argv) - - -if __name__ == "__main__": - app.run(main) diff --git a/grr/core/grr_response_core/lib/parsers/linux_release_parser.py b/grr/core/grr_response_core/lib/parsers/linux_release_parser.py index 23b38f028b..083833d197 100644 --- a/grr/core/grr_response_core/lib/parsers/linux_release_parser.py +++ b/grr/core/grr_response_core/lib/parsers/linux_release_parser.py @@ -123,7 +123,7 @@ def Parse(self): complete = False data = self.contents.strip() - if self.name in ['RedHat', 'OracleLinux', 'OEL']: + if self.name in ['RedHat', 'OracleLinux', 'OEL', 'Rocky']: check = self.RH_RE.search(data) if check is not None: major = int(check.group(1)) @@ -149,17 +149,25 @@ class LinuxReleaseParser(parsers.MultiFileParser[rdf_protodict.Dict]): # Top priority: systems with lsb-release. WeightedReleaseFile(0, '/etc/lsb-release', LsbReleaseParseHandler), # Oracle Linux (formerly OEL). - WeightedReleaseFile(10, '/etc/oracle-release', - ReleaseFileParseHandler('OracleLinux')), + WeightedReleaseFile( + 10, '/etc/oracle-release', ReleaseFileParseHandler('OracleLinux') + ), # OEL. - WeightedReleaseFile(11, '/etc/enterprise-release', - ReleaseFileParseHandler('OEL')), + WeightedReleaseFile( + 11, '/etc/enterprise-release', ReleaseFileParseHandler('OEL') + ), + # Rocky. + WeightedReleaseFile( + 12, '/etc/rocky-release', ReleaseFileParseHandler('Rocky') + ), # RHEL-based. - WeightedReleaseFile(20, '/etc/redhat-release', - ReleaseFileParseHandler('RedHat')), + WeightedReleaseFile( + 20, '/etc/redhat-release', ReleaseFileParseHandler('RedHat') + ), # Debian-based. 
-    WeightedReleaseFile(20, '/etc/debian_version',
-                        ReleaseFileParseHandler('Debian')),
+    WeightedReleaseFile(
+        20, '/etc/debian_version', ReleaseFileParseHandler('Debian')
+    ),
     # TODO(user): These weights are pointless - we can remove
     # them while preserving functionality. ReleaseFileParseHandler should
     # be deleted and replaced with a function.
diff --git a/grr/core/grr_response_core/lib/parsers/linux_release_parser_test.py b/grr/core/grr_response_core/lib/parsers/linux_release_parser_test.py
index 0bc85ef88b..6fb4259630 100644
--- a/grr/core/grr_response_core/lib/parsers/linux_release_parser_test.py
+++ b/grr/core/grr_response_core/lib/parsers/linux_release_parser_test.py
@@ -109,10 +109,18 @@ def testEndToEndOracleLinux(self):
     parser = linux_release_parser.LinuxReleaseParser()

     testdata = [
-        ("/etc/lsb-release",
-         os.path.join(self.parser_test_dir, "lsb-release-notubuntu")),
-        ("/etc/oracle-release",
-         os.path.join(self.parser_test_dir, "oracle-release")),
+        (
+            "/etc/lsb-release",
+            os.path.join(self.parser_test_dir, "lsb-release-notubuntu"),
+        ),
+        (
+            "/etc/redhat-release",
+            os.path.join(self.parser_test_dir, "redhat-release"),
+        ),
+        (
+            "/etc/oracle-release",
+            os.path.join(self.parser_test_dir, "oracle-release"),
+        ),
     ]
     pathspecs, files = self._CreateTestData(testdata)

@@ -123,6 +131,32 @@ def testEndToEndOracleLinux(self):
     self.assertEqual(6, result["os_major_version"])
     self.assertEqual(5, result["os_minor_version"])

+  def testEndToEndRockyLinux(self):
+    parser = linux_release_parser.LinuxReleaseParser()
+
+    testdata = [
+        (
+            "/etc/lsb-release",
+            os.path.join(self.parser_test_dir, "lsb-release-notubuntu"),
+        ),
+        (
+            "/etc/redhat-release",
+            os.path.join(self.parser_test_dir, "redhat-release"),
+        ),
+        (
+            "/etc/rocky-release",
+            os.path.join(self.parser_test_dir, "rocky-release"),
+        ),
+    ]
+    pathspecs, files = self._CreateTestData(testdata)
+
+    result = list(parser.ParseFiles(None, pathspecs, files)).pop()
+
+    self.assertIsInstance(result, rdf_protodict.Dict)
+    self.assertEqual("Rocky", result["os_release"])
+    self.assertEqual(8, result["os_major_version"])
+    self.assertEqual(8, result["os_minor_version"])
+
   def testEndToEndAmazon(self):
     parser = linux_release_parser.LinuxReleaseParser()
     test_data = [
diff --git a/grr/core/grr_response_core/lib/parsers/osx_file_parser.py b/grr/core/grr_response_core/lib/parsers/osx_file_parser.py
index c298876fa0..770705fba0 100644
--- a/grr/core/grr_response_core/lib/parsers/osx_file_parser.py
+++ b/grr/core/grr_response_core/lib/parsers/osx_file_parser.py
@@ -25,7 +25,10 @@ class OSXUsersParser(parsers.MultiResponseParser[rdf_client.User]):
   """Parser for Glob of /Users/*."""

   output_types = [rdf_client.User]
-  supported_artifacts = ["UsersDirectory"]
+
+  # TODO: The parser has to be invoked explicitly, we should not
+  # rely on magic parsing anymore.
+ supported_artifacts = [] _ignore_users = ["Shared"] diff --git a/grr/core/grr_response_core/lib/parsers/wmi_parser.py b/grr/core/grr_response_core/lib/parsers/wmi_parser.py index a81839c851..75986baaa5 100644 --- a/grr/core/grr_response_core/lib/parsers/wmi_parser.py +++ b/grr/core/grr_response_core/lib/parsers/wmi_parser.py @@ -2,7 +2,6 @@ """Simple parsers for the output of WMI queries.""" -import binascii import calendar import struct import time @@ -13,7 +12,6 @@ from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs -from grr_response_core.lib.rdfvalues import client_network as rdf_client_network from grr_response_core.lib.rdfvalues import wmi as rdf_wmi from grr_response_core.lib.util import precondition @@ -282,81 +280,3 @@ def ParseMultiple(self, result_dicts): yield rdf_client.HardwareInfo( serial_number=result_dict["IdentifyingNumber"], system_manufacturer=result_dict["Vendor"]) - - -class WMIInterfacesParser(parser.WMIQueryParser): - """Parser for WMI output. Yields SoftwarePackage rdfvalues.""" - - output_types = [ - rdf_client_network.Interface, - rdf_client_network.DNSClientConfiguration, - ] - supported_artifacts = [] - - def WMITimeStrToRDFDatetime(self, timestr): - """Return RDFDatetime from string like 20140825162259.000000-420. - - Args: - timestr: WMI time string - - Returns: - rdfvalue.RDFDatetime - - We have some timezone manipulation work to do here because the UTC offset is - in minutes rather than +-HHMM - """ - # We use manual parsing here because the time functions provided (datetime, - # dateutil) do not properly deal with timezone information. - offset_minutes = timestr[21:] - year = timestr[:4] - month = timestr[4:6] - day = timestr[6:8] - hours = timestr[8:10] - minutes = timestr[10:12] - seconds = timestr[12:14] - microseconds = timestr[15:21] - - unix_seconds = calendar.timegm( - tuple(map(int, [year, month, day, hours, minutes, seconds]))) - unix_seconds -= int(offset_minutes) * 60 - return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds)) - - def _ConvertIPs(self, io_tuples, interface, output_dict): - for inputkey, outputkey in io_tuples: - addresses = [] - if isinstance(interface[inputkey], list): - for ip_address in interface[inputkey]: - addresses.append( - rdf_client_network.NetworkAddress( - human_readable_address=ip_address)) - else: - addresses.append( - rdf_client_network.NetworkAddress( - human_readable_address=interface[inputkey])) - output_dict[outputkey] = addresses - return output_dict - - def ParseMultiple(self, result_dicts): - """Parse the WMI packages output.""" - for result_dict in result_dicts: - args = {"ifname": result_dict["Description"]} - args["mac_address"] = binascii.unhexlify( - result_dict["MACAddress"].replace(":", "")) - - self._ConvertIPs([("IPAddress", "addresses"), - ("DefaultIPGateway", "ip_gateway_list"), - ("DHCPServer", "dhcp_server_list")], result_dict, args) - - if "DHCPLeaseExpires" in result_dict: - args["dhcp_lease_expires"] = self.WMITimeStrToRDFDatetime( - result_dict["DHCPLeaseExpires"]) - - if "DHCPLeaseObtained" in result_dict: - args["dhcp_lease_obtained"] = self.WMITimeStrToRDFDatetime( - result_dict["DHCPLeaseObtained"]) - - yield rdf_client_network.Interface(**args) - - yield rdf_client_network.DNSClientConfiguration( - dns_server=result_dict["DNSServerSearchOrder"], - dns_suffix=result_dict["DNSDomainSuffixSearchOrder"]) diff --git 
a/grr/core/grr_response_core/lib/parsers/wmi_parser_test.py b/grr/core/grr_response_core/lib/parsers/wmi_parser_test.py index 235e18c836..8ad6632cf2 100644 --- a/grr/core/grr_response_core/lib/parsers/wmi_parser_test.py +++ b/grr/core/grr_response_core/lib/parsers/wmi_parser_test.py @@ -1,71 +1,17 @@ #!/usr/bin/env python """Tests for grr.parsers.wmi_parser.""" - - -import platform -import unittest - from absl import app from grr_response_core.lib.parsers import wmi_parser from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly -from grr_response_core.lib.rdfvalues import client_network as rdf_client_network from grr_response_core.lib.rdfvalues import protodict as rdf_protodict from grr_response_core.lib.rdfvalues import wmi as rdf_wmi -from grr.test_lib import client_test_lib from grr.test_lib import flow_test_lib from grr.test_lib import test_lib class WMIParserTest(flow_test_lib.FlowTestsBaseclass): - @unittest.skipIf( - platform.system() == "Darwin", - ("IPv6 address strings are cosmetically slightly different on OS X, " - "and we only expect this parsing code to run on Linux or maybe Windows")) - def testInterfaceParsing(self): - parser = wmi_parser.WMIInterfacesParser() - rdf_dict = rdf_protodict.Dict() - mock_config = client_test_lib.WMIWin32NetworkAdapterConfigurationMock - wmi_properties = mock_config.__dict__.items() - for key, value in wmi_properties: - if not key.startswith("__"): - try: - rdf_dict[key] = value - except TypeError: - rdf_dict[key] = "Failed to encode: %s" % value - - result_list = list(parser.ParseMultiple([rdf_dict])) - self.assertLen(result_list, 2) - for result in result_list: - if isinstance(result, rdf_client_network.Interface): - self.assertLen(result.addresses, 4) - self.assertCountEqual( - [x.human_readable_address for x in result.addresses], [ - "192.168.1.20", "ffff::ffff:aaaa:1111:aaaa", - "dddd:0:8888:6666:bbbb:aaaa:eeee:bbbb", - "dddd:0:8888:6666:bbbb:aaaa:ffff:bbbb" - ]) - - self.assertCountEqual( - [x.human_readable_address for x in result.dhcp_server_list], - ["192.168.1.1"]) - - self.assertEqual(result.dhcp_lease_expires.AsMicrosecondsSinceEpoch(), - 1409008979123456) - self.assertEqual(result.dhcp_lease_obtained.AsMicrosecondsSinceEpoch(), - 1408994579123456) - - elif isinstance(result, rdf_client_network.DNSClientConfiguration): - self.assertCountEqual( - result.dns_server, - ["192.168.1.1", "192.168.255.81", "192.168.128.88"]) - - self.assertCountEqual(result.dns_suffix, [ - "blah.example.com", "ad.example.com", "internal.example.com", - "example.com" - ]) - def testWMIActiveScriptEventConsumerParser(self): parser = wmi_parser.WMIActiveScriptEventConsumerParser() rdf_dict = rdf_protodict.Dict() diff --git a/grr/core/grr_response_core/lib/rdfvalue.py b/grr/core/grr_response_core/lib/rdfvalue.py index 6ad5e0af82..5708cd0f96 100644 --- a/grr/core/grr_response_core/lib/rdfvalue.py +++ b/grr/core/grr_response_core/lib/rdfvalue.py @@ -23,6 +23,7 @@ import dateutil from dateutil import parser +from google.protobuf import timestamp_pb2 from grr_response_core.lib import registry from grr_response_core.lib import utils from grr_response_core.lib.util import precondition @@ -563,6 +564,22 @@ def FromDatetime(cls, value): return cls((seconds * cls.converter) + (value.microsecond * cls.converter // 1000000)) + @classmethod + def FromProtoTimestamp( + cls, + timestamp: timestamp_pb2.Timestamp, + ) -> "RDFDatetime": + """Converts Protocol Buffers `Timestamp` instances to datetime objects. 
+ + Args: + timestamp: A Protocol Buffers `Timestamp` instance to convert. + + Returns: + A corresponding RDF datetime object. + """ + micros = timestamp.seconds * 1_000_000 + timestamp.nanos // 1_000 + return RDFDatetime.FromMicrosecondsSinceEpoch(micros) + @classmethod def FromDate(cls, value): seconds = calendar.timegm(value.timetuple()) @@ -593,6 +610,12 @@ def Lerp(cls, t, start_time, end_time): return cls(round((1 - t) * start_time._value + t * end_time._value)) # pylint: disable=protected-access + @classmethod + def EarliestDatabaseSafeValue(cls): + """Returns the earliest datetime supported by all database backends.""" + # See https://bugs.mysql.com/77232 + return cls(1000000) + def __add__(self, other): # TODO(hanuszczak): Disallow `float` initialization. if isinstance(other, (int, float)): diff --git a/grr/core/grr_response_core/lib/rdfvalue_test.py b/grr/core/grr_response_core/lib/rdfvalue_test.py index a6addbca6a..5e9091c60d 100644 --- a/grr/core/grr_response_core/lib/rdfvalue_test.py +++ b/grr/core/grr_response_core/lib/rdfvalue_test.py @@ -9,6 +9,7 @@ from absl import app from absl.testing import absltest +from google.protobuf import timestamp_pb2 from grr_response_core.lib import rdfvalue from grr.test_lib import test_lib @@ -224,6 +225,29 @@ def testAsDatetimeUTC_Normal(self): self.assertEqual(py_datetime.hour, 8) self.assertEqual(py_datetime.minute, 0) + def testFromTimestampProto_Now(self): + pre_datetime = rdfvalue.RDFDatetime.Now() + + timestamp = timestamp_pb2.Timestamp() + timestamp.GetCurrentTime() + + post_datetime = rdfvalue.RDFDatetime.Now() + + timestamp_datetime = rdfvalue.RDFDatetime.FromProtoTimestamp(timestamp) + self.assertBetween(timestamp_datetime, pre_datetime, post_datetime) + + def testFromTimestampProto_Seconds(self): + timestamp = timestamp_pb2.Timestamp(seconds=1337) + + timestamp_datetime = rdfvalue.RDFDatetime.FromProtoTimestamp(timestamp) + self.assertEqual(timestamp_datetime.AsSecondsSinceEpoch(), 1337) + + def testFromTimestampProto_Nanos(self): + timestamp = timestamp_pb2.Timestamp(nanos=7654321) + + timestamp_datetime = rdfvalue.RDFDatetime.FromProtoTimestamp(timestamp) + self.assertEqual(timestamp_datetime.AsMicrosecondsSinceEpoch(), 7654) + class RDFDatetimeSecondsTest(absltest.TestCase): diff --git a/grr/core/grr_response_core/lib/rdfvalues/apple_firmware.py b/grr/core/grr_response_core/lib/rdfvalues/apple_firmware.py deleted file mode 100644 index 0e7fb713d8..0000000000 --- a/grr/core/grr_response_core/lib/rdfvalues/apple_firmware.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -"""Locally defined rdfvalues.""" - - -from grr_response_core.lib.rdfvalues import client_action as rdf_client_action -from grr_response_core.lib.rdfvalues import paths as rdf_paths -from grr_response_core.lib.rdfvalues import structs as rdf_structs -from grr_response_proto import apple_firmware_pb2 - - -class EficheckConfig(rdf_structs.RDFProtoStruct): - """A request to eficheck to collect the EFI hashes.""" - protobuf = apple_firmware_pb2.EficheckConfig - - -class CollectEfiHashesResponse(rdf_structs.RDFProtoStruct): - """A response from eficheck with the collected hashes.""" - protobuf = apple_firmware_pb2.CollectEfiHashesResponse - rdf_deps = [ - rdf_client_action.ExecuteBinaryResponse, - ] - - -class DumpEfiImageResponse(rdf_structs.RDFProtoStruct): - """A response from eficheck with the flash image.""" - protobuf = apple_firmware_pb2.DumpEfiImageResponse - rdf_deps = [ - rdf_client_action.ExecuteBinaryResponse, - rdf_paths.PathSpec, - ] - - -class 
EficheckFlowArgs(rdf_structs.RDFProtoStruct): - """Flow argument to dump the EFI image or collect its hashes.""" - protobuf = apple_firmware_pb2.EficheckFlowArgs - - -class EfiEntry(rdf_structs.RDFProtoStruct): - """An EfiEntry.""" - protobuf = apple_firmware_pb2.EfiEntry - - -class EfiCollection(rdf_structs.RDFProtoStruct): - """An EfiCollection as forwarded for verification.""" - protobuf = apple_firmware_pb2.EfiCollection - rdf_deps = [ - EfiEntry, - ] diff --git a/grr/core/grr_response_core/lib/rdfvalues/artifacts.py b/grr/core/grr_response_core/lib/rdfvalues/artifacts.py index 2a89a1dca1..9d87ac64b4 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/artifacts.py +++ b/grr/core/grr_response_core/lib/rdfvalues/artifacts.py @@ -305,7 +305,7 @@ def ToJson(self): def ToDict(self): return self.ToPrimitiveDict() - def ToPrimitiveDict(self): + def ToPrimitiveDict(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """Handle dict generation specifically for Artifacts.""" artifact_dict = super().ToPrimitiveDict() diff --git a/grr/core/grr_response_core/lib/rdfvalues/client_action.py b/grr/core/grr_response_core/lib/rdfvalues/client_action.py index f9b771c8c5..f30200540f 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/client_action.py +++ b/grr/core/grr_response_core/lib/rdfvalues/client_action.py @@ -51,22 +51,6 @@ class ExecuteResponse(rdf_structs.RDFProtoStruct): ] -class SendFileRequest(rdf_structs.RDFProtoStruct): - """Arguments for the `SendFile` action.""" - - protobuf = jobs_pb2.SendFileRequest - rdf_deps = [ - rdf_crypto.AES128Key, - rdf_paths.PathSpec, - ] - - def Validate(self): - self.pathspec.Validate() - - if not self.host: - raise ValueError("A host must be specified.") - - class Iterator(rdf_structs.RDFProtoStruct): """An Iterated client action is one which can be resumed on the client.""" protobuf = jobs_pb2.Iterator diff --git a/grr/core/grr_response_core/lib/rdfvalues/crypto.py b/grr/core/grr_response_core/lib/rdfvalues/crypto.py index 50b2967854..43a083d36a 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/crypto.py +++ b/grr/core/grr_response_core/lib/rdfvalues/crypto.py @@ -684,21 +684,6 @@ def RawBytes(self): return self._value -# TODO(amoser): Size is now flexible, this class makes no sense anymore. -class AES128Key(EncryptionKey): - length = 128 - - -class AutoGeneratedAES128Key(AES128Key): - """Like AES128Key, but its UI edit box is prefilled with generated key.""" - - def __init__(self, initializer=None, **kwargs): - if isinstance(initializer, AES128Key): - super().__init__(initializer=initializer.RawBytes(), **kwargs) - else: - super().__init__(initializer=initializer, **kwargs) - - class StreamingCBCEncryptor(object): """A class to stream data to a CBCCipher object.""" diff --git a/grr/core/grr_response_core/lib/rdfvalues/crypto_test.py b/grr/core/grr_response_core/lib/rdfvalues/crypto_test.py index 7c5bbbde06..e0cb43a894 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/crypto_test.py +++ b/grr/core/grr_response_core/lib/rdfvalues/crypto_test.py @@ -177,8 +177,8 @@ def testRSAPrivate(self): class CryptoUtilTest(CryptoTestBase): def testStreamingCBCEncryptor(self): - key = rdf_crypto.AES128Key.GenerateKey() - iv = rdf_crypto.AES128Key.GenerateKey() + key = rdf_crypto.EncryptionKey.GenerateKey() + iv = rdf_crypto.EncryptionKey.GenerateKey() # 160 characters. message = b"Hello World!!!!!" 
* 10 @@ -214,23 +214,23 @@ def testStreamingCBCEncryptor(self): self.assertEqual(cipher.Decrypt(b"".join(out)), plaintext) - def testAES128Key(self): - key = rdf_crypto.AES128Key.GenerateKey() - iv = rdf_crypto.AES128Key.GenerateKey() + def testEncryptionKey(self): + key = rdf_crypto.EncryptionKey.GenerateKey() + iv = rdf_crypto.EncryptionKey.GenerateKey() self.assertNotEqual(key, iv) self.assertNotEqual(key.RawBytes(), iv.RawBytes()) # This key is too short. - self.assertRaises(rdf_crypto.CipherError, rdf_crypto.AES128Key, b"foo") + self.assertRaises(rdf_crypto.CipherError, rdf_crypto.EncryptionKey, b"foo") - copied_key = rdf_crypto.AES128Key(key.RawBytes()) + copied_key = rdf_crypto.EncryptionKey(key.RawBytes()) self.assertEqual(copied_key, key) self.assertEqual(copied_key.RawBytes(), key.RawBytes()) def testAES128CBCCipher(self): - key = rdf_crypto.AES128Key.GenerateKey() - iv = rdf_crypto.AES128Key.GenerateKey() + key = rdf_crypto.EncryptionKey.GenerateKey() + iv = rdf_crypto.EncryptionKey.GenerateKey() cipher = rdf_crypto.AES128CBCCipher(key, iv) @@ -243,8 +243,8 @@ def testAES128CBCCipher(self): self.assertNotEqual(cipher_text, plain_text) self.assertEqual(cipher.Decrypt(cipher_text), plain_text) - key2 = rdf_crypto.AES128Key.GenerateKey() - iv2 = rdf_crypto.AES128Key.GenerateKey() + key2 = rdf_crypto.EncryptionKey.GenerateKey() + iv2 = rdf_crypto.EncryptionKey.GenerateKey() cipher = rdf_crypto.AES128CBCCipher(key, iv2) self.assertRaises(rdf_crypto.CipherError, cipher.Decrypt, plain_text) cipher = rdf_crypto.AES128CBCCipher(key2, iv) diff --git a/grr/core/grr_response_core/lib/rdfvalues/dummy.py b/grr/core/grr_response_core/lib/rdfvalues/dummy.py new file mode 100644 index 0000000000..3bd49b54e7 --- /dev/null +++ b/grr/core/grr_response_core/lib/rdfvalues/dummy.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +"""The various Dummy example rdfvalues.""" + +from grr_response_core.lib.rdfvalues import structs as rdf_structs +from grr_response_proto import dummy_pb2 + + +class DummyRequest(rdf_structs.RDFProtoStruct): + """Request for Dummy action.""" + + protobuf = dummy_pb2.DummyRequest + rdf_deps = [] + + +class DummyResult(rdf_structs.RDFProtoStruct): + """Result for Dummy action.""" + + protobuf = dummy_pb2.DummyResult + rdf_deps = [] diff --git a/grr/core/grr_response_core/lib/rdfvalues/flows.py b/grr/core/grr_response_core/lib/rdfvalues/flows.py index 773b3b0b69..1fad73e2d1 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/flows.py +++ b/grr/core/grr_response_core/lib/rdfvalues/flows.py @@ -248,25 +248,6 @@ class ClientCommunication(rdf_structs.RDFProtoStruct): num_messages = 0 -class ClientActionRequest(rdf_structs.RDFProtoStruct): - """The request that gets sent to the client.""" - protobuf = flows_pb2.ClientActionRequest - rdf_deps = [ - rdfvalue.Duration, - ] - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - if not self.HasField("cpu_limit_ms"): - self.cpu_limit_ms = 3600000 - if not self.HasField("runtime_limit_us"): - self.runtime_limit_us = 1000 * self.cpu_limit_ms - - if not self.HasField("network_bytes_limit"): - self.network_bytes_limit = 10737418240 - - class EmptyFlowArgs(rdf_structs.RDFProtoStruct): """Some flows do not take argumentnts.""" protobuf = flows_pb2.EmptyFlowArgs diff --git a/grr/core/grr_response_core/lib/rdfvalues/standard.py b/grr/core/grr_response_core/lib/rdfvalues/standard.py index cb36c0608a..f23f4ee8ae 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/standard.py +++ 
b/grr/core/grr_response_core/lib/rdfvalues/standard.py @@ -134,4 +134,4 @@ def SerializeToBytes(self) -> bytes: def SerializeToHumanReadable(self) -> Text: parts = (self.transport, self.host, self.path, self.query, self.fragment) - return urlparse.urlunsplit(parts) + return urlparse.urlunsplit(parts) # pytype: disable=bad-return-type diff --git a/grr/core/grr_response_core/lib/rdfvalues/structs.py b/grr/core/grr_response_core/lib/rdfvalues/structs.py index 7491c267ce..3b62e21505 100644 --- a/grr/core/grr_response_core/lib/rdfvalues/structs.py +++ b/grr/core/grr_response_core/lib/rdfvalues/structs.py @@ -331,7 +331,7 @@ def GetDefault(self, container=None): _ = container return self.default - def Validate(self, value, **_) -> Text: + def Validate(self, value, **_) -> Text: # pytype: disable=signature-mismatch # overriding-parameter-count-checks """Validates a python format representation of the value.""" if isinstance(value, rdfvalue.RDFString): # TODO(hanuszczak): Use `str` here. @@ -394,7 +394,7 @@ def __init__(self, default=b"", **kwargs): if default is not None: self.default = default - def Validate(self, value, **_): + def Validate(self, value, **_): # pytype: disable=signature-mismatch # overriding-parameter-count-checks if not isinstance(value, bytes): raise type_info.TypeValueError("Required bytes, got %r" % value) @@ -439,7 +439,7 @@ def ConvertFromWireFormat(self, value, container=None): def ConvertToWireFormat(self, value): return (self.encoded_tag, b"", VarintEncode(value)) - def Validate(self, value, **_): + def Validate(self, value, **_): # pytype: disable=signature-mismatch # overriding-parameter-count-checks try: return int(value) except ValueError: @@ -878,7 +878,7 @@ def GetDefault(self, container=None): """When a nested proto is accessed, default to an empty one.""" return self.type() - def Validate(self, value, **_): + def Validate(self, value, **_): # pytype: disable=signature-mismatch # overriding-parameter-count-checks if isinstance(value, str): raise type_info.TypeValueError("Field %s must be of type %s" % (self.name, self.type.__name__)) @@ -1177,6 +1177,7 @@ def Extend(self, iterable): append = utils.Proxy("Append") remove = utils.Proxy("Remove") + extend = utils.Proxy("Extend") def __getitem__(self, item): # Ensure we handle slices as well. 
@@ -1289,7 +1290,7 @@ def GetDefault(self, container=None): return RepeatedFieldHelper( type_descriptor=self.delegate, container=container) - def Validate(self, value, **_): + def Validate(self, value, **_): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """Check that value is a list of the required type.""" # Assigning from same kind can allow us to skip verification since all # elements in a RepeatedFieldHelper already are coerced to the delegate @@ -1478,7 +1479,7 @@ def Definition(self): return ("\n // Semantic Type: %s" % self.type.__name__) + self.primitive_desc.Definition() - def Validate(self, value, **_): + def Validate(self, value, **_): # pytype: disable=signature-mismatch # overriding-parameter-count-checks # Try to coerce into the correct type: if value.__class__ is not self.type: try: diff --git a/grr/core/grr_response_core/lib/util/cache.py b/grr/core/grr_response_core/lib/util/cache.py index 4fafac5f1d..eeeacb11e9 100644 --- a/grr/core/grr_response_core/lib/util/cache.py +++ b/grr/core/grr_response_core/lib/util/cache.py @@ -4,13 +4,20 @@ import functools import logging import threading +from typing import Any, Callable, TypeVar from grr_response_core.lib import rdfvalue WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH = False +_F = TypeVar("_F", bound=Callable[..., Any]) -def WithLimitedCallFrequency(min_time_between_calls: rdfvalue.Duration): +_FVoid = TypeVar("_FVoid", bound=Callable[..., None]) + + +def WithLimitedCallFrequency( + min_time_between_calls: rdfvalue.Duration, +) -> Callable[[_F], _F]: """Function call rate-limiting decorator. This decorator ensures that the wrapped function will be called at most @@ -49,13 +56,48 @@ def Foo(id): A Python function decorator. """ - def Decorated(f): + def Decorated(f: _F) -> _F: """Actual decorator implementation.""" lock = threading.RLock() prev_times = {} prev_results = {} result_locks = {} + prev_cleanup_time = rdfvalue.RDFDatetime.Now() + + def CleanUpCache(now: rdfvalue.RDFDatetime, min_time: rdfvalue.Duration): + """Cleans up the cache from stale entries.""" + nonlocal prev_cleanup_time + if now < prev_cleanup_time: + logging.warning( + "Current timestamp %s is before the previous cache cleaning time" + " %s, hoping we're inside the test", + now, + prev_cleanup_time, + ) + prev_cleanup_time = now + return + + if (now - prev_cleanup_time) < min_time: + return + + for k, prev_time in list(prev_times.items()): + if prev_time > now: + # We have a result from the future, hopefully this is a test... + logging.warning( + "Deleting cached function result from the future (%s > %s)", + prev_time, + now, + ) + prev_times.pop(k) + prev_results.pop(k, None) + result_locks.pop(k, None) + elif now - prev_time >= min_time: + prev_times.pop(k) + prev_results.pop(k, None) + result_locks.pop(k, None) + + prev_cleanup_time = now @functools.wraps(f) def Fn(*args, **kwargs): @@ -71,24 +113,12 @@ def Fn(*args, **kwargs): now = rdfvalue.RDFDatetime.Now() with lock: - for k, prev_time in list(prev_times.items()): - if prev_time > now: - # We have a result from the future, hopefully this is a test... 
-            logging.warning(
-                "Deleting cached function result from the future (%s > %s)",
-                prev_time, now)
-            prev_times.pop(k)
-            prev_results.pop(k, None)
-            result_locks.pop(k, None)
-          elif now - prev_time >= min_time:
-            prev_times.pop(k)
-            prev_results.pop(k, None)
-            result_locks.pop(k, None)
+        CleanUpCache(now, min_time)
 
         try:
-          # We eliminated all the old entries, so if the key is present
-          # in the cache, it means that the data is fresh enough to be used.
-          return prev_results[key]
+          prev_time = prev_times[key]
+          if now - prev_time < min_time:
+            return prev_results[key]
         except KeyError:
           prev_time = None
 
@@ -111,6 +141,159 @@ def Fn(*args, **kwargs):
       else:
         return prev_results[key]
 
+    def _DebugInternalState():
+      return dict(
+          prev_times=prev_times,
+          prev_results=prev_results,
+          result_locks=result_locks,
+          prev_cleanup_time=prev_cleanup_time,
+      )
+
+    # This is used by the tests to ensure that the internal representation
+    # behaves as expected.
+    Fn._DebugInternalState = (  # pylint: disable=protected-access
+        _DebugInternalState
+    )
+
+    return Fn
+
+  return Decorated
+
+
+def WithLimitedCallFrequencyWithoutReturnValue(
+    min_time_between_calls: rdfvalue.Duration,
+) -> Callable[[_FVoid], _FVoid]:
+  """Function call rate-limiting decorator for None-returning functions.
+
+  This decorator ensures that the wrapped function will be called at most
+  once in min_time_between_calls time for the same set of arguments. Given
+  that the wrapped function is not expected to return a value, all excessive
+  calls will be dropped immediately, even if a call for the same set of
+  arguments is still in progress in another thread.
+
+  Suppose we use the decorator like this:
+  @cache.WithLimitedCallFrequencyWithoutReturnValue(
+      rdfvalue.Duration.From(30, rdfvalue.SECONDS))
+  def Foo(id):
+    ...
+
+  If Foo(42) is called and then Foo(42) is called again within 30 seconds, then
+  the second call will simply return immediately.
+
+  If Foo(42) is called and then Foo(43) is called within 30 seconds, the
+  wrapped function will be properly called in both cases, since these Foo calls
+  have different argument sets.
+
+  If Foo(42) is called and takes a long time to finish, and another
+  Foo(42) call is done in another thread, then the latter call will return
+  immediately.
+
+  NOTE 1: this function becomes a trivial pass-through and does no caching if
+  the module-level WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH variable is set to
+  True. This is used in testing.
+
+  NOTE 2: all decorated functions' arguments have to be hashable.
+
+  Args:
+    min_time_between_calls: An rdfvalue.Duration specifying the minimal time to
+      pass between 2 consecutive function calls with same arguments.
+
+  Returns:
+    A Python function decorator.
+  """
+
+  def Decorated(f: _FVoid) -> _FVoid:
+    """Actual decorator implementation."""
+
+    lock = threading.RLock()
+    prev_times = {}
+    in_progress_locks = {}
+    prev_cleanup_time = rdfvalue.RDFDatetime.Now()
+
+    def CleanUpCache(now: rdfvalue.RDFDatetime, min_time: rdfvalue.Duration):
+      """Cleans up the cache from stale entries."""
+      nonlocal prev_cleanup_time
+      if now < prev_cleanup_time:
+        logging.warning(
+            "Current timestamp %s is before the previous cache cleaning time"
+            " %s, hoping we're inside the test",
+            now,
+            prev_cleanup_time,
+        )
+        prev_cleanup_time = now
+        return
+
+      if (now - prev_cleanup_time) < min_time:
+        return
+
+      for k, prev_time in list(prev_times.items()):
+        if prev_time > now:
+          # We have a result from the future, hopefully this is a test...
+          logging.warning(
+              "Deleting cached function result from the future (%s > %s)",
+              prev_time,
+              now,
+          )
+          prev_times.pop(k)
+          in_progress_locks.pop(k, None)
+        elif now - prev_time >= min_time:
+          prev_times.pop(k)
+          in_progress_locks.pop(k, None)
+
+      prev_cleanup_time = now
+
+    @functools.wraps(f)
+    def Fn(*args, **kwargs):
+      """Wrapper around the decorated function."""
+
+      if WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH:
+        # This effectively turns off the caching.
+        min_time = rdfvalue.Duration(0)
+      else:
+        min_time = min_time_between_calls
+
+      key = (args, tuple(sorted(kwargs.items())))
+      now = rdfvalue.RDFDatetime.Now()
+
+      with lock:
+        CleanUpCache(now, min_time)
+
+        try:
+          prev_time = prev_times[key]
+          if now - prev_time < min_time:
+            return
+        except KeyError:
+          pass
+
+        try:
+          in_progress_lock = in_progress_locks[key]
+        except KeyError:
+          in_progress_lock = threading.RLock()
+          in_progress_locks[key] = in_progress_lock
+
+      if in_progress_lock.acquire(blocking=False):
+        try:
+          r = f(*args, **kwargs)
+          assert r is None, "Wrapped function should have no return value"
+
+          with lock:
+            prev_times[key] = rdfvalue.RDFDatetime.Now()
+        finally:
+          in_progress_lock.release()
+
+    def _DebugInternalState():
+      return dict(
+          prev_times=prev_times,
+          in_progress_locks=in_progress_locks,
+          prev_cleanup_time=prev_cleanup_time,
+      )
+
+    # This is used by the tests to ensure that the internal representation
+    # behaves as expected.
+    Fn._DebugInternalState = (  # pylint: disable=protected-access
+        _DebugInternalState
+    )
+
+    return Fn
+
+  return Decorated
diff --git a/grr/core/grr_response_core/lib/util/cache_test.py b/grr/core/grr_response_core/lib/util/cache_test.py
index 214a0d5492..2eedffea22 100644
--- a/grr/core/grr_response_core/lib/util/cache_test.py
+++ b/grr/core/grr_response_core/lib/util/cache_test.py
@@ -25,6 +25,39 @@ def testCallsFunctionEveryTimeWhenMinTimeBetweenCallsZero(self):
 
     self.assertEqual(self.mock_fn.call_count, 10)
 
+  def testCacheAlwaysContainsOnlySingleItemWhenMinTimeBetweenCallsZero(self):
+    decorated = cache.WithLimitedCallFrequency(rdfvalue.Duration(0))(
+        self.mock_fn
+    )
+    for i in range(10):
+      decorated(i)
+
+    self.assertLen(decorated._DebugInternalState()["prev_results"], 1)
+
+  def testCacheIsCleanedAfterMinTimeBetweenCallsHasElapsed(self):
+    decorated = cache.WithLimitedCallFrequency(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(self.mock_fn)
+    for i in range(10):
+      decorated(i)
+
+    with test_lib.FakeTime(
+        rdfvalue.RDFDatetime.Now()
+        + rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    ):
+      decorated(11)
+
+    self.assertLen(decorated._DebugInternalState()["prev_results"], 1)
+
+  def testCacheIsNotCleanedIfMinTimeBetweenCallsHasNotElapsed(self):
+    decorated = cache.WithLimitedCallFrequency(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(self.mock_fn)
+    for i in range(10):
+      decorated(i)
+
+    self.assertLen(decorated._DebugInternalState()["prev_results"], 10)
+
   def testCallsFunctionOnceInGivenTimeRangeWhenMinTimeBetweenCallsNonZero(self):
     decorated = cache.WithLimitedCallFrequency(
         rdfvalue.Duration.From(30, rdfvalue.SECONDS))(
@@ -172,8 +205,224 @@ def testExceptionIsNotCached(self):
 
     self.assertEqual(mock_fn.call_count, 10)
 
-  # TODO(user): add a test case for a cace when non-hashable arguments are
-  # passed.
+  def testRaisesOnUnhashableArguments(self):
+    mock_fn = mock.Mock(side_effect=ValueError())
+
+    decorated = cache.WithLimitedCallFrequency(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(mock_fn)
+
+    with self.assertRaisesRegex(TypeError, "unhashable type"):
+      decorated(dict(foo="bar"))
+
+
+class WithLimitedCallFrequencyWithoutReturnValueTest(absltest.TestCase):
+
+  def setUp(self):
+    super().setUp()
+    self.mock_fn = mock.Mock(wraps=lambda *_: None)
+    self.mock_fn.__name__ = "foo"  # Expected by functools.wraps.
+
+  def testCallsFunctionEveryTimeWhenMinTimeBetweenCallsZero(self):
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration(0)
+    )(self.mock_fn)
+    for _ in range(10):
+      decorated()
+
+    self.assertEqual(self.mock_fn.call_count, 10)
+
+  def testCacheAlwaysContainsOnlySingleItemWhenMinTimeBetweenCallsZero(self):
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration(0)
+    )(self.mock_fn)
+    for i in range(10):
+      decorated(i)
+
+    self.assertLen(decorated._DebugInternalState()["prev_times"], 1)
+
+  def testCacheIsCleanedAfterMinTimeBetweenCallsHasElapsed(self):
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(self.mock_fn)
+    for i in range(10):
+      decorated(i)
+
+    with test_lib.FakeTime(
+        rdfvalue.RDFDatetime.Now()
+        + rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    ):
+      decorated(11)
+
+    self.assertLen(decorated._DebugInternalState()["prev_times"], 1)
+
+  def testCacheIsNotCleanedIfMinTimeBetweenCallsHasNotElapsed(self):
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(self.mock_fn)
+    for i in range(10):
+      decorated(i)
+
+    self.assertLen(decorated._DebugInternalState()["prev_times"], 10)
+
+  def testCallsFunctionOnceInGivenTimeRangeWhenMinTimeBetweenCallsNonZero(self):
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(self.mock_fn)
+
+    now = rdfvalue.RDFDatetime.Now()
+    with test_lib.FakeTime(now):
+      decorated()
+
+    with test_lib.FakeTime(now + rdfvalue.Duration.From(15, rdfvalue.SECONDS)):
+      decorated()
+
+    self.assertEqual(self.mock_fn.call_count, 1)
+
+    with test_lib.FakeTime(now + rdfvalue.Duration.From(30, rdfvalue.SECONDS)):
+      decorated()
+
+    self.assertEqual(self.mock_fn.call_count, 2)
+
+  def testCachingIsDonePerArguments(self):
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(self.mock_fn)
+
+    now = rdfvalue.RDFDatetime.Now()
+    with test_lib.FakeTime(now):
+      decorated(1)
+      decorated(2)
+
+    self.assertEqual(self.mock_fn.call_count, 2)
+
+    with test_lib.FakeTime(now + rdfvalue.Duration.From(15, rdfvalue.SECONDS)):
+      decorated(1)
+      decorated(2)
+
+    self.assertEqual(self.mock_fn.call_count, 2)
+
+    with test_lib.FakeTime(now + rdfvalue.Duration.From(30, rdfvalue.SECONDS)):
+      decorated(1)
+      decorated(2)
+
+    self.assertEqual(self.mock_fn.call_count, 4)
+
+  def testDecoratedFunctionIsNotExecutedConcurrently(self):
+    event = threading.Event()
+
+    # Can't rely on mock's call_count as it's not thread safe.
+    fn_calls = []
+
+    def Fn():
+      fn_calls.append(True)
+      event.wait()
+      return self.mock_fn()
+
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(Fn)
+
+    results = []
+
+    def T():
+      results.append(decorated())
+
+    threads = []
+    for _ in range(10):
+      t = threading.Thread(target=T)
+      t.start()
+      threads.append(t)
+
+    # At this point only 1 thread is actually executing the function (and is
+    # blocked on the event); all other threads have returned immediately.
+    # Trigger the event to let that one thread complete.
+    event.set()
+
+    for t in threads:
+      t.join()
+
+    self.assertLen(results, len(threads))
+    self.assertLen(fn_calls, 1)
+
+  def testDecoratedFunctionsAreNotWaitedForPerArguments(self):
+    event = threading.Event()
+
+    # Can't rely on mock's call_count as it's not thread safe.
+    fn_calls = []
+
+    def Fn(x):
+      fn_calls.append(x)
+      # Wait if this is the first call.
+      if len(fn_calls) == 1:
+        event.wait()
+
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(Fn)
+
+    def T():
+      decorated(1)
+
+    t = threading.Thread(target=T)
+    t.start()
+    try:
+      # This should return immediately, as the wrapped function has no return
+      # value and thus doesn't have to block even if the other one is in
+      # progress.
+      decorated(1)
+    finally:
+      event.set()
+      t.join()
+
+    self.assertLen(fn_calls, 1)
+
+  def testPropagatesExceptions(self):
+    mock_fn = mock.Mock(side_effect=ValueError())
+    mock_fn.__name__ = "foo"  # Expected by functools.wraps.
+
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(mock_fn)
+
+    with self.assertRaises(ValueError):
+      decorated()
+
+  def testExceptionIsNotCached(self):
+    mock_fn = mock.Mock(side_effect=ValueError())
+    mock_fn.__name__ = "foo"  # Expected by functools.wraps.
+
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(mock_fn)
+
+    for _ in range(10):
+      with self.assertRaises(ValueError):
+        decorated()
+
+    self.assertEqual(mock_fn.call_count, 10)
+
+  def testRaisesOnUnhashableArguments(self):
+    mock_fn = mock.Mock(side_effect=ValueError())
+
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(mock_fn)
+
+    with self.assertRaisesRegex(TypeError, "unhashable type"):
+      decorated(dict(foo="bar"))
+
+  def testRaisesIfWrappedFunctionReturnsValue(self):
+    mock_fn = mock.Mock(return_value=42)
+
+    decorated = cache.WithLimitedCallFrequencyWithoutReturnValue(
+        rdfvalue.Duration.From(30, rdfvalue.SECONDS)
+    )(mock_fn)
+
+    with self.assertRaisesRegex(
+        AssertionError, "Wrapped function should have no return value"
+    ):
+      decorated("blah")
 
 
 if __name__ == "__main__":
diff --git a/grr/core/grr_response_core/lib/util/filesystem.py b/grr/core/grr_response_core/lib/util/filesystem.py
index 8125d0d1ac..04328350e8 100644
--- a/grr/core/grr_response_core/lib/util/filesystem.py
+++ b/grr/core/grr_response_core/lib/util/filesystem.py
@@ -99,13 +99,28 @@ def GetSize(self) -> int:
     return self._stat.st_size
 
   def GetAccessTime(self) -> int:
-    return _NanosecondsToMicroseconds(self._stat.st_atime_ns)
+    # st_atime_ns is a higher-precision version of st_atime. Use it if it's
+    # present.
+    if self._stat.st_atime_ns is not None:
+      return _NanosecondsToMicroseconds(self._stat.st_atime_ns)
+    else:
+      return _SecondsToMicroseconds(self._stat.st_atime)
 
   def GetModificationTime(self) -> int:
-    return _NanosecondsToMicroseconds(self._stat.st_mtime_ns)
+    # st_mtime_ns is a higher-precision version of st_mtime. Use it if it's
+    # present.
+    if self._stat.st_mtime_ns is not None:
+      return _NanosecondsToMicroseconds(self._stat.st_mtime_ns)
+    else:
+      return _SecondsToMicroseconds(self._stat.st_mtime)
 
   def GetChangeTime(self) -> int:
-    return _NanosecondsToMicroseconds(self._stat.st_ctime_ns)
+    # st_ctime_ns is a higher-precision version of st_ctime. Use it if it's
+    # present.
+    if self._stat.st_ctime_ns is not None:
+      return _NanosecondsToMicroseconds(self._stat.st_ctime_ns)
+    else:
+      return _SecondsToMicroseconds(self._stat.st_ctime)
 
   def GetDevice(self) -> int:
     return self._stat.st_dev
@@ -216,3 +231,8 @@ def Get(self, path: Text, follow_symlink: bool = True) -> Stat:
 def _NanosecondsToMicroseconds(ns: int) -> int:
   """Converts nanoseconds to microseconds."""
   return ns // 1000
+
+
+def _SecondsToMicroseconds(secs: float) -> int:
+  """Converts seconds to microseconds."""
+  return int(secs * 1e6)
diff --git a/grr/core/grr_response_core/stats/stats_test_utils.py b/grr/core/grr_response_core/stats/stats_test_utils.py
index a537c75710..cd1d7f758a 100644
--- a/grr/core/grr_response_core/stats/stats_test_utils.py
+++ b/grr/core/grr_response_core/stats/stats_test_utils.py
@@ -88,6 +88,31 @@ def testCounterWithFields(self):
     # Check that previously set values with other fields are not affected.
     self.assertEqual(7, counter.GetValue(fields=["dimension_value_1"]))
 
+  def testCounterWithBoolFields(self):
+    with self.SetUpStatsCollector(self._CreateStatsCollector()):
+      counter = metrics.Counter(
+          f"{self.testCounterWithBoolFields.__name__}_COUNTER",
+          fields=[("is_foo", bool)],
+      )
+
+    self.assertEqual(counter.GetValue([False]), 0)
+    self.assertEqual(counter.GetValue([True]), 0)
+
+    counter.Increment(fields=[True])
+
+    self.assertEqual(counter.GetValue([False]), 0)
+    self.assertEqual(counter.GetValue([True]), 1)
+
+    counter.Increment(fields=[False])
+
+    self.assertEqual(counter.GetValue([False]), 1)
+    self.assertEqual(counter.GetValue([True]), 1)
+
+    counter.Increment(fields=[False])
+
+    self.assertEqual(counter.GetValue([False]), 2)
+    self.assertEqual(counter.GetValue([True]), 1)
+
   def testSimpleGauge(self):
     with self.SetUpStatsCollector(self._CreateStatsCollector()):
       int_gauge = metrics.Gauge("testSimpleGauge_int_gauge", int)
diff --git a/grr/core/grr_response_core/stats/stats_utils.py b/grr/core/grr_response_core/stats/stats_utils.py
index 0213c3cc7a..52f8536938 100644
--- a/grr/core/grr_response_core/stats/stats_utils.py
+++ b/grr/core/grr_response_core/stats/stats_utils.py
@@ -93,13 +93,14 @@ def Decorated(*args, **kwargs):
 
 def FieldDefinitionProtosFromTuples(field_def_tuples):
   """Converts (field-name, type) tuples to MetricFieldDefinition protos."""
-  # TODO: This needs fixing for Python 3.
field_def_protos = [] for field_name, field_type in field_def_tuples: if field_type is int: field_type = rdf_stats.MetricFieldDefinition.FieldType.INT elif issubclass(field_type, Text): field_type = rdf_stats.MetricFieldDefinition.FieldType.STR + elif issubclass(field_type, bool): + field_type = rdf_stats.MetricFieldDefinition.FieldType.BOOL else: raise ValueError("Invalid field type: %s" % field_type) field_def_protos.append( @@ -110,14 +111,14 @@ def FieldDefinitionProtosFromTuples(field_def_tuples): def FieldDefinitionTuplesFromProtos(field_def_protos): """Converts MetricFieldDefinition protos to (field-name, type) tuples.""" - # TODO: This needs fixing for Python 3. field_def_tuples = [] for proto in field_def_protos: if proto.field_type == rdf_stats.MetricFieldDefinition.FieldType.INT: field_type = int elif proto.field_type == rdf_stats.MetricFieldDefinition.FieldType.STR: - # Use old style str in Python 2 here or the streamz library will break. field_type = str + elif proto.field_type == rdf_stats.MetricFieldDefinition.FieldType.BOOL: + field_type = bool else: raise ValueError("Unknown field type: %s" % proto.field_type) field_def_tuples.append((proto.field_name, field_type)) diff --git a/grr/proto/grr_response_proto/api/client.proto b/grr/proto/grr_response_proto/api/client.proto index ed09a89e1a..8b7ef7a7c7 100644 --- a/grr/proto/grr_response_proto/api/client.proto +++ b/grr/proto/grr_response_proto/api/client.proto @@ -69,26 +69,6 @@ message ApiClient { repeated string rrg_args = 23; } -message ApiClientActionRequest { - optional uint64 task_id = 1; - - optional uint64 leased_until = 2 [(sem_type) = { - type: "RDFDatetime", - description: "This task's lease expiration time." - }]; - - optional string session_id = 3 [(sem_type) = { - type: "RDFURN" - description: "Session id that triggered the request." - }]; - - optional string client_action = 4 - [(sem_type) = { description: "Requested client action name." }]; - - repeated GrrMessage responses = 5 - [(sem_type) = { description: "Responses queued for this request." }]; -} - // // Method arguments and results. // @@ -313,18 +293,6 @@ message ApiGetClientLoadStatsResult { repeated ApiStatsStoreMetricDataPoint data_points = 1; } -message ApiListClientActionRequestsArgs { - optional string client_id = 1 - [(sem_type) = { type: "ApiClientId", description: "Client id." }]; - optional bool fetch_responses = 2 [(sem_type) = { - description: "If true, fetch all the responses for every request." - }]; -} - -message ApiListClientActionRequestsResult { - repeated ApiClientActionRequest items = 1; -} - message ApiKillFleetspeakArgs { optional string client_id = 1 [(sem_type) = { type: "ApiClientId" }]; // If true, the fleetspeak process is terminated without performing diff --git a/grr/proto/grr_response_proto/api/user.proto b/grr/proto/grr_response_proto/api/user.proto index ade596f5b4..1610064802 100644 --- a/grr/proto/grr_response_proto/api/user.proto +++ b/grr/proto/grr_response_proto/api/user.proto @@ -441,10 +441,8 @@ message ApiCreateClientApprovalArgs { optional string client_id = 1 [(sem_type) = { type: "ApiClientId", description: "Client id." }]; optional ApiClientApproval approval = 2; - optional bool keep_client_alive = 3 [(sem_type) = { - description: "If true, the client will be kept alive for an hour right " - "after the approval request is sent." 
- }]; + + reserved 3; } message ApiGetClientApprovalArgs { diff --git a/grr/proto/grr_response_proto/apple_firmware.proto b/grr/proto/grr_response_proto/apple_firmware.proto deleted file mode 100644 index f5193f7131..0000000000 --- a/grr/proto/grr_response_proto/apple_firmware.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto2"; - -package grr; - -import "grr_response_proto/jobs.proto"; -import "grr_response_proto/semantic.proto"; - -message EficheckConfig { - reserved 1; - optional string cmd_path = 2; -} - -message CollectEfiHashesResponse { - optional string eficheck_version = 1; - optional string boot_rom_version = 2; - optional ExecuteBinaryResponse response = 3; -} - -message DumpEfiImageResponse { - optional string eficheck_version = 1; - optional PathSpec path = 2 [(sem_type) = { - description: "Temporary path to the flash image.", - }]; - optional ExecuteBinaryResponse response = 3; -} - -message EficheckFlowArgs { - reserved 1; - optional string cmd_path = 2 - [default = "/usr/libexec/firmwarecheckers/eficheck/eficheck"]; -} - -message EfiCollection { - optional string eficheck_version = 1 [(sem_type) = { - description: "Version of eficheck used to collect the data." - }]; - optional string boot_rom_version = 2 - [(sem_type) = { description: "Boot ROM version." }]; - repeated EfiEntry entries = 3 - [(sem_type) = { description: "The hashes of the collection." }]; -} - -message EfiEntry { - optional uint32 volume_type = 1 - [(sem_type) = { description: "The volume type." }]; - optional uint64 address = 2 - [(sem_type) = { description: "The entry address." }]; - optional uint32 size = 3 [(sem_type) = { description: "The entry size." }]; - optional string guid = 4 [(sem_type) = { description: "The EFI GUID." }]; - optional string hash = 5 [(sem_type) = { description: "The hash value." }]; - optional uint32 flags = 6 [(sem_type) = { - description: "A bit field of flags that describe the volume " - "(known=1, volatile=2, ffs=4)." - }]; - optional uint32 index = 7 - [(sem_type) = { description: "The entry's volume index." }]; -} diff --git a/grr/proto/grr_response_proto/crowdstrike.proto b/grr/proto/grr_response_proto/crowdstrike.proto new file mode 100644 index 0000000000..ee7fc8b579 --- /dev/null +++ b/grr/proto/grr_response_proto/crowdstrike.proto @@ -0,0 +1,12 @@ +syntax = "proto2"; + +package grr; + +// Result message for the `GetCrowdStrikeAgentID` flow. +message GetCrowdstrikeAgentIdResult { + // Retrieved identifier of the CrowdStrike agent. + // + // This value should generally be a hexadecimal string representing the bytes + // of the 128-bit identifier although no exact requirements are imposed. 
+ optional string agent_id = 1; +} diff --git a/grr/proto/grr_response_proto/deprecated.proto b/grr/proto/grr_response_proto/deprecated.proto index 7e24f7ff10..70fb648624 100644 --- a/grr/proto/grr_response_proto/deprecated.proto +++ b/grr/proto/grr_response_proto/deprecated.proto @@ -7,6 +7,7 @@ */ syntax = "proto2"; +import "google/protobuf/any.proto"; import "grr_response_proto/anomaly.proto"; import "grr_response_proto/jobs.proto"; import "grr_response_proto/knowledge_base.proto"; @@ -495,3 +496,164 @@ message CollectSingleFileProgress { optional CollectSingleFileResult result = 2; optional string error_description = 3; } + +message ClientActionRequest { + optional string client_id = 1; + optional string flow_id = 2; + optional uint64 request_id = 3; + optional string action_identifier = 4; + optional google.protobuf.Any action_args = 5; + optional uint64 cpu_limit_ms = 6; + optional uint64 network_bytes_limit = 7; + optional uint64 runtime_limit_us = 52; +} + +message ApiClientActionRequest { + optional uint64 task_id = 1; + optional uint64 leased_until = 2; + optional string session_id = 3; + optional string client_action = 4; + repeated GrrMessage responses = 5; +} + +message ApiListClientActionRequestsArgs { + optional string client_id = 1; + optional bool fetch_responses = 2; +} + +message ApiListClientActionRequestsResult { + repeated ApiClientActionRequest items = 1; +} + +message UninstallArgs { + optional bool kill = 1 [(sem_type) = { + description: "Kills the client if set.", + }]; +} + +message UpdateClientArgs { + reserved 1; + optional string binary_path = 2 [(sem_type) = { + description: "Identifies the binary uploaded to GRR server that has " + "to be run on the client to perform the update.", + }]; +} + +message KeepAliveArgs { + optional uint64 duration = 1 [ + (sem_type) = { + type: "DurationSeconds", + description: "Until when should the client stay in the fast poll mode.", + }, + default = 3600 + ]; +} + +message CollectEfiHashesResponse { + optional string eficheck_version = 1; + optional string boot_rom_version = 2; + optional ExecuteBinaryResponse response = 3; +} + +message DumpEfiImageResponse { + optional string eficheck_version = 1; + optional PathSpec path = 2 [(sem_type) = { + description: "Temporary path to the flash image.", + }]; + optional ExecuteBinaryResponse response = 3; +} + +message EficheckConfig { + reserved 1; + optional string cmd_path = 2; +} + +message EficheckFlowArgs { + reserved 1; + optional string cmd_path = 2 + [default = "/usr/libexec/firmwarecheckers/eficheck/eficheck"]; +} + +message EfiCollection { + optional string eficheck_version = 1 [(sem_type) = { + description: "Version of eficheck used to collect the data." + }]; + optional string boot_rom_version = 2 + [(sem_type) = { description: "Boot ROM version." }]; + repeated EfiEntry entries = 3 + [(sem_type) = { description: "The hashes of the collection." }]; +} + +message EfiEntry { + optional uint32 volume_type = 1 + [(sem_type) = { description: "The volume type." }]; + optional uint64 address = 2 + [(sem_type) = { description: "The entry address." }]; + optional uint32 size = 3 [(sem_type) = { description: "The entry size." }]; + optional string guid = 4 [(sem_type) = { description: "The EFI GUID." }]; + optional string hash = 5 [(sem_type) = { description: "The hash value." }]; + optional uint32 flags = 6 [(sem_type) = { + description: "A bit field of flags that describe the volume " + "(known=1, volatile=2, ffs=4)." 
+ }]; + optional uint32 index = 7 + [(sem_type) = { description: "The entry's volume index." }]; +} + +message FindFilesArgs { + reserved 3, 4; + + optional FindSpec findspec = 1 [(sem_type) = { + description: "A find operation specification.", + }]; +} + +message SendFileRequest { + optional PathSpec pathspec = 1 [(sem_type) = { + description: "The pathspec for the file to retrieve.", + }]; + + optional NetworkAddress.Family address_family = 2 [ + (sem_type) = { + description: "address family to use (AF_INET or AF_INET6).", + }, + default = INET + ]; + + optional string host = 3 [(sem_type) = { + description: "Hostname or IP to send the file to.", + }]; + + optional uint64 port = 4 [ + (sem_type) = { + description: "Port number on the listening server.", + }, + default = 12345 + ]; + + optional bytes key = 5 [(sem_type) = { + type: "AES128Key", + description: "An encryption key given in hex representation.", + }]; + + optional bytes iv = 6 [(sem_type) = { + type: "AES128Key", + description: "The iv for AES, also given in hex representation.", + }]; +} + +message FingerprintFileArgs { + optional PathSpec pathspec = 1 [(sem_type) = { + description: "The file path to fingerprint.", + }]; +} + +message FingerprintFileResult { + optional string file_urn = 1 [(sem_type) = { + type: "RDFURN", + description: "The URN of the file fingerprinted", + }]; + optional Hash hash_entry = 2 [(sem_type) = { + description: "File hash object.", + }]; +} diff --git a/grr/proto/grr_response_proto/dummy.proto b/grr/proto/grr_response_proto/dummy.proto new file mode 100644 index 0000000000..d04b8635e6 --- /dev/null +++ b/grr/proto/grr_response_proto/dummy.proto @@ -0,0 +1,23 @@ +syntax = "proto2"; + +package grr; + +// Arguments for Dummy Flow. +message DummyArgs { + optional string flow_input = 1; +} + +// Return for Dummy Flow. +message DummyFlowResult { + optional string flow_output = 1; +} + +// Arguments for Dummy ClientAction. +message DummyRequest { + optional string action_input = 1; +} + +// Return for Dummy ClientAction. +message DummyResult { + optional string action_output = 1; +} diff --git a/grr/proto/grr_response_proto/export.proto b/grr/proto/grr_response_proto/export.proto index 5dea238a74..77cc349b72 100644 --- a/grr/proto/grr_response_proto/export.proto +++ b/grr/proto/grr_response_proto/export.proto @@ -351,7 +351,7 @@ message ExportedDictItem { optional string value = 3; } -// Next field id: 6 +// Next field id: 7 message ExportedArtifactFilesDownloaderResult { optional ExportedMetadata metadata = 1; @@ -363,7 +363,7 @@ message ExportedArtifactFilesDownloaderResult { } // Protobuf for YaraProcessScan flow results export. 
-// Next field id: 8 +// Next field id: 9 message ExportedYaraProcessScanMatch { optional ExportedMetadata metadata = 1; @@ -379,7 +379,7 @@ message ExportedYaraProcessScanMatch { "includes all Yara rules, not only the one of this match.", }]; - reserved 5; + reserved 5, 8; optional string string_id = 6 [(sem_type) = { description: "The name of the string that matched.", diff --git a/grr/proto/grr_response_proto/flows.proto b/grr/proto/grr_response_proto/flows.proto index 4e5f92fa4c..e3206bd389 100644 --- a/grr/proto/grr_response_proto/flows.proto +++ b/grr/proto/grr_response_proto/flows.proto @@ -509,13 +509,6 @@ message DeleteGRRTempFilesArgs { }]; } -// Next field ID: 2 -message UninstallArgs { - optional bool kill = 1 [(sem_type) = { - description: "Kills the client if set.", - }]; -} - // Next field ID: 2 message UpdateConfigurationArgs { optional Dict config = 1 @@ -562,26 +555,6 @@ message OnlineNotificationArgs { }]; } -// Next field ID: 3 -message UpdateClientArgs { - reserved 1; - optional string binary_path = 2 [(sem_type) = { - description: "Identifies the binary uploaded to GRR server that has " - "to be run on the client to perform the update.", - }]; -} - -// Next field ID: 2 -message KeepAliveArgs { - optional uint64 duration = 1 [ - (sem_type) = { - type: "DurationSeconds", - description: "Until when should the client stay in the fast poll mode.", - }, - default = 3600 - ]; -} - // Next field ID: 3 message LaunchBinaryArgs { optional string binary = 1 [(sem_type) = { @@ -773,24 +746,6 @@ message InterrogateArgs { ]; } -// Next field ID: 2 -message FingerprintFileArgs { - optional PathSpec pathspec = 1 [(sem_type) = { - description: "The file path to fingerprint.", - }]; -} - -// Next field ID: 3 -message FingerprintFileResult { - optional string file_urn = 1 [(sem_type) = { - type: "RDFURN", - description: "The URN of the file fingerprinted", - }]; - optional Hash hash_entry = 2 [(sem_type) = { - description: "File hash object.", - }]; -} - // Next field ID: 2 message FileCollectorArgs { repeated FindSpec findspecs = 1 [(sem_type) = { @@ -885,19 +840,6 @@ message GlobArgs { }]; } -// Next field ID: 5 -message FindFilesArgs { - optional FindSpec findspec = 1 [(sem_type) = { - description: "A find operation specification.", - }]; - - // DEPRECATED - // optional uint64 max_results = 3; - - // DEPRECATED - // optional uint64 iteration_count = 4; -} - // Next field ID: 4 message GetFileArgs { optional PathSpec pathspec = 1 @@ -2267,7 +2209,7 @@ message YaraMatch { }]; } -// Next field ID: 5 +// Next field ID: 6 message YaraProcessScanMatch { optional Process process = 1 [(sem_type) = { description: "The process that returned one or more matches.", @@ -2278,7 +2220,7 @@ message YaraProcessScanMatch { optional uint64 scan_time_us = 3 [(sem_type) = { description: "Time in microseconds taken to perform the scan.", }]; - reserved 4; + reserved 4, 5; } message YaraProcessScanMiss { @@ -2572,20 +2514,6 @@ message FlowIterator { optional uint64 response_id = 4; } -// Next id: 53 -message ClientActionRequest { - optional string client_id = 1; - optional string flow_id = 2; - optional uint64 request_id = 3; - optional string action_identifier = 4; - optional google.protobuf.Any action_args = 5; - optional uint64 cpu_limit_ms = 6; - optional uint64 network_bytes_limit = 7; - optional uint64 runtime_limit_us = 52 [(sem_type) = { - type: "Duration", - }]; -} - // Next id: 35 message Flow { reserved 10; diff --git a/grr/proto/grr_response_proto/jobs.proto 
b/grr/proto/grr_response_proto/jobs.proto
index 281e0de710..f5f863fa98 100644
--- a/grr/proto/grr_response_proto/jobs.proto
+++ b/grr/proto/grr_response_proto/jobs.proto
@@ -1191,41 +1191,6 @@ message ExecuteBinaryResponse {
   optional int32 time_used = 4;
 }
 
-// This requests the client to encrypt a file and send it to a specified server.
-message SendFileRequest {
-  optional PathSpec pathspec = 1 [(sem_type) = {
-    description: "The pathspec for the file to retrieve.",
-  }];
-
-  optional NetworkAddress.Family address_family = 2 [
-    (sem_type) = {
-      description: "address family to use (AF_INET or AF_INET6).",
-    },
-    default = INET
-  ];
-
-  optional string host = 3 [(sem_type) = {
-    description: "Hostname or IP to send the file to.",
-  }];
-
-  optional uint64 port = 4 [
-    (sem_type) = {
-      description: "Port number on the listening server.",
-    },
-    default = 12345
-  ];
-
-  optional bytes key = 5 [(sem_type) = {
-    type: "AES128Key",
-    description: "An encryption key given in hex representation.",
-  }];
-
-  optional bytes iv = 6 [(sem_type) = {
-    type: "AES128Key",
-    description: "The iv for AES, also given in hex representation.",
-  }];
-}
-
 // Grep searches the content of files for a hit and returns several
 // BufferReference.
 message GrepSpec {
@@ -1939,6 +1904,7 @@ message MetricFieldDefinition {
   enum FieldType {
     INT = 0;
     STR = 1;
+    BOOL = 2;
   }
 
   optional string field_name = 1;
diff --git a/grr/proto/grr_response_proto/knowledge_base.proto b/grr/proto/grr_response_proto/knowledge_base.proto
index deb9a7acd2..230ee0e650 100644
--- a/grr/proto/grr_response_proto/knowledge_base.proto
+++ b/grr/proto/grr_response_proto/knowledge_base.proto
@@ -282,6 +282,8 @@ message KnowledgeBase {
                  "drivers.",
   }];
 
+  // TODO: `ProfilesDirectory` is not an environment variable, so
+  // this field should be renamed.
   optional string environ_profilesdirectory = 33 [(sem_type) = {
     description: "Folder that typically contains users' profile directories; "
                  "e.g '%SystemDrive%\\Users'",
diff --git a/grr/proto/grr_response_proto/rrg.proto b/grr/proto/grr_response_proto/rrg.proto
index 28bd4225e5..c698ea22c9 100644
--- a/grr/proto/grr_response_proto/rrg.proto
+++ b/grr/proto/grr_response_proto/rrg.proto
@@ -60,7 +60,7 @@ message Request {
   // A limit on number of bytes sent by the action over the network.
   //
   // If the limit is reached, action execution is aborted.
-  uint64 network_bytes_sent_limit = 5;
+  uint64 network_bytes_limit = 5;
 
   // A limit of CPU time spent on executing the action.
   //
@@ -105,6 +105,9 @@ message Response {
 message Status {
   // Information about action invocation failure.
   message Error {
+    // TODO(user): Add more details about circumstances in which errors
+    // of a specific type can be raised.
+
     // List of all possible error types that can occur.
     enum Type {
       UNSET = 0;
@@ -122,6 +125,10 @@ message Status {
       REAL_TIME_LIMIT_EXCEEDED = 6;
       // Action-specific error occurred.
      ACTION_FAILURE = 7;
+      // CPU time usage threshold specified in the request is invalid.
+      INVALID_CPU_TIME_LIMIT = 8;
+      // Real (wall) time usage threshold specified in the request is invalid.
+      INVALID_REAL_TIME_LIMIT = 9;
     }
 
     // Type of the error.
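The new `BOOL` member of `MetricFieldDefinition.FieldType` above is the wire-level counterpart of the boolean metric dimensions introduced in the `stats_utils` and `stats_test_utils` changes earlier in this patch. A minimal usage sketch, assuming a stats collector has already been registered (as in the tests); the metric name `example_bool_counter` and the `is_fatal` field are made up for illustration:

    from grr_response_core.stats import metrics

    # Each boolean value gets its own counter cell, just like int and str
    # dimensions do.
    counter = metrics.Counter(
        "example_bool_counter",
        fields=[("is_fatal", bool)],
    )

    counter.Increment(fields=[True])
    counter.Increment(fields=[False])
    counter.Increment(fields=[False])

    assert counter.GetValue(fields=[True]) == 1
    assert counter.GetValue(fields=[False]) == 2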
diff --git a/grr/proto/grr_response_proto/rrg/action/list_connections.proto b/grr/proto/grr_response_proto/rrg/action/list_connections.proto new file mode 100644 index 0000000000..d13fbeef7d --- /dev/null +++ b/grr/proto/grr_response_proto/rrg/action/list_connections.proto @@ -0,0 +1,14 @@ +// Copyright 2023 Google LLC +// +// Use of this source code is governed by an MIT-style license that can be found +// in the LICENSE file or at https://opensource.org/licenses/MIT. +syntax = "proto3"; + +package rrg.action.list_connections; + +import "grr_response_proto/rrg/net.proto"; + +message Result { + // Information about the individual connection. + rrg.net.Connection connection = 1; +} diff --git a/grr/proto/grr_response_proto/rrg/net.proto b/grr/proto/grr_response_proto/rrg/net.proto new file mode 100644 index 0000000000..c56969fb25 --- /dev/null +++ b/grr/proto/grr_response_proto/rrg/net.proto @@ -0,0 +1,93 @@ +// Copyright 2023 Google LLC +// +// Use of this source code is governed by an MIT-style license that can be found +// in the LICENSE file or at https://opensource.org/licenses/MIT. +syntax = "proto3"; + +package rrg.net; + +// IP address (either IPv4 or IPv6). +message IpAddress { + // Octets that the IP address consists of. + // + // Required to have 4 bytes for IPv4 and 16 bytes for IPv6 addresses. + bytes octets = 1; +} + +// Socket address (either IPv4 or IPv6). +message SocketAddress { + // IP address associated with this socket address. + IpAddress ip_address = 1; + + // Port number associated with this socket address. + uint32 port = 2; +} + +// MAC address as defined in the IEEE 802 standard [1]. +// +// [1]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/macgrp.pdf +message MacAddress { + // Octets that the MAC address consists of. + // + // Required to have 6 bytes. + bytes octets = 1; +} + +// State of a TCP connection as described in RFC 793 [1]. +// +// [1]: https://www.ietf.org/rfc/rfc793.txt +enum TcpState { + UNKNOWN = 0x00; + ESTABLISHED = 0x01; + SYN_SENT = 0x02; + SYN_RECEIVED = 0x03; + FIN_WAIT_1 = 0x04; + FIN_WAIT_2 = 0x05; + TIME_WAIT = 0x06; + CLOSED = 0x07; + CLOSE_WAIT = 0x08; + LAST_ACK = 0x09; + LISTEN = 0x0A; + CLOSING = 0x0B; +} + +// Information about a TCP connection. +// +// The version of the protocol can be determined from the IP addresses. +message TcpConnection { + // Identifier of the process that owns the connection. + uint32 pid = 1; + + // Local address of the connection. + SocketAddress local_address = 2; + + // Remote address of the connection. + SocketAddress remote_address = 3; + + // State of the connection. + TcpState state = 4; +} + +// Information about a UDP connection. +// +// The version of the protocol can be determined from the IP addresses. +message UdpConnection { + // Identifier of the process that owns the connection. + uint32 pid = 1; + + // Local address of the connection. + SocketAddress local_address = 2; +} + +// Information about an Internet connection. +// +// The version of the protocol can be determined from the IP addresses. +message Connection { + oneof connection { + // Information about a TCP connection. + TcpConnection tcp = 1; + + // Information about a UDP connection. 
+ UdpConnection udp = 2; + } +} diff --git a/grr/server/grr_response_server/action_registry.py b/grr/server/grr_response_server/action_registry.py index 0134c6674e..44af01cdfe 100644 --- a/grr/server/grr_response_server/action_registry.py +++ b/grr/server/grr_response_server/action_registry.py @@ -10,9 +10,8 @@ "DeleteGRRTempFiles": server_stubs.DeleteGRRTempFiles, "DumpACPITable": server_stubs.DumpACPITable, "DumpFlashImage": server_stubs.DumpFlashImage, + "Dummy": server_stubs.Dummy, "Echo": server_stubs.Echo, - "EficheckCollectHashes": server_stubs.EficheckCollectHashes, - "EficheckDumpImage": server_stubs.EficheckDumpImage, "EnumerateFilesystems": server_stubs.EnumerateFilesystems, "EnumerateInterfaces": server_stubs.EnumerateInterfaces, "EnumerateRunningServices": server_stubs.EnumerateRunningServices, @@ -46,12 +45,10 @@ "Osquery": server_stubs.Osquery, "ReadBuffer": server_stubs.ReadBuffer, "Segfault": server_stubs.Segfault, - "SendFile": server_stubs.SendFile, "SendStartupInfo": server_stubs.SendStartupInfo, "StatFS": server_stubs.StatFS, "TransferBuffer": server_stubs.TransferBuffer, "Timeline": server_stubs.Timeline, - "Uninstall": server_stubs.Uninstall, "UpdateAgent": server_stubs.UpdateAgent, "UpdateConfiguration": server_stubs.UpdateConfiguration, "VfsFileFinder": server_stubs.VfsFileFinder, diff --git a/grr/server/grr_response_server/artifact.py b/grr/server/grr_response_server/artifact.py index 8cb516473e..a2474955b5 100644 --- a/grr/server/grr_response_server/artifact.py +++ b/grr/server/grr_response_server/artifact.py @@ -2,6 +2,10 @@ """Base classes for artifacts.""" import logging +import ntpath +import os +import pathlib +import stat from typing import Iterable from typing import Iterator from typing import List @@ -13,9 +17,12 @@ from grr_response_core.lib import parsers from grr_response_core.lib import rdfvalue from grr_response_core.lib import utils +from grr_response_core.lib.parsers import windows_registry_parser from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly from grr_response_core.lib.rdfvalues import client as rdf_client +from grr_response_core.lib.rdfvalues import client_action as rdf_client_action from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs +from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_core.lib.rdfvalues import protodict as rdf_protodict from grr_response_core.lib.rdfvalues import structs as rdf_structs @@ -24,6 +31,8 @@ from grr_response_server import data_store from grr_response_server import file_store from grr_response_server import flow_base +from grr_response_server import flow_responses +from grr_response_server import server_stubs from grr_response_server.databases import db @@ -113,6 +122,123 @@ def Start(self): next_state=self.ProcessBase.__name__, request_data={"artifact_name": artifact_name}) + if self.client_os == "Darwin": + list_users_dir_request = rdf_client_action.ListDirRequest() + list_users_dir_request.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS + list_users_dir_request.pathspec.path = "/Users" + + self.CallClient( + server_stubs.ListDirectory, + request=list_users_dir_request, + next_state=self._ProcessMacosListUsersDirectory.__name__, + ) + elif self.client_os == "Windows": + # pylint: disable=line-too-long + # pyformat: disable + # + # TODO: There is no dedicated action for obtaining registry + # values. 
The existing artifact collector uses `GetFileStat` action for
+      # this which is horrible.
+      args = rdf_client_action.GetFileStatRequest()
+      args.pathspec.pathtype = rdf_paths.PathSpec.PathType.REGISTRY
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\SystemRoot"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsEnvSystemRoot.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\ProgramFilesDir"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsEnvProgramFilesDir.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\ProgramFilesDir (x86)"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsEnvProgramFilesDirX86.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\CommonFilesDir"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsEnvCommonFilesDir.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\CommonFilesDir (x86)"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsEnvCommonFilesDirX86.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\ProgramData"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsEnvProgramData.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\DriverData"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsEnvDriverData.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\Select\Current"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsCurrentControlSet.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Nls\CodePage\ACP"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsCodePage.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Services\Tcpip\Parameters\Domain"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsDomain.__name__,
+      )
+
+      args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\TimeZoneInformation\TimeZoneKeyName"
+      self.CallClient(
+          server_stubs.GetFileStat,
+          args,
+          next_state=self._ProcessWindowsTimeZoneKeyName.__name__,
+      )
+
+      args = rdf_file_finder.FileFinderArgs()
+      # TODO: There is no dedicated action for obtaining registry
+      # values, but the `STAT` action of the file-finder will get it. This
+      # should be refactored once registry-specific actions are available.
+      args.action.action_type = rdf_file_finder.FileFinderAction.Action.STAT
+      args.pathtype = rdf_paths.PathSpec.PathType.REGISTRY
+      args.paths = [r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\*\ProfileImagePath"]
+      self.CallClient(
+          server_stubs.VfsFileFinder,
+          args,
+          next_state=self._ProcessWindowsProfiles.__name__,
+      )
+      # TODO: We pretend that `WindowsRegistryProfiles` is being
+      # collected to avoid issues with the flow failing due to its dependees
+      # not being possible to satisfy. Once the dependees are refactored not
+      # to be artifacts this can be removed.
+ self.state.in_flight_artifacts.add("WindowsRegistryProfiles") + # pylint: enable=line-too-long + # pyformat: enable + def _ScheduleCollection(self): # Schedule any new artifacts for which we have now fulfilled dependencies. for artifact_name in self.state.awaiting_deps_artifacts: @@ -248,6 +374,834 @@ def SetKBValue(self, artifact_name, responses): return provided + def _ProcessMacosListUsersDirectory( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to list macOS users directory: %s", responses.status) + return + + for response in responses: + if not isinstance(response, rdf_client_fs.StatEntry): + self.Log("Unexpected response type: '%s'", type(response)) + continue + + # TODO: `st_mode` should be an `int`, not `StatMode`. + if not stat.S_ISDIR(int(response.st_mode)): + self.Log("Unexpected users directory entry mode: %s", response.st_mode) + continue + + username = os.path.basename(response.pathspec.path) + if username == "Shared": + # `Shared` is a special entry in the `Users` directory that we do not + # want to report as an actual user. + continue + + user = rdf_client.User() + user.username = username + user.homedir = response.pathspec.path + self.state.knowledge_base.MergeOrAddUser(user) + + def _ProcessWindowsEnvSystemRoot( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain `%%SystemRoot%%`: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + system_root = response.registry_data.string + system_drive = pathlib.PureWindowsPath(system_root).drive + + self.state.knowledge_base.environ_systemroot = system_root + self.state.knowledge_base.environ_systemdrive = system_drive + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_systemroot") + self.state.fulfilled_deps.add("environ_systemdrive") + + # pylint: disable=line-too-long + # pyformat: disable + # + # TODO: The following values depend on `SystemRoot` so we have + # to schedule its collection after we have root. However, this requires + # intrinsic knowledge and is not much better than just hardcoding them. + # Instead, we should collect all variables as they are and then do the + # interpolation without hardcoding the dependencies. + # + # TODO: There is no dedicated action for obtaining registry + # values. The existing artifact collector uses `GetFileStat` action for + # this which is horrible. 
+ args = rdf_client_action.GetFileStatRequest()
+ args.pathspec.pathtype = rdf_paths.PathSpec.PathType.REGISTRY
+
+ args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\TEMP"
+ self.CallClient(
+ server_stubs.GetFileStat,
+ args,
+ next_state=self._ProcessWindowsEnvTemp.__name__,
+ )
+
+ args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\Path"
+ self.CallClient(
+ server_stubs.GetFileStat,
+ args,
+ next_state=self._ProcessWindowsEnvPath.__name__,
+ )
+
+ args.pathspec.path = r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\ComSpec"
+ self.CallClient(
+ server_stubs.GetFileStat,
+ args,
+ next_state=self._ProcessWindowsEnvComSpec.__name__,
+ )
+
+ args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment\windir"
+ self.CallClient(
+ server_stubs.GetFileStat,
+ args,
+ next_state=self._ProcessWindowsEnvWindir.__name__,
+ )
+
+ args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\ProfilesDirectory"
+ self.CallClient(
+ server_stubs.GetFileStat,
+ args,
+ next_state=self._ProcessWindowsProfilesDirectory.__name__,
+ )
+ # pylint: enable=line-too-long
+ # pyformat: enable
+
+ def _ProcessWindowsEnvProgramFilesDir(
+ self,
+ responses: flow_responses.Responses[rdfvalue.RDFValue],
+ ) -> None:
+ if not responses.success:
+ self.Log("Failed to obtain `%%ProgramFiles%%`: %s", responses.status)
+ return
+
+ if len(responses) != 1:
+ message = f"Unexpected number of responses: {len(responses)}"
+ raise flow_base.FlowError(message)
+
+ response = responses.First()
+ if not isinstance(response, rdf_client_fs.StatEntry):
+ message = f"Unexpected response type: {type(response)}"
+ raise flow_base.FlowError(message)
+
+ program_files = response.registry_data.string
+
+ self.state.knowledge_base.environ_programfiles = program_files
+
+ # TODO: This should be deleted once `provides` sections are no
+ # longer used.
+ self.state.fulfilled_deps.add("environ_programfiles")
+
+ def _ProcessWindowsEnvProgramFilesDirX86(
+ self,
+ responses: flow_responses.Responses[rdfvalue.RDFValue],
+ ) -> None:
+ if not responses.success:
+ self.Log("Failed to obtain `%%ProgramFiles (x86)%%`: %s", responses.status)
+ return
+
+ if len(responses) != 1:
+ message = f"Unexpected number of responses: {len(responses)}"
+ raise flow_base.FlowError(message)
+
+ response = responses.First()
+ if not isinstance(response, rdf_client_fs.StatEntry):
+ message = f"Unexpected response type: {type(response)}"
+ raise flow_base.FlowError(message)
+
+ program_files_x86 = response.registry_data.string
+
+ self.state.knowledge_base.environ_programfilesx86 = program_files_x86
+
+ # TODO: This should be deleted once `provides` sections are no
+ # longer used.
+ self.state.fulfilled_deps.add("environ_programfilesx86") + + def _ProcessWindowsEnvCommonFilesDir( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + status = responses.status + self.Log("Failed to obtain `%%CommonProgramFiles%%`: %s", status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + common_files = response.registry_data.string + + self.state.knowledge_base.environ_commonprogramfiles = common_files + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_commonprogramfiles") + + def _ProcessWindowsEnvCommonFilesDirX86( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + status = responses.status + self.Log("Failed to obtain `%%CommonProgramFiles (x86)%%`: %s", status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + common_files_x86 = response.registry_data.string + + self.state.knowledge_base.environ_commonprogramfilesx86 = common_files_x86 + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_commonprogramfilesx86") + + def _ProcessWindowsEnvProgramData( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain `%%ProgramData%%`: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + program_data = response.registry_data.string + # TODO: We should not hardcode the dependency on `%SystemRoot%` + # and do an interpolation pass once all variables are there. + program_data = artifact_utils.ExpandWindowsEnvironmentVariables( + program_data, + self.state.knowledge_base, + ) + + self.state.knowledge_base.environ_programdata = program_data + # TODO: Remove once this knowledge base field is removed. + self.state.knowledge_base.environ_allusersappdata = program_data + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_programdata") + self.state.fulfilled_deps.add("environ_allusersappdata") + + # pylint: disable=line-too-long + # pyformat: disable + # + # Interestingly, it looks like there is no such value in the registry on + # Windows 10. But the original artifact uses this path and there are other + # websites stating that it should be there [1, 2] we try this anyway. + # + # According to Wikipedia [3] this value since Windows Vista is deprecated in + # favour of `%PRORGAMDATA%` so e fallback to that in case we cannot retrieve + # it. 
+ # + # [1]: https://renenyffenegger.ch/notes/Windows/dirs/ProgramData/index + # [2]: https://winreg-kb.readthedocs.io/en/latest/sources/system-keys/Environment-variables.html#currentversion-profilelist-key + # [3]: https://en.wikipedia.org/wiki/Environment_variable#ALLUSERSPROFILE + # + # TODO: There is no dedicated action for obtaining registry + # values. The existing artifact collector uses `GetFileStat` action for + # this which is horrible. + args = rdf_client_action.GetFileStatRequest() + args.pathspec.pathtype = rdf_paths.PathSpec.PathType.REGISTRY + args.pathspec.path = r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows NT\CurrentVersion\ProfileList\AllUsersProfile" + self.CallClient( + server_stubs.GetFileStat, + args, + next_state=self._ProcessWindowsEnvAllUsersProfile.__name__, + ) + # pylint: enable=line-too-long + # pyformat: enable + + def _ProcessWindowsEnvDriverData( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain `%%DriverData%%`: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + driver_data = response.registry_data.string + + self.state.knowledge_base.environ_driverdata = driver_data + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_driverdata") + + def _ProcessWindowsCurrentControlSet( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain current control set: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + current = response.registry_data.integer + if not (0 < current < 1000): + raise flow_base.FlowError(f"Unexpected control set index: {current}") + + current_control_set = rf"HKEY_LOCAL_MACHINE\SYSTEM\ControlSet{current:03}" + + self.state.knowledge_base.current_control_set = current_control_set + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("current_control_set") + + def _ProcessWindowsCodePage( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain code page: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + code_page = f"cp_{response.registry_data.string}" + + self.state.knowledge_base.code_page = code_page + + # TODO: This should be deleted once `provides` sections are no + # longer used. 
+ self.state.fulfilled_deps.add("code_page") + + def _ProcessWindowsDomain( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain domain: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + self.state.knowledge_base.domain = response.registry_data.string + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("domain") + + def _ProcessWindowsTimeZoneKeyName( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + def CollectWindowsTimeZoneStandardName(): + # TODO: There is no dedicated action for obtaining registry + # values. The existing artifact collector uses `GetFileStat` action for + # this which is horrible. + # + # pylint: disable=line-too-long + # pyformat: disable + args = rdf_client_action.GetFileStatRequest() + args.pathspec.pathtype = rdf_paths.PathSpec.PathType.REGISTRY + args.pathspec.path = r"HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\TimeZoneInformation\StandardName" + self.CallClient( + server_stubs.GetFileStat, + args, + next_state=self._ProcessWindowsTimeZoneStandardName.__name__, + ) + # pylint: enable=line-too-long + # pyformat: enable + + if not responses.success: + self.Log("Failed to obtain time zone key name: %s", responses.status) + CollectWindowsTimeZoneStandardName() + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + time_zone_key_name = response.registry_data.string + try: + time_zone = windows_registry_parser.ZONE_LIST[time_zone_key_name] + except KeyError: + self.Log("Failed to parse time zone key name: %r", time_zone_key_name) + # We set the time zone as "unknown" with the raw value in case the call + # to get the standard name time zone also fails. + self.state.knowledge_base.time_zone = f"Unknown ({time_zone_key_name!r})" + CollectWindowsTimeZoneStandardName() + return + + self.state.knowledge_base.time_zone = time_zone + + # TODO: This should be deleted once `provides` sections are + # no longer used. + self.state.fulfilled_deps.add("time_zone") + + def _ProcessWindowsTimeZoneStandardName( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain time zone standard name: %s", responses.status) + # At this point it is possible that we have set the timezone to unknown + # with the raw value if we managed to at least get the time zone key name + # in the _ProcessWindowsTimeZoneKeyName` method. 
+ return
+
+ if len(responses) != 1:
+ message = f"Unexpected number of responses: {len(responses)}"
+ raise flow_base.FlowError(message)
+
+ response = responses.First()
+ if not isinstance(response, rdf_client_fs.StatEntry):
+ message = f"Unexpected response type: {type(response)}"
+ raise flow_base.FlowError(message)
+
+ time_zone_standard_name = response.registry_data.string
+ try:
+ time_zone = windows_registry_parser.ZONE_LIST[time_zone_standard_name]
+ except KeyError:
+ self.Log(
+ "Failed to parse time zone standard name: %r",
+ time_zone_standard_name,
+ )
+ # We always override this value: even if we set some "unknown" time
+ # zone with a raw key name before, the "standard" one is going to be more
+ # readable.
+ self.state.knowledge_base.time_zone = (
+ f"Unknown ({time_zone_standard_name!r})"
+ )
+ return
+
+ self.state.knowledge_base.time_zone = time_zone
+
+ # TODO: This should be deleted once `provides` sections are
+ # no longer used.
+ self.state.fulfilled_deps.add("time_zone")
+
+ def _ProcessWindowsEnvTemp(
+ self,
+ responses: flow_responses.Responses[rdfvalue.RDFValue],
+ ) -> None:
+ if not responses.success:
+ self.Log("Failed to obtain `%%TEMP%%`: %s", responses.status)
+ return
+
+ if len(responses) != 1:
+ message = f"Unexpected number of responses: {len(responses)}"
+ raise flow_base.FlowError(message)
+
+ response = responses.First()
+ if not isinstance(response, rdf_client_fs.StatEntry):
+ message = f"Unexpected response type: {type(response)}"
+ raise flow_base.FlowError(message)
+
+ temp = response.registry_data.string
+ # TODO: We should not hardcode the dependency of `TEMP` on
+ # `SystemRoot` and do an interpolation pass once all variables are there.
+ temp = artifact_utils.ExpandWindowsEnvironmentVariables(
+ temp,
+ self.state.knowledge_base,
+ )
+
+ self.state.knowledge_base.environ_temp = temp
+
+ # TODO: This should be deleted once `provides` sections are no
+ # longer used.
+ self.state.fulfilled_deps.add("environ_temp")
+
+ def _ProcessWindowsEnvPath(
+ self,
+ responses: flow_responses.Responses[rdfvalue.RDFValue],
+ ) -> None:
+ if not responses.success:
+ self.Log("Failed to obtain `%%Path%%`: %s", responses.status)
+ return
+
+ if len(responses) != 1:
+ message = f"Unexpected number of responses: {len(responses)}"
+ raise flow_base.FlowError(message)
+
+ response = responses.First()
+ if not isinstance(response, rdf_client_fs.StatEntry):
+ message = f"Unexpected response type: {type(response)}"
+ raise flow_base.FlowError(message)
+
+ path = response.registry_data.string
+ # TODO: We should not hardcode the dependency of `Path` on
+ # `SystemRoot` and do an interpolation pass once all variables are there.
+ path = artifact_utils.ExpandWindowsEnvironmentVariables(
+ path,
+ self.state.knowledge_base,
+ )
+
+ self.state.knowledge_base.environ_path = path
+
+ # TODO: This should be deleted once `provides` sections are no
+ # longer used.
+ self.state.fulfilled_deps.add("environ_path") + + def _ProcessWindowsEnvComSpec( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain `%%ComSpec%%`: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + com_spec = response.registry_data.string + # TODO: We should not hardcode the dependency of `ComSpec` on + # `SystemRoot` and do an interpolation pass once all variables are there. + com_spec = artifact_utils.ExpandWindowsEnvironmentVariables( + com_spec, + self.state.knowledge_base, + ) + + self.state.knowledge_base.environ_comspec = com_spec + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_comspec") + + def _ProcessWindowsEnvWindir( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain `%%windir%%`: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + windir = response.registry_data.string + # TODO: We should not hardcode the dependency of `windir` on + # `SystemRoot` and do an interpolation pass once all variables are there. + windir = artifact_utils.ExpandWindowsEnvironmentVariables( + windir, + self.state.knowledge_base, + ) + + self.state.knowledge_base.environ_windir = windir + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_windir") + + def _ProcessWindowsProfilesDirectory( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if not responses.success: + self.Log("Failed to obtain profiles directory: %s", responses.status) + return + + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + profiles_directory = response.registry_data.string + # TODO: We should not hardcode the dependency on `SystemDrive` + # and do an interpolation pass once all variables are there. + profiles_directory = artifact_utils.ExpandWindowsEnvironmentVariables( + profiles_directory, + self.state.knowledge_base, + ) + + self.state.knowledge_base.environ_profilesdirectory = profiles_directory + + # TODO: This should be deleted once `provides` sections are no + # longer used. 
+ self.state.fulfilled_deps.add("environ_profilesdirectory") + + def _ProcessWindowsEnvAllUsersProfile( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + if responses.success: + if len(responses) != 1: + message = f"Unexpected number of responses: {len(responses)}" + raise flow_base.FlowError(message) + + response = responses.First() + if not isinstance(response, rdf_client_fs.StatEntry): + message = f"Unexpected response type: {type(response)}" + raise flow_base.FlowError(message) + + allusersprofile = response.registry_data.string + else: + # Since Windows Vista `%PROGRAMDATA%` superseded `%ALLUSERSPROFILE%` [1], + # so we fall back to that in case we cannot obtain it (which is expected + # on most modern machines and thus we don't even log an error). + # + # [1]: https://en.wikipedia.org/wiki/Environment_variable#ALLUSERSPROFILE + allusersprofile = self.state.knowledge_base.environ_programdata + + # TODO: We should not hardcode dependency on `%ProgramData%` + # and do an interpolation pass once all variables are there. + allusersprofile = artifact_utils.ExpandWindowsEnvironmentVariables( + allusersprofile, + self.state.knowledge_base, + ) + + self.state.knowledge_base.environ_allusersprofile = allusersprofile + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("environ_allusersprofile") + + def _ProcessWindowsProfiles( + self, + responses: flow_responses.Responses[rdfvalue.RDFValue], + ) -> None: + # TODO: Delete this once dependent artifacts are removed. + self.state.in_flight_artifacts.remove("WindowsRegistryProfiles") + + if not responses.success: + self.Log("Failed to obtain Windows profiles: %s", responses.status) + return + + for response in responses: + if not isinstance(response, rdf_file_finder.FileFinderResult): + raise flow_base.FlowError(f"Unexpected response type: {type(response)}") + + sid = ntpath.basename(ntpath.dirname(response.stat_entry.pathspec.path)) + home = response.stat_entry.registry_data.string + + if not windows_registry_parser.SID_RE.match(sid): + # There are some system profiles that do not match, so we don't log any + # errors and just silently continue. + continue + + user = rdf_client.User() + user.sid = sid + user.homedir = user.userprofile = home + user.username = ntpath.basename(home) + + self.state.knowledge_base.users.append(user) + + # TODO: This should be deleted once `provides` sections are no + # longer used. + self.state.fulfilled_deps.add("users.sid") + self.state.fulfilled_deps.add("users.userprofile") + self.state.fulfilled_deps.add("users.homedir") + self.state.fulfilled_deps.add("users.username") + + args = rdf_file_finder.FileFinderArgs() + # TODO: There is no dedicated action for obtaining registry + # values but `STAT` action of the file-finder will get it. This should be + # refactored once registry-specific actions are available. 
+ args.action.action_type = rdf_file_finder.FileFinderAction.Action.STAT
+ args.pathtype = rdf_paths.PathSpec.PathType.REGISTRY
+
+ for user in self.state.knowledge_base.users:
+ # pylint: disable=line-too-long
+ # pyformat: disable
+ args.paths.extend([
+ rf"HKEY_USERS\{user.sid}\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\*",
+ rf"HKEY_USERS\{user.sid}\Environment\*",
+ rf"HKEY_USERS\{user.sid}\Volatile Environment\*",
+ ])
+ # pylint: enable=line-too-long
+ # pyformat: enable
+
+ self.CallClient(
+ server_stubs.VfsFileFinder,
+ args,
+ next_state=self._ProcessWindowsProfileExtras.__name__,
+ )
+
+ # WMI queries are slow, so we consider them "heavyweight".
+ if not self.args.lightweight:
+ users = self.state.knowledge_base.users
+
+ args = rdf_client_action.WMIRequest()
+ args.query = f"""
+ SELECT *
+ FROM Win32_UserAccount
+ WHERE {" OR ".join(f"name = '{user.username}'" for user in users)}
+ """
+ self.CallClient(
+ server_stubs.WmiQuery,
+ args,
+ next_state=self._ProcessWindowsWMIUserAccounts.__name__,
+ )
+
+ def _ProcessWindowsProfileExtras(
+ self,
+ responses: flow_responses.Responses[rdfvalue.RDFValue],
+ ) -> None:
+ if not responses.success:
+ self.Log("Failed to obtain Windows profile extras: %s", responses.status)
+ return
+
+ users_by_sid = {user.sid: user for user in self.state.knowledge_base.users}
+
+ for response in responses:
+ if not isinstance(response, rdf_file_finder.FileFinderResult):
+ raise flow_base.FlowError(f"Unexpected response type: {type(response)}")
+
+ path = pathlib.PureWindowsPath(response.stat_entry.pathspec.path)
+ parts = path.parts
+
+ # TODO: Sometimes we get leading slashes and sometimes not, so
+ # `parts` can have an inconsistent prefix. We locate `HKEY_USERS` instead.
+ # Once we have a dedicated action for retrieving data from the registry in
+ # a consistent way, we should remove this workaround.
+ try:
+ hive_index = parts.index("HKEY_USERS")
+ except ValueError:
+ self.Log("Registry hive not found for %r", path)
+ continue
+
+ sid = parts[hive_index + 1]
+ if not windows_registry_parser.SID_RE.match(sid):
+ self.Log("Unexpected registry SID for %r", path)
+ continue
+
+ try:
+ user = users_by_sid[sid]
+ except KeyError:
+ self.Log("Missing user with SID %r", sid)
+ continue
+
+ registry_key = parts[-2]
+ registry_value = parts[-1]
+ registry_data = response.stat_entry.registry_data.string
+
+ attrs = windows_registry_parser.WinUserSpecialDirs.key_var_mapping
+ try:
+ attr = attrs[registry_key][registry_value]
+ except KeyError:
+ self.Log("Invalid registry value for %r", path)
+ continue
+
+ setattr(user, attr, registry_data)
+
+ def _ProcessWindowsWMIUserAccounts(
+ self,
+ responses: flow_responses.Responses[rdfvalue.RDFValue],
+ ) -> None:
+ if not responses.success:
+ self.Log("Failed to obtain WMI user accounts: %s", responses.status)
+ return
+
+ users_by_sid = {user.sid: user for user in self.state.knowledge_base.users}
+
+ for response in responses:
+ if not isinstance(response, rdf_protodict.Dict):
+ raise flow_base.FlowError(f"Unexpected response type: {type(response)}")
+
+ try:
+ sid = response["SID"]
+ except KeyError:
+ self.Log("Missing 'SID' from WMI result")
+ continue
+
+ try:
+ domain = response["Domain"]
+ except KeyError:
+ self.Log("Missing 'Domain' from WMI result")
+ continue
+
+ try:
+ user = users_by_sid[sid]
+ except KeyError:
+ self.Log("Missing user with SID %r", sid)
+ continue
+
+ user.userdomain = domain
+
+ # TODO: This should be deleted once `provides` sections are no
+ # longer used.
+ self.state.fulfilled_deps.add("users.userdomain") + def End(self, responses): """Finish up.""" del responses @@ -272,10 +1226,16 @@ def GetFirstFlowsForCollection(self): for artifact_name in kb_set: artifact_registry.REGISTRY.GetArtifact(artifact_name) - no_deps_names = artifact_registry.REGISTRY.GetArtifactNames( - os_name=self.state.knowledge_base.os, - name_list=kb_set, - exclude_dependents=True) + # If `kb_set` is empty, `GetArtifactNames` returns *all* artifacts in the + # system for the given platform, which is not what we want. + if kb_set: + no_deps_names = artifact_registry.REGISTRY.GetArtifactNames( + os_name=self.state.knowledge_base.os, + name_list=kb_set, + exclude_dependents=True, + ) + else: + no_deps_names = set() name_deps, self.state.all_deps = ( artifact_registry.REGISTRY.SearchDependencies( diff --git a/grr/server/grr_response_server/artifact_registry.py b/grr/server/grr_response_server/artifact_registry.py index 9847722070..dc2bd09977 100644 --- a/grr/server/grr_response_server/artifact_registry.py +++ b/grr/server/grr_response_server/artifact_registry.py @@ -407,6 +407,23 @@ def GetArtifact(self, name): "artifact repo by running make in the artifact directory." % name) return result + @utils.Synchronized + def Exists(self, name: str) -> bool: + """Checks whether the artifact of the specified name exists in the registry. + + Args: + name: A name of the artifact. + + Returns: + `True` if the artifact exists, `False` otherwise. + """ + try: + self.GetArtifact(name) + except rdf_artifacts.ArtifactNotRegisteredError: + return False + + return True + @utils.Synchronized def GetArtifactNames(self, *args, **kwargs): return set([a.name for a in self.GetArtifacts(*args, **kwargs)]) diff --git a/grr/server/grr_response_server/artifact_registry_test.py b/grr/server/grr_response_server/artifact_registry_test.py index e8c75240cf..cc4e75c52d 100644 --- a/grr/server/grr_response_server/artifact_registry_test.py +++ b/grr/server/grr_response_server/artifact_registry_test.py @@ -280,6 +280,17 @@ def testDatabaseArtifactsAreLoadedEvenIfNoDatastoreIsRegistered(self): self.assertIsNotNone(registry.GetArtifact("Foo")) + def testExistsTrue(self): + registry = ar.ArtifactRegistry() + registry.RegisterArtifact(rdf_artifacts.Artifact(name="Foo")) + + self.assertTrue(registry.Exists("Foo")) + + def testExistsFalse(self): + registry = ar.ArtifactRegistry() + + self.assertFalse(registry.Exists("Foo")) + if __name__ == "__main__": app.run(test_lib.main) diff --git a/grr/server/grr_response_server/artifact_test.py b/grr/server/grr_response_server/artifact_test.py index 25a3452f6f..76cc002cef 100644 --- a/grr/server/grr_response_server/artifact_test.py +++ b/grr/server/grr_response_server/artifact_test.py @@ -15,6 +15,8 @@ from absl.testing import absltest from grr_response_client import actions +from grr_response_client.client_actions import file_fingerprint +from grr_response_client.client_actions import searching from grr_response_client.client_actions import standard from grr_response_core import config from grr_response_core.lib import parser @@ -37,7 +39,6 @@ from grr_response_server.databases import db from grr_response_server.databases import db_test_utils from grr_response_server.flows.general import collectors -from grr_response_server.flows.general import filesystem from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects from grr_response_server.rdfvalues import objects as rdf_objects from grr.test_lib import action_mocks @@ -183,7 +184,7 @@ class 
ArtifactTest(flow_test_lib.FlowTestsBaseclass): def setUp(self): """Make sure things are initialized.""" super().setUp() - self.client_mock = action_mocks.FileFinderClientMock() + self.client_mock = action_mocks.ClientFileFinderWithVFS() patcher = artifact_test_lib.PatchDefaultArtifactRegistry() patcher.start() @@ -420,9 +421,11 @@ def testFilesArtifact(self): """Check GetFiles artifacts.""" client_id = test_lib.TEST_CLIENT_ID with vfs_test_lib.FakeTestDataVFSOverrider(): - self.RunCollectorAndGetResults(["TestFilesArtifact"], - client_mock=self.client_mock, - client_id=client_id) + self.RunCollectorAndGetResults( + ["TestFilesArtifact"], + client_mock=action_mocks.ClientFileFinderWithVFS(), + client_id=client_id, + ) cp = db.ClientPath.OS(client_id, ("var", "log", "auth.log")) fd = file_store.OpenFile(cp) self.assertNotEmpty(fd.read()) @@ -431,9 +434,11 @@ def testFilesArtifact(self): def testLinuxPasswdHomedirsArtifact(self): """Check LinuxPasswdHomedirs artifacts.""" with vfs_test_lib.FakeTestDataVFSOverrider(): - fd = self.RunCollectorAndGetResults(["LinuxPasswdHomedirs"], - client_mock=self.client_mock, - client_id=test_lib.TEST_CLIENT_ID) + fd = self.RunCollectorAndGetResults( + ["LinuxPasswdHomedirs"], + client_mock=action_mocks.ClientFileFinderWithVFS(), + client_id=test_lib.TEST_CLIENT_ID, + ) self.assertLen(fd, 5) self.assertCountEqual( @@ -551,10 +556,22 @@ class GrrKbTest(ArtifactTest): def _RunKBI(self, **kw): session_id = flow_test_lib.TestFlowHelper( artifact.KnowledgeBaseInitializationFlow.__name__, - self.client_mock, + # TODO: remove additional client actions when Glob flow + # ArtifactCollectorFlow dependency is removed. + action_mocks.ClientFileFinderWithVFS( + file_fingerprint.FingerprintFile, + searching.Find, + searching.Grep, + standard.HashBuffer, + standard.HashFile, + standard.GetFileStat, + standard.ListDirectory, + standard.TransferBuffer, + ), client_id=test_lib.TEST_CLIENT_ID, creator=self.test_username, - **kw) + **kw, + ) results = flow_test_lib.GetFlowResults(test_lib.TEST_CLIENT_ID, session_id) self.assertLen(results, 1) @@ -589,7 +606,7 @@ def testKnowledgeBaseRetrievalWindows(self): self.assertEqual(kb.environ_windir, "C:\\Windows") self.assertEqual(kb.environ_profilesdirectory, "C:\\Users") - self.assertEqual(kb.environ_allusersprofile, "C:\\Users\\All Users") + self.assertEqual(kb.environ_allusersprofile, "C:\\ProgramData") self.assertEqual(kb.environ_allusersappdata, "C:\\ProgramData") self.assertEqual(kb.environ_temp, "C:\\Windows\\TEMP") self.assertEqual(kb.environ_systemdrive, "C:") @@ -599,44 +616,6 @@ def testKnowledgeBaseRetrievalWindows(self): self.assertEqual(user.username, "jim") self.assertEqual(user.sid, "S-1-5-21-702227068-2140022151-3110739409-1000") - @parser_test_lib.WithParser("MultiProvide", MultiProvideParser) - def testKnowledgeBaseMultiProvides(self): - """Check we can handle multi-provides.""" - # Replace some artifacts with test one that will run the MultiProvideParser. 
- self.LoadTestArtifacts() - with test_lib.ConfigOverrider( - {"Artifacts.knowledge_base": ["DepsProvidesMultiple"]}): - kb = self._RunKBI() - - self.assertEqual(kb.environ_temp, "tempvalue") - self.assertEqual(kb.environ_path, "pathvalue") - - def testGlobRegistry(self): - """Test that glob works on registry.""" - client_id = test_lib.TEST_CLIENT_ID - paths = [ - "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT" - "\\CurrentVersion\\ProfileList\\ProfilesDirectory", - "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT" - "\\CurrentVersion\\ProfileList\\AllUsersProfile" - ] - - flow_test_lib.TestFlowHelper( - filesystem.Glob.__name__, - self.client_mock, - paths=paths, - pathtype=rdf_paths.PathSpec.PathType.REGISTRY, - client_id=client_id, - creator=self.test_username) - path = paths[0].replace("\\", "/") - - path_info = data_store.REL_DB.ReadPathInfo( - client_id, - rdf_objects.PathInfo.PathType.REGISTRY, - components=tuple(path.split("/"))) - self.assertEqual(path_info.stat_entry.registry_data.GetValue(), - "%SystemDrive%\\Users") - @parser_test_lib.WithAllParsers def testGetKBDependencies(self): """Test that KB dependencies are calculated correctly.""" @@ -816,8 +795,7 @@ def setUp(self): @parser_test_lib.WithAllParsers def testKnowledgeBaseRetrievalDarwin(self): """Check we can retrieve a Darwin kb.""" - with test_lib.ConfigOverrider( - {"Artifacts.knowledge_base": ["UsersDirectory"]}): + with test_lib.ConfigOverrider({"Artifacts.knowledge_base": []}): with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS, vfs_test_lib.ClientVFSHandlerFixture): kb = self._RunKBI() diff --git a/grr/server/grr_response_server/bin/fleetspeak_frontend_server.py b/grr/server/grr_response_server/bin/fleetspeak_frontend_server.py index 93db7fa09f..7e742d99a0 100644 --- a/grr/server/grr_response_server/bin/fleetspeak_frontend_server.py +++ b/grr/server/grr_response_server/bin/fleetspeak_frontend_server.py @@ -1,13 +1,14 @@ #!/usr/bin/env python """This is the GRR frontend FS Server.""" import logging -from typing import Sequence +from typing import FrozenSet, Sequence, Tuple import grpc from grr_response_core import config from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import flows as rdf_flows +from grr_response_core.lib.util import cache from grr_response_core.stats import metrics from grr_response_server import communicator from grr_response_server import data_store @@ -44,6 +45,23 @@ WARN_IF_PROCESSING_LONGER_THAN = rdfvalue.Duration.From(30, rdfvalue.SECONDS) +@cache.WithLimitedCallFrequencyWithoutReturnValue( + MIN_DELAY_BETWEEN_METADATA_UPDATES +) +def RateLimitedWriteClientMetadata( + client_id: str, + # fleetspeak_validation_info has to be hashable in order for the decorator + # function to work. Hence using frozenset instead of a dict. + fleetspeak_validation_info: FrozenSet[Tuple[str, str]], +) -> None: + """Rate-limiter to prevent overload of a single DB row on heavy QPS load.""" + data_store.REL_DB.WriteClientMetadata( + client_id, + last_ping=rdfvalue.RDFDatetime.Now(), + fleetspeak_validation_info=dict(fleetspeak_validation_info), + ) + + class GRRFSServer: """The GRR FS frontend server. 
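`RateLimitedWriteClientMetadata` above rate-limits per unique argument tuple,
which is why its arguments must be hashable. A minimal sketch of the idea (an
illustration of the pattern only, not the actual
`cache.WithLimitedCallFrequencyWithoutReturnValue` implementation, which
additionally handles per-key locking and cache expiry):

    import threading
    import time

    def with_limited_call_frequency(min_delay_secs: float):
      """Runs the wrapped function at most once per `min_delay_secs` per args."""

      def decorator(func):
        lock = threading.Lock()
        last_call = {}  # Maps argument tuples to the last invocation time.

        def wrapper(*args):
          now = time.monotonic()
          with lock:
            if now - last_call.get(args, float("-inf")) < min_delay_secs:
              return  # Dropped: called again too soon with the same arguments.
            last_call[args] = now
          return func(*args)

        return wrapper

      return decorator

Because calls are keyed by the argument tuple, a `dict` argument would raise
`TypeError: unhashable type`; hence the `frozenset(validation_info.items())`
conversion at the call site below.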
@@ -102,10 +120,22 @@ def _LogDelayed(msg: str) -> None:
 "Writing client metadata for existing client "
 f"(time_since_last_ping={time_since_last_ping}"
 )
- data_store.REL_DB.WriteClientMetadata(
+ # Even though we explicitly check for the last_ping timestamp to
+ # be older than (now - MIN_DELAY_BETWEEN_METADATA_UPDATES), we
+ # still can experience WriteClientMetadata spikes when a client
+ # sends a lot of messages together after more than
+ # MIN_DELAY_BETWEEN_METADATA_UPDATES seconds of silence. These
+ # messages are likely to be handled by various threads of the
+ # same GRR Fleetspeak Frontend process. This creates a race
+ # condition: multiple threads of the process will read the same
+ # row, check the last ping and decide to update it. Rate-limiting
+ # the calls protects against this scenario. Note: it doesn't
+ # protect against the scenario of multiple GRR Fleetspeak Frontend
+ # processes receiving the messages at the same time, but such
+ # protection is currently likely excessive.
+ RateLimitedWriteClientMetadata(
 grr_client_id,
- last_ping=now,
- fleetspeak_validation_info=validation_info,
+ frozenset(validation_info.items()),
 )
 _LogDelayed("Written client metadata for existing client")
diff --git a/grr/server/grr_response_server/blob_store.py b/grr/server/grr_response_server/blob_store.py
index 1569f2c0fe..e0e1b575e3 100644
--- a/grr/server/grr_response_server/blob_store.py
+++ b/grr/server/grr_response_server/blob_store.py
@@ -165,6 +165,22 @@ def ReadAndWaitForBlobs(
 return results

+ def ReadAndWaitForBlob(
+ self,
+ blob_id: rdf_objects.BlobID,
+ timeout: rdfvalue.Duration,
+ ) -> Optional[bytes]:
+ """Reads the specified blob, waiting until it is available or timing out.
+
+ Args:
+ blob_id: An identifier of the blob to read.
+ timeout: A timeout after which `None` is returned instead.
+
+ Returns:
+ Content of the requested blob or `None` if the timeout was reached.
+ """ + return self.ReadAndWaitForBlobs([blob_id], timeout)[blob_id] + def WaitForBlobs( self, blob_ids: Iterable[rdf_objects.BlobID], diff --git a/grr/server/grr_response_server/blob_store_test_mixin.py b/grr/server/grr_response_server/blob_store_test_mixin.py index 0bf5b8f1d8..4dfa2ea452 100644 --- a/grr/server/grr_response_server/blob_store_test_mixin.py +++ b/grr/server/grr_response_server/blob_store_test_mixin.py @@ -3,6 +3,7 @@ import abc +import os import threading import time from unittest import mock @@ -218,6 +219,26 @@ def testReadAndWaitForBlobsPopulatesStats(self, sleep_mock): timeout=rdfvalue.Duration.From( 10, rdfvalue.SECONDS)) + def testReadAndWaitForBlobExisting(self): + blob = os.urandom(256) + blob_id = self.blob_store.WriteBlobWithUnknownHash(blob) + + read_blob = self.blob_store.ReadAndWaitForBlob( + blob_id, + timeout=rdfvalue.Duration.From(0, rdfvalue.SECONDS), + ) + self.assertEqual(read_blob, blob) + + def testReadAndWaitForBlobTimeout(self): + blob = os.urandom(256) + blob_id = rdf_objects.BlobID.FromBlobData(blob) + + read_blob = self.blob_store.ReadAndWaitForBlob( + blob_id, + timeout=rdfvalue.Duration.From(0, rdfvalue.SECONDS), + ) + self.assertIsNone(read_blob) + def testWaitForBlobsDoesNotWaitIfBlobsAreAlreadyPresent(self): timeout = rdfvalue.Duration.From(0, rdfvalue.SECONDS) diff --git a/grr/server/grr_response_server/client_fixture.py b/grr/server/grr_response_server/client_fixture.py index 0ab9bf03ba..ec32e1b160 100644 --- a/grr/server/grr_response_server/client_fixture.py +++ b/grr/server/grr_response_server/client_fixture.py @@ -3718,6 +3718,21 @@ } """ })), + (u"/registry/HKEY_USERS/S-1-5-21-702227000-2140022111-3110739999-1990/SOFTWARE/Microsoft/Windows/CurrentVersion/Explorer/Shell Folders/Desktop", ("File", { + "stat": """ +st_mode: 32768 +st_size: 23 +st_mtime: 1247547054 +registry_type: REG_SZ +pathspec { + path: "HKEY_USERS/S-1-5-21-702227000-2140022111-3110739999-1990/SOFTWARE/Microsoft/Windows/CurrentVersion/Explorer/Shell Folders/Desktop" + pathtype: REGISTRY +} +registry_data { + string: "C:\\\\Users\\\\foobar\\\\Desktop" +} + """ + })), (u"/registry/HKEY_LOCAL_MACHINE/SYSTEM/Select/Current", ("File", { "stat": @@ -3752,6 +3767,25 @@ } """ })), + # pylint: disable=line-too-long + # pyformat: disable + ("/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows NT/CurrentVersion/ProfileList/AllUsersProfile", ("File", { + "stat": """ +st_mode: 32768 +st_size: 1 +st_mtime: 0 +registry_type: REG_EXPAND_SZ +pathspec { + pathtype: REGISTRY + path: "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows NT/CurrentVersion/ProfileList/AllUsersProfile" +} +registry_data { + string: "%%ProgramData%%" +} + """ + })), + # pylint: enable=line-too-long + # pyformat: enable (u"/registry/HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/Control/Session " u"Manager/Environment/windir", ("File", { "stat": diff --git a/grr/server/grr_response_server/databases/db.py b/grr/server/grr_response_server/databases/db.py index a28ecc416a..df1e3566cc 100644 --- a/grr/server/grr_response_server/databases/db.py +++ b/grr/server/grr_response_server/databases/db.py @@ -632,6 +632,40 @@ def DeleteArtifact(self, name): """ @abc.abstractmethod + def MultiWriteClientMetadata( + self, + client_ids: Collection[str], + certificate: Optional[rdf_crypto.RDFX509Cert] = None, + first_seen: Optional[rdfvalue.RDFDatetime] = None, + last_ping: Optional[rdfvalue.RDFDatetime] = None, + last_clock: Optional[rdfvalue.RDFDatetime] = None, + last_ip: Optional[rdf_client_network.NetworkAddress] = None, + 
last_foreman: Optional[rdfvalue.RDFDatetime] = None, + fleetspeak_validation_info: Optional[Mapping[str, str]] = None, + ) -> None: + """Writes ClientMetadata records for a list of clients. + + Updates one or more client metadata fields for a list of clients. Any of + the data fields can be left as None, and in this case are not changed. + + Args: + client_ids: A collection of GRR client id strings, e.g. + ["C.ea3b2b71840d6fa7", "C.ea3b2b71840d6fa8"] + certificate: If set, should be an rdfvalues.crypto.RDFX509 protocol + buffer. Normally only set during initial client record creation. + first_seen: An rdfvalue.Datetime, indicating the first time the client + contacted the server. + last_ping: An rdfvalue.Datetime, indicating the last time the client + contacted the server. + last_clock: An rdfvalue.Datetime, indicating the last client clock time + reported to the server. + last_ip: An rdfvalues.client.NetworkAddress, indicating the last observed + ip address for the client. + last_foreman: An rdfvalue.Datetime, indicating the last time that the + client sent a foreman message to the server. + fleetspeak_validation_info: A dict with validation info from Fleetspeak. + """ + def WriteClientMetadata( self, client_id: str, @@ -664,6 +698,16 @@ def WriteClientMetadata( client sent a foreman message to the server. fleetspeak_validation_info: A dict with validation info from Fleetspeak. """ + self.MultiWriteClientMetadata( + client_ids=[client_id], + certificate=certificate, + first_seen=first_seen, + last_ping=last_ping, + last_clock=last_clock, + last_ip=last_ip, + last_foreman=last_foreman, + fleetspeak_validation_info=fleetspeak_validation_info, + ) @abc.abstractmethod def DeleteClient(self, client_id): @@ -1922,48 +1966,6 @@ def ReadHashBlobReferences( # and remove the message to avoid endless repetition of some broken action. CLIENT_MESSAGES_TTL = 5 - @abc.abstractmethod - def WriteClientActionRequests(self, requests): - """Writes messages that should go to the client to the db. - - Args: - requests: A list of ClientActionRequest objects to write. - """ - - @abc.abstractmethod - def LeaseClientActionRequests(self, client_id, lease_time=None, limit=None): - """Leases available client action requests for the client with the given id. - - Args: - client_id: The client for which the requests should be leased. - lease_time: rdfvalue.Duration indicating how long the lease should be - valid. - limit: Lease at most requests. If set, must be less than 10000. - Default is 5000. - - Returns: - A list of ClientActionRequest objects. - """ - - @abc.abstractmethod - def ReadAllClientActionRequests(self, client_id): - """Reads all client action requests available for a given client_id. - - Args: - client_id: The client for which the requests should be read. - - Returns: - A list of ClientActionRequest objects. - """ - - @abc.abstractmethod - def DeleteClientActionRequests(self, requests): - """Deletes a list of client action requests from the db. - - Args: - requests: A list of ClientActionRequest objects to delete. - """ - @abc.abstractmethod def WriteFlowObject(self, flow_obj, allow_update=True): """Writes a flow object to the database. 
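The `WriteClientMetadata` change above follows a common batching pattern: the
singular call becomes a thin concrete wrapper around a new abstract batch
call, so database backends only need to implement the batch version and the
two entry points cannot diverge. A minimal sketch of the pattern (with
illustrative names, not the actual GRR classes):

    import abc
    from typing import Collection, Optional

    class MetadataStore(abc.ABC):
      """Sketch of a store exposing single- and multi-item writes."""

      @abc.abstractmethod
      def MultiWrite(
          self,
          client_ids: Collection[str],
          last_ping: Optional[float] = None,
      ) -> None:
        """Backends implement only the batch write."""

      def Write(self, client_id: str, last_ping: Optional[float] = None) -> None:
        # The single-item API is derived from the batch one, so the two entry
        # points cannot diverge in behaviour.
        self.MultiWrite([client_id], last_ping=last_ping)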
@@ -3133,9 +3135,9 @@ def DeleteArtifact(self, name): precondition.AssertType(name, Text) return self.delegate.DeleteArtifact(name) - def WriteClientMetadata( + def MultiWriteClientMetadata( self, - client_id: str, + client_ids: Collection[str], certificate: Optional[rdf_crypto.RDFX509Cert] = None, first_seen: Optional[rdfvalue.RDFDatetime] = None, last_ping: Optional[rdfvalue.RDFDatetime] = None, @@ -3144,7 +3146,7 @@ def WriteClientMetadata( last_foreman: Optional[rdfvalue.RDFDatetime] = None, fleetspeak_validation_info: Optional[Mapping[str, str]] = None, ) -> None: - precondition.ValidateClientId(client_id) + _ValidateClientIds(client_ids) precondition.AssertOptionalType(certificate, rdf_crypto.RDFX509Cert) precondition.AssertOptionalType(first_seen, rdfvalue.RDFDatetime) precondition.AssertOptionalType(last_ping, rdfvalue.RDFDatetime) @@ -3155,8 +3157,8 @@ def WriteClientMetadata( if fleetspeak_validation_info is not None: precondition.AssertDictType(fleetspeak_validation_info, str, str) - return self.delegate.WriteClientMetadata( - client_id, + return self.delegate.MultiWriteClientMetadata( + client_ids=client_ids, certificate=certificate, first_seen=first_seen, last_ping=last_ping, @@ -3833,30 +3835,6 @@ def ReadHashBlobReferences(self, hashes): precondition.AssertIterableType(hashes, rdf_objects.SHA256HashID) return self.delegate.ReadHashBlobReferences(hashes) - def WriteClientActionRequests(self, requests): - for request in requests: - precondition.AssertType(request, rdf_flows.ClientActionRequest) - return self.delegate.WriteClientActionRequests(requests) - - def LeaseClientActionRequests(self, client_id, lease_time=None, limit=5000): - precondition.ValidateClientId(client_id) - _ValidateDuration(lease_time) - precondition.AssertType(limit, int) - if limit >= 10000: - raise ValueError("Limit of %d is too high.") - - return self.delegate.LeaseClientActionRequests( - client_id, lease_time=lease_time, limit=limit) - - def ReadAllClientActionRequests(self, client_id): - precondition.ValidateClientId(client_id) - return self.delegate.ReadAllClientActionRequests(client_id) - - def DeleteClientActionRequests(self, requests): - for request in requests: - precondition.AssertType(request, rdf_flows.ClientActionRequest) - return self.delegate.DeleteClientActionRequests(requests) - def WriteFlowObject(self, flow_obj, allow_update=True): precondition.AssertType(flow_obj, rdf_flow_objects.Flow) precondition.AssertType(allow_update, bool) diff --git a/grr/server/grr_response_server/databases/db_clients_test.py b/grr/server/grr_response_server/databases/db_clients_test.py index 71757a513d..3ea96815dc 100644 --- a/grr/server/grr_response_server/databases/db_clients_test.py +++ b/grr/server/grr_response_server/databases/db_clients_test.py @@ -146,6 +146,65 @@ def testClientMetadataInitialWrite(self): self.assertEqual(m2.certificate, CERT) self.assertEqual(m2.first_seen, rdfvalue.RDFDatetime(100000000)) + def testClientMetadataDefaultValues(self): + d = self.db + + client_id = "C.ab413187fefa1dcf" + # Empty initialization + d.WriteClientMetadata(client_id) + + # Check NULL/empty default values + md = d.ReadClientMetadata(client_id) + self.assertIsNone(md.certificate) + self.assertIsNone(md.first_seen) + self.assertIsNone(md.ping) + self.assertIsNone(md.clock) + self.assertIsNone(md.last_foreman_time) + self.assertIsNone(md.last_crash_timestamp) + self.assertIsNone(md.startup_info_timestamp) + self.assertFalse(md.ip) + self.assertFalse(md.last_fleetspeak_validation_info) + + def 
testClientMetadataSkipFields(self): + client_id = "C.fc413187fefa1dcf" + self.db.WriteClientMetadata( + client_id, + certificate=CERT, + first_seen=rdfvalue.RDFDatetime(100000000), + last_clock=rdfvalue.RDFDatetime(100000001), + last_foreman=rdfvalue.RDFDatetime(100000002), + last_ping=rdfvalue.RDFDatetime(100000003), + last_ip=rdf_client_network.NetworkAddress( + human_readable_address="8.8.8.8" + ), + fleetspeak_validation_info={"foo": "bar"}, + ) + # Skip fields + self.db.WriteClientMetadata( + client_id, + certificate=None, + first_seen=None, + last_clock=None, + last_foreman=None, + last_ping=None, + last_ip=None, + fleetspeak_validation_info=None, + ) + + md = self.db.ReadClientMetadata(client_id) + self.assertEqual(md.certificate, CERT) + self.assertEqual(md.first_seen, rdfvalue.RDFDatetime(100000000)) + self.assertEqual(md.clock, rdfvalue.RDFDatetime(100000001)) + self.assertEqual(md.last_foreman_time, rdfvalue.RDFDatetime(100000002)) + self.assertEqual(md.ping, rdfvalue.RDFDatetime(100000003)) + self.assertEqual( + md.ip, + rdf_client_network.NetworkAddress(human_readable_address="8.8.8.8"), + ) + self.assertEqual( + md.last_fleetspeak_validation_info.ToStringDict(), {"foo": "bar"} + ) + def testClientMetadataSubsecond(self): client_id = "C.fc413187fefa1dcf" self.db.WriteClientMetadata( @@ -189,6 +248,38 @@ def testClientMetadataPing(self): rdf_client_network.NetworkAddress(human_readable_address="8.8.8.8")) self.assertEqual(m1.last_foreman_time, rdfvalue.RDFDatetime(220000000000)) + def testMultiWriteClientMetadata(self): + d = self.db + + client_id_1 = db_test_utils.InitializeClient(self.db) + client_id_2 = db_test_utils.InitializeClient(self.db) + + d.MultiWriteClientMetadata( + [client_id_1, client_id_2], last_foreman=rdfvalue.RDFDatetime(100000034) + ) + + res = d.MultiReadClientMetadata([client_id_1, client_id_2]) + self.assertLen(res, 2) + + m1 = res[client_id_1] + self.assertEqual(m1.last_foreman_time, rdfvalue.RDFDatetime(100000034)) + + m2 = res[client_id_2] + self.assertEqual(m2.last_foreman_time, rdfvalue.RDFDatetime(100000034)) + + def testMultiWriteClientMetadataNoValues(self): + client_id_1 = db_test_utils.InitializeClient(self.db) + client_id_2 = db_test_utils.InitializeClient(self.db) + + self.db.MultiWriteClientMetadata( + [client_id_1, client_id_2] + ) # Should not fail. + + def testMultiWriteClientMetadataNoClients(self): + self.db.MultiWriteClientMetadata( + [], last_foreman=rdfvalue.RDFDatetime(100000035) + ) # Should not fail. + def testClientMetadataValidatesIP(self): d = self.db client_id = "C.fc413187fefa1dcf" @@ -2097,12 +2188,6 @@ def _AddClientKeyedData(self, client_id): self.db.WriteFlowProcessingRequests( [rdf_flows.FlowProcessingRequest(client_id=client_id, flow_id=flow_id)]) - # A client action request. - self.db.WriteClientActionRequests([ - rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=1) - ]) - return flow_id def _CheckClientKeyedDataWasDeleted(self, client_id, flow_id): @@ -2128,9 +2213,6 @@ def _CheckClientKeyedDataWasDeleted(self, client_id, flow_id): with self.assertRaises(db.UnknownFlowError): self.db.ReadFlowObject(client_id, flow_id) - # A client action request. 
- self.assertEmpty(self.db.ReadAllClientActionRequests(client_id)) - def testDeleteClient(self): client_id = db_test_utils.InitializeClient(self.db) @@ -2266,7 +2348,7 @@ def testRemovesFleetspeakValidationInfoWhenValidationInfoIsEmpty(self): metadata = res[client_id] self.assertFalse(metadata.last_fleetspeak_validation_info) - def testRemovesFleetspeakValidationInfoWhenValidationInfoIsNotPresent(self): + def testKeepsFleetspeakValidationInfoWhenValidationInfoIsNotPresent(self): client_id = "C.fc413187fefa1dcf" self.db.WriteClientMetadata( @@ -2276,7 +2358,9 @@ def testRemovesFleetspeakValidationInfoWhenValidationInfoIsNotPresent(self): res = self.db.MultiReadClientMetadata([client_id]) self.assertLen(res, 1) metadata = res[client_id] - self.assertFalse(metadata.last_fleetspeak_validation_info) + self.assertEqual( + metadata.last_fleetspeak_validation_info.ToStringDict(), {"foo": "bar"} + ) # This file is a test library and thus does not require a __main__ block. diff --git a/grr/server/grr_response_server/databases/db_flows_test.py b/grr/server/grr_response_server/databases/db_flows_test.py index bc2b5928ca..ae51bb9f1d 100644 --- a/grr/server/grr_response_server/databases/db_flows_test.py +++ b/grr/server/grr_response_server/databases/db_flows_test.py @@ -29,186 +29,6 @@ class DatabaseTestFlowMixin(object): This mixin adds methods to test the handling of flows. """ - def testClientActionRequestStorage(self): - client_id = db_test_utils.InitializeClient(self.db) - flow_id = db_test_utils.InitializeFlow(self.db, client_id) - - self.db.WriteFlowRequests([ - rdf_flow_objects.FlowRequest( - client_id=client_id, flow_id=flow_id, request_id=1) - ]) - req = rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=1) - - self.db.WriteClientActionRequests([req]) - - read_reqs = self.db.ReadAllClientActionRequests(client_id) - self.assertLen(read_reqs, 1) - self.assertEqual(req, read_reqs[0]) - - self.db.DeleteClientActionRequests([req]) - read_reqs = self.db.ReadAllClientActionRequests(client_id) - self.assertEmpty(read_reqs) - - # Extra delete should not raise. - self.db.DeleteClientActionRequests([req]) - - # Deleting the same message multiple times is an error. 
- with self.assertRaises(ValueError): - self.db.DeleteClientActionRequests([req, req]) - - def testWriteClientActionRequestsRaisesOnUnknownRequest(self): - req = rdf_flows.ClientActionRequest( - client_id=u"C.1234567890000000", flow_id="ABCD1234", request_id=5) - with self.assertRaises(db.AtLeastOneUnknownRequestError): - self.db.WriteClientActionRequests([req]) - - def testClientActionRequestUpdate(self): - client_id = db_test_utils.InitializeClient(self.db) - flow_id = db_test_utils.InitializeFlow(self.db, client_id) - - req = rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=1) - self.db.WriteFlowRequests([ - rdf_flow_objects.FlowRequest( - client_id=client_id, flow_id=flow_id, request_id=1) - ]) - - cpu_limit = req.cpu_limit_ms - self.assertGreater(cpu_limit, 1000000) - - for _ in range(5): - req.cpu_limit_ms -= 100000 - self.db.WriteClientActionRequests([req]) - read_reqs = self.db.ReadAllClientActionRequests(client_id) - self.assertLen(read_reqs, 1) - self.assertEqual(req, read_reqs[0]) - - def testClientActionRequestLeasing(self): - client_id = db_test_utils.InitializeClient(self.db) - flow_id = db_test_utils.InitializeFlow(self.db, client_id) - - flow_requests = [] - client_requests = [] - for i in range(10): - flow_requests.append( - rdf_flow_objects.FlowRequest( - client_id=client_id, flow_id=flow_id, request_id=i)) - client_requests.append( - rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=i)) - - lease_time = rdfvalue.Duration.From(5, rdfvalue.MINUTES) - self.db.WriteFlowRequests(flow_requests) - self.db.WriteClientActionRequests(client_requests) - - t0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000) - with test_lib.FakeTime(t0): - t0_expiry = t0 + lease_time - leased = self.db.LeaseClientActionRequests( - client_id, lease_time=lease_time, limit=5) - - self.assertLen(leased, 5) - - for request in leased: - self.assertEqual(request.leased_until, t0_expiry) - self.assertEqual(request.leased_by, utils.ProcessIdString()) - - t1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 100) - with test_lib.FakeTime(t1): - t1_expiry = t1 + lease_time - leased = self.db.LeaseClientActionRequests( - client_id, lease_time=lease_time, limit=5) - - self.assertLen(leased, 5) - - for request in leased: - self.assertEqual(request.leased_until, t1_expiry) - self.assertEqual(request.leased_by, utils.ProcessIdString()) - - # Nothing left to lease. - leased = self.db.LeaseClientActionRequests( - client_id, lease_time=lease_time, limit=2) - - self.assertEmpty(leased) - - read = self.db.ReadAllClientActionRequests(client_id) - - self.assertLen(read, 10) - for r in read: - self.assertEqual(r.leased_by, utils.ProcessIdString()) - - self.assertLen([r for r in read if r.leased_until == t0_expiry], 5) - self.assertLen([r for r in read if r.leased_until == t1_expiry], 5) - - # Half the leases expired. - t2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 350) - with test_lib.FakeTime(t2): - leased = self.db.LeaseClientActionRequests( - client_id, lease_time=lease_time) - - self.assertLen(leased, 5) - - # All of them expired. 
- t3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 10350) - with test_lib.FakeTime(t3): - leased = self.db.LeaseClientActionRequests( - client_id, lease_time=lease_time) - - self.assertLen(leased, 10) - - def testClientActionRequestsTTL(self): - client_id = db_test_utils.InitializeClient(self.db) - flow_id = db_test_utils.InitializeFlow(self.db, client_id) - - flow_requests = [] - client_requests = [] - for i in range(10): - flow_requests.append( - rdf_flow_objects.FlowRequest( - client_id=client_id, flow_id=flow_id, request_id=i)) - client_requests.append( - rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=i)) - self.db.WriteFlowRequests(flow_requests) - self.db.WriteClientActionRequests(client_requests) - - reqs = self.db.ReadAllClientActionRequests(client_id) - self.assertLen(reqs, 10) - - for request in reqs: - self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL) - - now = rdfvalue.RDFDatetime.Now() - lease_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS) - - for i in range(db.Database.CLIENT_MESSAGES_TTL): - now += rdfvalue.Duration.From(120, rdfvalue.SECONDS) - with test_lib.FakeTime(now): - leased = self.db.LeaseClientActionRequests( - client_id, lease_time=lease_time, limit=10) - self.assertLen(leased, 10) - - # Check that the ttl is read. - for request in leased: - self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL - i - 1) - - reqs = self.db.ReadAllClientActionRequests(client_id) - self.assertLen(reqs, 10) - - for request in reqs: - self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL - i - 1) - - now += rdfvalue.Duration.From(120, rdfvalue.SECONDS) - with test_lib.FakeTime(now): - leased = self.db.LeaseClientActionRequests( - client_id, lease_time=lease_time, limit=10) - self.assertEmpty(leased) - - # ReadAllClientActionRequests includes also requests whose TTL has - # expired. Make sure that the requests have been deleted from the db. - self.assertEqual(self.db.ReadAllClientActionRequests(client_id), []) - def testFlowWritingUnknownClient(self): flow_id = u"1234ABCD" client_id = u"C.1234567890123456" @@ -882,7 +702,6 @@ def testDeleteFlowRequests(self): requests = [] responses = [] - client_requests = [] for request_id in range(1, 4): requests.append( rdf_flow_objects.FlowRequest( @@ -893,13 +712,9 @@ def testDeleteFlowRequests(self): flow_id=flow_id, request_id=request_id, response_id=1)) - client_requests.append( - rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=request_id)) self.db.WriteFlowRequests(requests) self.db.WriteFlowResponses(responses) - self.db.WriteClientActionRequests(client_requests) request_list = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id) self.assertCountEqual([req.request_id for req, _ in request_list], @@ -1418,29 +1233,6 @@ def testRewritingResponsesForIncrementalRequestsTriggersMoreProcessing(self): requests_to_process = self.db.ReadFlowProcessingRequests() self.assertLen(requests_to_process, 1) - def testResponsesAnyRequestTriggerClientActionRequestDeletion(self): - # Write a flow that is waiting for request #2. 
- client_id = db_test_utils.InitializeClient(self.db) - flow_id = db_test_utils.InitializeFlow( - self.db, client_id, next_request_to_process=2) - - for i in range(5): - self.db.WriteFlowRequests([ - rdf_flow_objects.FlowRequest( - client_id=client_id, flow_id=flow_id, request_id=i) - ]) - - req = rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=3) - self.db.WriteClientActionRequests([req]) - - self.assertTrue(self.db.ReadAllClientActionRequests(client_id)) - - self._WriteCompleteResponses( - client_id, flow_id, request_id=3, num_responses=3) - - self.assertFalse(self.db.ReadAllClientActionRequests(client_id)) - def testLeaseFlowForProcessingRaisesIfParentHuntIsStoppedOrCompleted(self): hunt_id = db_test_utils.InitializeHunt(self.db) self.db.UpdateHuntObject( @@ -1789,25 +1581,6 @@ def testDeleteAllFlowRequestsAndResponses(self): all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id1, flow_id2) self.assertEqual(all_requests, []) - def testDeleteAllFlowRequestsAndResponsesWithClientRequests(self): - client_id = u"C.1234567890123456" - flow_id = u"1234ABCD" - - self.db.WriteClientMetadata(client_id) - - self._WriteRequestAndResponses(client_id, flow_id) - - req = rdf_flows.ClientActionRequest( - client_id=client_id, flow_id=flow_id, request_id=1) - self.db.WriteClientActionRequests([req]) - - self._CheckRequestsAndResponsesAreThere(client_id, flow_id) - - self.db.DeleteAllFlowRequestsAndResponses(client_id, flow_id) - - self.assertEmpty( - self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)) - def testReadFlowRequestsReadyForProcessing(self): client_id = u"C.1234567890000000" flow_id = u"12344321" @@ -2062,8 +1835,8 @@ def Callback(request): request_id=request.request_id, response_id=0, # For the purpose of the test, the payload can be arbitrary, - # using rdf_flows.ClientActionRequest as a sample struct. - payload=rdf_flows.ClientActionRequest(), + # using rdf_flow_objects.FlowRequest as a sample struct. 
+ payload=rdf_flow_objects.FlowRequest(), ) self.db.WriteFlowResponses([response]) diff --git a/grr/server/grr_response_server/databases/db_foreman_rules_test.py b/grr/server/grr_response_server/databases/db_foreman_rules_test.py index d10c0600ab..6f9dbcdbdc 100644 --- a/grr/server/grr_response_server/databases/db_foreman_rules_test.py +++ b/grr/server/grr_response_server/databases/db_foreman_rules_test.py @@ -41,27 +41,27 @@ def testForemanRuleWrite(self): self.assertEqual(read[0], rule) def testForemanRuleRemove(self): - db_test_utils.InitializeHunt(self.db, "H:123456") - rule1 = self._GetTestRule("H:123456") + db_test_utils.InitializeHunt(self.db, "123456") + rule1 = self._GetTestRule("123456") self.db.WriteForemanRule(rule1) - db_test_utils.InitializeHunt(self.db, "H:654321") - rule2 = self._GetTestRule("H:654321") + db_test_utils.InitializeHunt(self.db, "654321") + rule2 = self._GetTestRule("654321") self.db.WriteForemanRule(rule2) - db_test_utils.InitializeHunt(self.db, "H:ABCDEF") - rule3 = self._GetTestRule("H:ABCDEF") + db_test_utils.InitializeHunt(self.db, "ABCDEF") + rule3 = self._GetTestRule("ABCDEF") self.db.WriteForemanRule(rule3) read = self.db.ReadAllForemanRules() self.assertLen(read, 3) - self.db.RemoveForemanRule("H:654321") + self.db.RemoveForemanRule("654321") read = self.db.ReadAllForemanRules() self.assertLen(read, 2) self.assertCountEqual(read, [rule1, rule3]) - self.db.RemoveForemanRule("H:123456") + self.db.RemoveForemanRule("123456") read = self.db.ReadAllForemanRules() self.assertLen(read, 1) self.assertEqual(read[0], rule3) diff --git a/grr/server/grr_response_server/databases/db_test_utils_test.py b/grr/server/grr_response_server/databases/db_test_utils_test.py index 719a0d7576..64c10d7b94 100644 --- a/grr/server/grr_response_server/databases/db_test_utils_test.py +++ b/grr/server/grr_response_server/databases/db_test_utils_test.py @@ -179,6 +179,21 @@ def testSupplied(self): self.assertEqual(client_id, "C.012345678ABCDEFAA") self.assertIsNotNone(db.ReadClientMetadata(client_id)) + def testInitialValues(self): + db = mem_db.InMemoryDB() + + client_id = db_test_utils.InitializeClient(db) + md = db.ReadClientMetadata(client_id) + self.assertIsNone(md.certificate) + self.assertIsNone(md.first_seen) + self.assertIsNone(md.ping) + self.assertIsNone(md.clock) + self.assertIsNone(md.last_foreman_time) + self.assertIsNone(md.last_crash_timestamp) + self.assertIsNone(md.startup_info_timestamp) + self.assertFalse(md.ip) + self.assertFalse(md.last_fleetspeak_validation_info) + class InitializeRRGClientTest(absltest.TestCase): diff --git a/grr/server/grr_response_server/databases/db_utils.py b/grr/server/grr_response_server/databases/db_utils.py index 14052a9638..8a3726b2a1 100644 --- a/grr/server/grr_response_server/databases/db_utils.py +++ b/grr/server/grr_response_server/databases/db_utils.py @@ -151,18 +151,10 @@ def IntToFlowID(flow_id): def HuntIDToInt(hunt_id): """Convert hunt id string to an integer.""" - # TODO(user): This code is only needed for a brief period of time when we - # allow running new rel-db flows with old aff4-based hunts. In this scenario - # parent_hunt_id is effectively not used, but it has to be an - # integer. Stripping "H:" from hunt ids then makes the rel-db happy. Remove - # this code when hunts are rel-db only. 
- if hunt_id.startswith("H:"): - hunt_id = hunt_id[2:] - try: return int(hunt_id or "0", 16) except ValueError as e: - raise HuntIDIsNotAnIntegerError(e) + raise HuntIDIsNotAnIntegerError(e) from e def IntToHuntID(hunt_id): @@ -173,7 +165,7 @@ def OutputPluginIDToInt(output_plugin_id): try: return int(output_plugin_id or "0", 16) except ValueError as e: - raise OutputPluginIDIsNotAnIntegerError(e) + raise OutputPluginIDIsNotAnIntegerError(e) from e def IntToOutputPluginID(output_plugin_id): diff --git a/grr/server/grr_response_server/databases/mem.py b/grr/server/grr_response_server/databases/mem.py index d6bfd54481..ffa802dd83 100644 --- a/grr/server/grr_response_server/databases/mem.py +++ b/grr/server/grr_response_server/databases/mem.py @@ -58,8 +58,6 @@ def _Init(self): self.approvals_by_username = {} self.blob_keys: dict[rdf_objects.BlobID, str] = {} self.clients = {} - self.client_action_requests = {} - self.client_action_request_leases = {} self.client_stats = collections.defaultdict(dict) self.crash_history = {} self.cronjob_leases = {} diff --git a/grr/server/grr_response_server/databases/mem_clients.py b/grr/server/grr_response_server/databases/mem_clients.py index 8b6e138a97..390d24086b 100644 --- a/grr/server/grr_response_server/databases/mem_clients.py +++ b/grr/server/grr_response_server/databases/mem_clients.py @@ -22,9 +22,9 @@ class InMemoryDBClientMixin(object): """InMemoryDB mixin for client related functions.""" @utils.Synchronized - def WriteClientMetadata( + def MultiWriteClientMetadata( self, - client_id: str, + client_ids: Collection[str], certificate: Optional[rdf_crypto.RDFX509Cert] = None, first_seen: Optional[rdfvalue.RDFDatetime] = None, last_ping: Optional[rdfvalue.RDFDatetime] = None, @@ -33,7 +33,7 @@ def WriteClientMetadata( last_foreman: Optional[rdfvalue.RDFDatetime] = None, fleetspeak_validation_info: Optional[Mapping[str, str]] = None, ) -> None: - """Write metadata about the client.""" + """Writes metadata about the clients.""" md = {} if certificate is not None: md["certificate"] = certificate @@ -53,15 +53,18 @@ def WriteClientMetadata( if last_foreman is not None: md["last_foreman_time"] = last_foreman - if fleetspeak_validation_info: - pb = rdf_client.FleetspeakValidationInfo.FromStringDict( - fleetspeak_validation_info) - md["last_fleetspeak_validation_info"] = pb.SerializeToBytes() - else: - # Write null for empty or non-existent validation info. - md["last_fleetspeak_validation_info"] = None + if fleetspeak_validation_info is not None: + if fleetspeak_validation_info: + pb = rdf_client.FleetspeakValidationInfo.FromStringDict( + fleetspeak_validation_info + ) + md["last_fleetspeak_validation_info"] = pb.SerializeToBytes() + else: + # Write null for empty or non-existent validation info. 
+ md["last_fleetspeak_validation_info"] = None - self.metadatas.setdefault(client_id, {}).update(md) + for client_id in client_ids: + self.metadatas.setdefault(client_id, {}).update(md) @utils.Synchronized def MultiReadClientMetadata(self, client_ids): @@ -542,8 +545,6 @@ def DeleteClient(self, client_id): self.flow_requests.pop(key) for key in [k for k in self.flow_processing_requests if k[0] == client_id]: self.flow_processing_requests.pop(key) - for key in [k for k in self.client_action_requests if k[0] == client_id]: - self.client_action_requests.pop(key) for kw in self.keywords: self.keywords[kw].pop(client_id, None) diff --git a/grr/server/grr_response_server/databases/mem_flows.py b/grr/server/grr_response_server/databases/mem_flows.py index ef8051deb8..b9ef03fd33 100644 --- a/grr/server/grr_response_server/databases/mem_flows.py +++ b/grr/server/grr_response_server/databases/mem_flows.py @@ -124,101 +124,6 @@ def _LeaseMessageHandlerRequests(self, lease_time, limit): return leased_requests - @utils.Synchronized - def ReadAllClientActionRequests(self, client_id): - """Reads all client action requests available for a given client_id.""" - res = [] - for key, orig_request in self.client_action_requests.items(): - request_client_id, _, _ = key - if request_client_id != client_id: - continue - - request = orig_request.Copy() - current_lease = self.client_action_request_leases.get(key) - request.ttl = db.Database.CLIENT_MESSAGES_TTL - if current_lease is not None: - request.leased_until, request.leased_by, leased_count = current_lease - request.ttl -= leased_count - else: - request.leased_until = None - request.leased_by = None - res.append(request) - - return res - - def _DeleteClientActionRequest(self, client_id, flow_id, request_id): - key = (client_id, flow_id, request_id) - self.client_action_requests.pop(key, None) - self.client_action_request_leases.pop(key, None) - - @utils.Synchronized - def DeleteClientActionRequests(self, requests): - """Deletes a list of client action requests from the db.""" - to_delete = [] - for r in requests: - to_delete.append((r.client_id, r.flow_id, r.request_id)) - - if len(set(to_delete)) != len(to_delete): - raise ValueError( - "Received multiple copies of the same action request to delete.") - - for client_id, flow_id, request_id in to_delete: - self._DeleteClientActionRequest(client_id, flow_id, request_id) - - @utils.Synchronized - def LeaseClientActionRequests(self, - client_id, - lease_time=None, - limit=sys.maxsize): - """Leases available client action requests for a client.""" - - leased_requests = [] - - now = rdfvalue.RDFDatetime.Now() - expiration_time = now + lease_time - process_id_str = utils.ProcessIdString() - - leases = self.client_action_request_leases - # Can't use an iterator here since the dict might change when requests get - # deleted. 
- for key, request in sorted(self.client_action_requests.items()): - if key[0] != client_id: - continue - - existing_lease = leases.get(key) - if not existing_lease or existing_lease[0] < now: - if existing_lease: - lease_count = existing_lease[-1] + 1 - if lease_count > db.Database.CLIENT_MESSAGES_TTL: - self._DeleteClientActionRequest(*key) - continue - else: - lease_count = 1 - - leases[key] = (expiration_time, process_id_str, lease_count) - request.leased_until = expiration_time - request.leased_by = process_id_str - request.ttl = db.Database.CLIENT_MESSAGES_TTL - lease_count - leased_requests.append(request) - if len(leased_requests) >= limit: - break - - return leased_requests - - @utils.Synchronized - def WriteClientActionRequests(self, requests): - """Writes messages that should go to the client to the db.""" - for r in requests: - req_dict = self.flow_requests.get((r.client_id, r.flow_id), {}) - if r.request_id not in req_dict: - request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests - ] - raise db.AtLeastOneUnknownRequestError(request_keys) - - for r in requests: - request_key = (r.client_id, r.flow_id, r.request_id) - self.client_action_requests[request_key] = r - @utils.Synchronized def WriteFlowObject(self, flow_obj, allow_update=True): """Writes a flow object to the database.""" @@ -466,7 +371,6 @@ def WriteFlowResponses(self, responses): if len(responses) == request.nr_responses_expected: request.needs_processing = True - self._DeleteClientActionRequest(client_id, flow_id, request_id) if flow.next_request_to_process == request_id: added_for_processing = True diff --git a/grr/server/grr_response_server/databases/mysql_clients.py b/grr/server/grr_response_server/databases/mysql_clients.py index a84f865669..91a53d6fdf 100644 --- a/grr/server/grr_response_server/databases/mysql_clients.py +++ b/grr/server/grr_response_server/databases/mysql_clients.py @@ -26,9 +26,9 @@ class MySQLDBClientMixin(object): """MySQLDataStore mixin for client related functions.""" @mysql_utils.WithTransaction() - def WriteClientMetadata( + def MultiWriteClientMetadata( self, - client_id: str, + client_ids: Collection[str], certificate: Optional[rdf_crypto.RDFX509Cert] = None, first_seen: Optional[rdfvalue.RDFDatetime] = None, last_ping: Optional[rdfvalue.RDFDatetime] = None, @@ -38,55 +38,77 @@ def WriteClientMetadata( fleetspeak_validation_info: Optional[Mapping[str, str]] = None, cursor: Optional[MySQLdb.cursors.Cursor] = None, ) -> None: - """Write metadata about the client.""" - placeholders = [] + """Writes metadata about the clients.""" + # Early return to avoid generating empty query. 
+ if not client_ids: + return + + common_placeholders = [] values = dict() + column_names = ["client_id"] - placeholders.append("%(client_id)s") - values["client_id"] = db_utils.ClientIDToInt(client_id) + for i, client_id in enumerate(client_ids): + values[f"client_id{i}"] = db_utils.ClientIDToInt(client_id) - if certificate: - placeholders.append("%(certificate)s") + if certificate is not None: + column_names.append("certificate") + common_placeholders.append("%(certificate)s") values["certificate"] = certificate.SerializeToBytes() if first_seen is not None: - placeholders.append("FROM_UNIXTIME(%(first_seen)s)") + column_names.append("first_seen") + common_placeholders.append("FROM_UNIXTIME(%(first_seen)s)") values["first_seen"] = mysql_utils.RDFDatetimeToTimestamp(first_seen) if last_ping is not None: - placeholders.append("FROM_UNIXTIME(%(last_ping)s)") + column_names.append("last_ping") + common_placeholders.append("FROM_UNIXTIME(%(last_ping)s)") values["last_ping"] = mysql_utils.RDFDatetimeToTimestamp(last_ping) - if last_clock: - placeholders.append("FROM_UNIXTIME(%(last_clock)s)") + if last_clock is not None: + column_names.append("last_clock") + common_placeholders.append("FROM_UNIXTIME(%(last_clock)s)") values["last_clock"] = mysql_utils.RDFDatetimeToTimestamp(last_clock) - if last_ip: - placeholders.append("%(last_ip)s") + if last_ip is not None: + column_names.append("last_ip") + common_placeholders.append("%(last_ip)s") values["last_ip"] = last_ip.SerializeToBytes() - if last_foreman: - placeholders.append("FROM_UNIXTIME(%(last_foreman)s)") + if last_foreman is not None: + column_names.append("last_foreman") + common_placeholders.append("FROM_UNIXTIME(%(last_foreman)s)") values["last_foreman"] = mysql_utils.RDFDatetimeToTimestamp(last_foreman) - placeholders.append("%(last_fleetspeak_validation_info)s") - if fleetspeak_validation_info: - pb = rdf_client.FleetspeakValidationInfo.FromStringDict( - fleetspeak_validation_info) - values["last_fleetspeak_validation_info"] = pb.SerializeToBytes() - else: - # Write null for empty or non-existent validation info. - values["last_fleetspeak_validation_info"] = None - - updates = [] - for column in values: - updates.append("{column} = VALUES({column})".format(column=column)) - - query = """ - INSERT INTO clients ({columns}) - VALUES ({placeholders}) - ON DUPLICATE KEY UPDATE {updates} - """.format( - columns=", ".join(values.keys()), - placeholders=", ".join(placeholders), - updates=", ".join(updates)) + if fleetspeak_validation_info is not None: + column_names.append("last_fleetspeak_validation_info") + common_placeholders.append("%(last_fleetspeak_validation_info)s") + if fleetspeak_validation_info: + pb = rdf_client.FleetspeakValidationInfo.FromStringDict( + fleetspeak_validation_info + ) + values["last_fleetspeak_validation_info"] = pb.SerializeToBytes() + else: + # Write null for empty or non-existent validation info. + values["last_fleetspeak_validation_info"] = None + + # For each client_id, we create a row tuple with a numbered client id + # placeholder followed by common placeholder values for the columns being + # updated. 
Example query string: + # INSERT INTO clients + # VALUES (%(client_id0)s, %(last_ip)s), (%(client_id1)s, %(last_ip)s) + # ON DUPLICATE KEY UPDATE + # client_id = VALUES(client_id), last_ip = VALUES(last_ip) + row_tuples = [] + for i, client_id in enumerate(client_ids): + row_placeholders = ", ".join([f"%(client_id{i})s"] + common_placeholders) + row_tuples.append(f"({row_placeholders})") + + column_updates = [f"{column} = VALUES({column})" for column in column_names] - cursor.execute(query, values) + cursor.execute( + f""" + INSERT INTO clients ({', '.join(column_names)}) + VALUES {', '.join(row_tuples)} + ON DUPLICATE KEY UPDATE {', '.join(column_updates)} + """, + values, + ) @mysql_utils.WithTransaction(readonly=True) def MultiReadClientMetadata(self, client_ids, cursor=None): diff --git a/grr/server/grr_response_server/databases/mysql_flows.py b/grr/server/grr_response_server/databases/mysql_flows.py index e42c163213..7c52e2942c 100644 --- a/grr/server/grr_response_server/databases/mysql_flows.py +++ b/grr/server/grr_response_server/databases/mysql_flows.py @@ -150,124 +150,6 @@ def _LeaseMessageHandlerRequests(self, lease_time, limit, cursor=None): return res - @mysql_utils.WithTransaction(readonly=True) - def ReadAllClientActionRequests(self, client_id, cursor=None): - """Reads all client messages available for a given client_id.""" - - query = ("SELECT request, UNIX_TIMESTAMP(leased_until), leased_by, " - "leased_count " - "FROM client_action_requests " - "WHERE client_id = %s") - - cursor.execute(query, [db_utils.ClientIDToInt(client_id)]) - - ret = [] - for req, leased_until, leased_by, leased_count in cursor.fetchall(): - request = rdf_flows.ClientActionRequest.FromSerializedBytes(req) - if leased_until is not None: - request.leased_by = leased_by - request.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until) - else: - request.leased_by = None - request.leased_until = None - request.ttl = db.Database.CLIENT_MESSAGES_TTL - leased_count - ret.append(request) - - return sorted(ret, key=lambda req: (req.flow_id, req.request_id)) - - def DeleteClientActionRequests(self, requests): - """Deletes a list of client messages from the db.""" - if not requests: - return - - to_delete = [] - for r in requests: - to_delete.append((r.client_id, r.flow_id, r.request_id)) - - if len(set(to_delete)) != len(to_delete): - raise ValueError( - "Received multiple copies of the same message to delete.") - - self._DeleteClientActionRequest(to_delete) - - @mysql_utils.WithTransaction() - def LeaseClientActionRequests(self, - client_id, - lease_time=None, - limit=None, - cursor=None): - """Leases available client messages for the client with the given id.""" - - now = rdfvalue.RDFDatetime.Now() - now_str = mysql_utils.RDFDatetimeToTimestamp(now) - expiry = now + lease_time - expiry_str = mysql_utils.RDFDatetimeToTimestamp(expiry) - proc_id_str = utils.ProcessIdString() - client_id_int = db_utils.ClientIDToInt(client_id) - - query = ("UPDATE client_action_requests " - "SET leased_until=FROM_UNIXTIME(%s), leased_by=%s, " - "leased_count=leased_count+1 " - "WHERE client_id=%s AND " - "(leased_until IS NULL OR leased_until < FROM_UNIXTIME(%s)) " - "LIMIT %s") - args = [expiry_str, proc_id_str, client_id_int, now_str, limit] - - num_leased = cursor.execute(query, args) - if num_leased == 0: - return [] - - query = ("SELECT request, leased_count FROM client_action_requests " - "WHERE client_id=%s AND leased_until=FROM_UNIXTIME(%s) " - "AND leased_by=%s") - - cursor.execute(query, [client_id_int, 
expiry_str, proc_id_str]) - - ret = [] - expired = [] - for req, leased_count in cursor.fetchall(): - request = rdf_flows.ClientActionRequest.FromSerializedBytes(req) - request.leased_by = proc_id_str - request.leased_until = expiry - request.ttl = db.Database.CLIENT_MESSAGES_TTL - leased_count - # > comparison since this check happens after the lease. - if leased_count > db.Database.CLIENT_MESSAGES_TTL: - expired.append((request.client_id, request.flow_id, request.request_id)) - else: - ret.append(request) - - if expired: - self._DeleteClientActionRequest(expired, cursor=cursor) - - return sorted(ret, key=lambda req: (req.flow_id, req.request_id)) - - @mysql_utils.WithTransaction() - def WriteClientActionRequests(self, requests, cursor=None): - """Writes messages that should go to the client to the db.""" - - query = ("INSERT IGNORE INTO client_action_requests " - "(client_id, flow_id, request_id, timestamp, request) " - "VALUES %s ON DUPLICATE KEY UPDATE " - "timestamp=VALUES(timestamp), request=VALUES(request)") - now = mysql_utils.RDFDatetimeToTimestamp(rdfvalue.RDFDatetime.Now()) - - value_templates = [] - args = [] - for r in requests: - args.extend([ - db_utils.ClientIDToInt(r.client_id), - db_utils.FlowIDToInt(r.flow_id), r.request_id, now, - r.SerializeToBytes() - ]) - value_templates.append("(%s, %s, %s, FROM_UNIXTIME(%s), %s)") - - query %= ",".join(value_templates) - try: - cursor.execute(query, args) - except MySQLdb.IntegrityError as e: - request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests] - raise db.AtLeastOneUnknownRequestError(request_keys=request_keys, cause=e) - @mysql_utils.WithTransaction() def WriteFlowObject(self, flow_obj, allow_update=True, cursor=None): """Writes a flow object to the database.""" @@ -735,22 +617,6 @@ def _WriteResponses(self, responses, cursor): else: logging.warning("Response for unknown request: %s", responses[0]) - @mysql_utils.WithTransaction() - def _DeleteClientActionRequest(self, to_delete, cursor=None): - """Builds deletes for client messages.""" - query = "DELETE FROM client_action_requests WHERE " - conditions = [] - args = [] - - for client_id, flow_id, request_id in to_delete: - conditions.append("(client_id=%s AND flow_id=%s AND request_id=%s)") - args.append(db_utils.ClientIDToInt(client_id)) - args.append(db_utils.FlowIDToInt(flow_id)) - args.append(request_id) - - query += " OR ".join(conditions) - cursor.execute(query, args) - @mysql_utils.WithTransaction() def _WriteFlowResponsesAndExpectedUpdates(self, responses, cursor=None): """Writes a flow responses and updates flow requests expected counts.""" @@ -943,13 +809,8 @@ def WriteFlowResponses(self, responses): return for batch in collection.Batch(responses, self._WRITE_ROWS_BATCH_SIZE): - self._WriteFlowResponsesAndExpectedUpdates(batch) - - completed_requests = self._UpdateRequestsAndScheduleFPRs(batch) - - if completed_requests: - self._DeleteClientActionRequest(completed_requests) + self._UpdateRequestsAndScheduleFPRs(batch) @mysql_utils.WithTransaction() def UpdateIncrementalFlowRequests( diff --git a/grr/server/grr_response_server/databases/mysql_migrations/0021.sql b/grr/server/grr_response_server/databases/mysql_migrations/0021.sql new file mode 100644 index 0000000000..3937de9111 --- /dev/null +++ b/grr/server/grr_response_server/databases/mysql_migrations/0021.sql @@ -0,0 +1,2 @@ +-- Drop default value for `clients.first_seen`. 
+ALTER TABLE clients MODIFY first_seen TIMESTAMP(6) NULL DEFAULT NULL; diff --git a/grr/server/grr_response_server/export_converters/memory.py b/grr/server/grr_response_server/export_converters/memory.py index dc63fe5f3f..7b624260e1 100644 --- a/grr/server/grr_response_server/export_converters/memory.py +++ b/grr/server/grr_response_server/export_converters/memory.py @@ -43,7 +43,8 @@ def Convert( rule_name=yara_match.rule_name, process_scan_time_us=value.scan_time_us, string_id=yara_string_match.string_id, - offset=yara_string_match.offset) + offset=yara_string_match.offset, + ) class ProcessMemoryErrorConverter(base.ExportConverter): diff --git a/grr/server/grr_response_server/flow_base.py b/grr/server/grr_response_server/flow_base.py index 68aba3d9c9..659ccc1077 100644 --- a/grr/server/grr_response_server/flow_base.py +++ b/grr/server/grr_response_server/flow_base.py @@ -3,6 +3,7 @@ import collections import functools import logging +import re import traceback from typing import Any, Callable, Iterator, Mapping, NamedTuple, Optional, Sequence, Tuple, Type @@ -33,7 +34,10 @@ from grr_response_proto import rrg_pb2 FLOW_STARTS = metrics.Counter("flow_starts", fields=[("flow", str)]) -FLOW_ERRORS = metrics.Counter("flow_errors", fields=[("flow", str)]) +FLOW_ERRORS = metrics.Counter( + "flow_errors", + fields=[("flow", str), ("is_child", bool), ("exception", str)], +) FLOW_COMPLETIONS = metrics.Counter("flow_completions", fields=[("flow", str)]) GRR_WORKER_STATES_RUN = metrics.Counter("grr_worker_states_run") HUNT_OUTPUT_PLUGIN_ERRORS = metrics.Counter( @@ -41,6 +45,24 @@ HUNT_RESULTS_RAN_THROUGH_PLUGIN = metrics.Counter( "hunt_results_ran_through_plugin", fields=[("plugin", str)]) +_METRICS_UNKNOWN_EXCEPTION = "Unknown" +# Captures the possible exception name (only group). String must start with a +# capitalized letter (only letters) followed by an opening parens. 
+# Should match: "SomeWord(" (captures "SomeWord"), "A(" (captures "A") +# Should NOT match: "(", " Space(", "Sep arate(", "startsWithLower(", "HasNum9(" +_LOOKS_LIKE_EXCEPTION = re.compile(r"^([A-Z][A-Za-z]*)\(.*") + + +def _ExtractExceptionName(error_message: Optional[str]) -> str: + if not error_message: + return _METRICS_UNKNOWN_EXCEPTION + + match = _LOOKS_LIKE_EXCEPTION.match(error_message) + if match is None: + return _METRICS_UNKNOWN_EXCEPTION + + return match.groups()[0] + class Error(Exception): """Base class for this package's exceptions.""" @@ -378,15 +400,17 @@ def CallClient(self, "Runtime limit exceeded for {} {}.".format( self.rdf_flow.flow_class_name, self.rdf_flow.flow_id)) - client_action_request = rdf_flows.ClientActionRequest( - client_id=self.rdf_flow.client_id, - flow_id=self.rdf_flow.flow_id, + stub = action_registry.ACTION_STUB_BY_ID[action_identifier] + client_action_request = rdf_flows.GrrMessage( + session_id="%s/%s" % (self.rdf_flow.client_id, self.rdf_flow.flow_id), + name=stub.__name__, request_id=outbound_id, - action_identifier=action_identifier, - action_args=request, - cpu_limit_ms=cpu_limit_ms, + payload=request, network_bytes_limit=network_bytes_limit, - runtime_limit_us=runtime_limit_us) + runtime_limit_us=runtime_limit_us, + ) + if cpu_limit_ms is not None: + client_action_request.cpu_limit = cpu_limit_ms / 1000.0 self.flow_requests.append(flow_request) self.client_action_requests.append(client_action_request) @@ -542,7 +566,10 @@ def Error(self, backtrace: Optional[str] = None, status: Optional[rdf_structs.EnumNamedValue] = None) -> None: """Terminates this flow with an error.""" - FLOW_ERRORS.Increment(fields=[self.__class__.__name__]) + flow_name = self.__class__.__name__ + is_child = bool(self.rdf_flow.parent_flow_id) + exception_name = _ExtractExceptionName(error_message) + FLOW_ERRORS.Increment(fields=[flow_name, is_child, exception_name]) client_id = self.rdf_flow.client_id flow_id = self.rdf_flow.flow_id @@ -750,7 +777,6 @@ def RunStateMethod( self.replies_to_process = [] except flow.FlowResourcesExceededError as e: - FLOW_ERRORS.Increment(fields=[self.rdf_flow.flow_class_name]) logging.info("Flow %s on %s exceeded resource limits: %s.", self.rdf_flow.flow_id, client_id, str(e)) self.Error(error_message=str(e)) @@ -758,8 +784,6 @@ def RunStateMethod( # to continue. Thus, we catch everything. 
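For illustration, a minimal standalone sketch (not part of the patch) of how the _LOOKS_LIKE_EXCEPTION pattern added above maps an error message to the "exception" field of the flow_errors metric; ExtractExceptionName here is a simplified stand-in for the patch's _ExtractExceptionName, and the sample messages are invented:

import re

# Same pattern as added to flow_base.py above: a capitalized, letters-only
# identifier at the start of the string, immediately followed by "(".
_LOOKS_LIKE_EXCEPTION = re.compile(r"^([A-Z][A-Za-z]*)\(.*")

def ExtractExceptionName(error_message):
  """Returns the leading exception name, or "Unknown" when none is found."""
  if not error_message:
    return "Unknown"
  match = _LOOKS_LIKE_EXCEPTION.match(error_message)
  return match.group(1) if match else "Unknown"

assert ExtractExceptionName("SomeWord(boom)") == "SomeWord"
assert ExtractExceptionName("A()") == "A"
assert ExtractExceptionName(" Space(") == "Unknown"
assert ExtractExceptionName("startsWithLower(") == "Unknown"
assert ExtractExceptionName("HasNum9(") == "Unknown"
assert ExtractExceptionName(None) == "Unknown"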
except Exception as e: # pylint: disable=broad-except msg = str(e) - FLOW_ERRORS.Increment(fields=[self.rdf_flow.flow_class_name]) - self.Error(error_message=msg, backtrace=traceback.format_exc()) def ProcessAllReadyRequests(self) -> Tuple[int, int]: @@ -894,8 +918,7 @@ def FlushQueuedMessages(self) -> None: if self.client_action_requests: client_id = self.rdf_flow.client_id for request in self.client_action_requests: - msg = rdf_flow_objects.GRRMessageFromClientActionRequest(request) - fleetspeak_utils.SendGrrMessageThroughFleetspeak(client_id, msg) + fleetspeak_utils.SendGrrMessageThroughFleetspeak(client_id, request) self.client_action_requests = [] @@ -1168,6 +1191,10 @@ def CreateFlowInstance(cls, flow_object: rdf_flow_objects.Flow) -> "FlowBase": flow_cls = FlowRegistry.FlowClassByName(flow_object.flow_class_name) return flow_cls(flow_object) + @classmethod + def CanUseViaAPI(cls) -> bool: + return bool(cls.category) + def UseProto2AnyResponses( state_method: Callable[ diff --git a/grr/server/grr_response_server/flow_base_test.py b/grr/server/grr_response_server/flow_base_test.py index 4e9c71fddd..0e4aa98973 100644 --- a/grr/server/grr_response_server/flow_base_test.py +++ b/grr/server/grr_response_server/flow_base_test.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from unittest import mock + from absl.testing import absltest from google.protobuf import any_pb2 @@ -7,6 +9,7 @@ from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_core.stats import default_stats_collector +from grr_response_core.stats import metrics from grr_response_core.stats import stats_collector_instance from grr_response_server import flow_base from grr_response_server import flow_responses @@ -14,10 +17,11 @@ from grr_response_server.databases import db_test_utils from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects from grr.test_lib import db_test_lib +from grr.test_lib import stats_test_lib from grr_response_proto import rrg_pb2 -class FlowBaseTest(absltest.TestCase): +class FlowBaseTest(absltest.TestCase, stats_test_lib.StatsCollectorTestMixin): class Flow(flow_base.FlowBase): pass @@ -324,6 +328,135 @@ def testCallRRGSupported(self, db: abstract_db.Database): # coverage and ensure that the call does not fail. 
flow.CallRRG(rrg_pb2.GET_SYSTEM_METADATA, empty_pb2.Empty()) + @db_test_lib.WithDatabase + def testErrorIncrementsMetricsWithExceptionName( + self, db: abstract_db.Database + ): + client_id = db_test_utils.InitializeClient(db) + + rdf_flow = rdf_flow_objects.Flow() + rdf_flow.client_id = client_id + rdf_flow.flow_id = self._FLOW_ID + + flow = FlowBaseTest.Flow(rdf_flow) + + with self.SetUpStatsCollector( + default_stats_collector.DefaultStatsCollector() + ): + fake_counter = metrics.Counter( + "fake", + fields=[("flow", str), ("is_child", bool), ("exception", str)], + ) + with mock.patch.object(flow_base, "FLOW_ERRORS", fake_counter): + # Make sure counter is set to zero + self.assertEqual( + 0, + fake_counter.GetValue( + fields=["Flow", False, "ErrLooksLikeException"] + ), + ) + # Flow fails with error msg + flow.Error("ErrLooksLikeException('should extract exception name')") + + self.assertEqual( + 1, + fake_counter.GetValue(fields=["Flow", False, "ErrLooksLikeException"]), + ) + + @db_test_lib.WithDatabase + def testErrorIncrementsMetricsNoMatch(self, db: abstract_db.Database): + client_id = db_test_utils.InitializeClient(db) + + rdf_flow = rdf_flow_objects.Flow() + rdf_flow.client_id = client_id + rdf_flow.flow_id = self._FLOW_ID + + flow = FlowBaseTest.Flow(rdf_flow) + + with self.SetUpStatsCollector( + default_stats_collector.DefaultStatsCollector() + ): + fake_counter = metrics.Counter( + "fake", + fields=[("flow", str), ("is_child", bool), ("exception", str)], + ) + with mock.patch.object(flow_base, "FLOW_ERRORS", fake_counter): + # Make sure counter is set to zero + self.assertEqual( + 0, + fake_counter.GetValue(fields=["Flow", False, "Unknown"]), + ) + # Flow fails with error msg + flow.Error("Doesn't match the regex") + + self.assertEqual( + 1, + fake_counter.GetValue(fields=["Flow", False, "Unknown"]), + ) + + @db_test_lib.WithDatabase + def testErrorIncrementsMetricsNoName(self, db: abstract_db.Database): + client_id = db_test_utils.InitializeClient(db) + + rdf_flow = rdf_flow_objects.Flow() + rdf_flow.client_id = client_id + rdf_flow.flow_id = self._FLOW_ID + + flow = FlowBaseTest.Flow(rdf_flow) + + with self.SetUpStatsCollector( + default_stats_collector.DefaultStatsCollector() + ): + fake_counter = metrics.Counter( + "fake", + fields=[("flow", str), ("is_child", bool), ("exception", str)], + ) + with mock.patch.object(flow_base, "FLOW_ERRORS", fake_counter): + # Make sure counter is set to zero + self.assertEqual( + 0, + fake_counter.GetValue(fields=["Flow", False, "Unknown"]), + ) + # Flow fails with error msg + flow.Error() + + self.assertEqual( + 1, + fake_counter.GetValue(fields=["Flow", False, "Unknown"]), + ) + + @db_test_lib.WithDatabase + def testErrorIncrementsMetricsChild(self, db: abstract_db.Database): + client_id = db_test_utils.InitializeClient(db) + + rdf_flow = rdf_flow_objects.Flow() + rdf_flow.client_id = client_id + rdf_flow.flow_id = self._FLOW_ID + rdf_flow.parent_flow_id = "NOT EMPTY" + + flow = FlowBaseTest.Flow(rdf_flow) + + with self.SetUpStatsCollector( + default_stats_collector.DefaultStatsCollector() + ): + fake_counter = metrics.Counter( + "fake", + fields=[("flow", str), ("is_child", bool), ("exception", str)], + ) + with mock.patch.object(flow_base, "FLOW_ERRORS", fake_counter): + # Make sure counter is set to zero + self.assertEqual( + 0, + fake_counter.GetValue(fields=["Flow", True, "Unknown"]), + ) + # Flow fails with error msg + flow.Error() + + self.assertEqual( + 1, + fake_counter.GetValue(fields=["Flow", True, "Unknown"]), + ) + if
__name__ == "__main__": absltest.main() diff --git a/grr/server/grr_response_server/flow_test.py b/grr/server/grr_response_server/flow_test.py index 81fc398017..001fed8f53 100644 --- a/grr/server/grr_response_server/flow_test.py +++ b/grr/server/grr_response_server/flow_test.py @@ -22,7 +22,7 @@ from grr_response_server import worker_lib from grr_response_server.databases import db from grr_response_server.flows import file -from grr_response_server.flows.general import transfer +from grr_response_server.flows.general import file_finder from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin @@ -485,19 +485,18 @@ def RunFlow(self, client_mock=None): if flow_args is None: - flow_args = transfer.GetFileArgs( - pathspec=rdf_paths.PathSpec( - path="/tmp/evil.txt", pathtype=rdf_paths.PathSpec.PathType.OS)) + flow_args = rdf_file_finder.FileFinderArgs(paths=["/tmp/evil.txt"]) if client_mock is None: client_mock = hunt_test_lib.SampleHuntMock(failrate=2) flow_urn = flow_test_lib.StartAndRunFlow( - flow_cls or transfer.GetFile, + flow_cls or file_finder.FileFinder, client_mock=client_mock, client_id=self.client_id, flow_args=flow_args, - output_plugins=output_plugins) + output_plugins=output_plugins, + ) return flow_urn diff --git a/grr/server/grr_response_server/flows/file.py b/grr/server/grr_response_server/flows/file.py index c09e716b26..0e76e72ac5 100644 --- a/grr/server/grr_response_server/flows/file.py +++ b/grr/server/grr_response_server/flows/file.py @@ -34,7 +34,7 @@ class CollectFilesByKnownPath(transfer.MultiGetFileLogic, flow_base.FlowBase): def GetProgress(self) -> rdf_file_finder.CollectFilesByKnownPathProgress: return self.state.progress - def Start(self): + def Start(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks super().Start(file_size=_MAX_FILE_SIZE) self.state.progress = rdf_file_finder.CollectFilesByKnownPathProgress( @@ -194,7 +194,7 @@ class CollectMultipleFiles(transfer.MultiGetFileLogic, flow_base.FlowBase): def GetProgress(self) -> rdf_file_finder.CollectMultipleFilesProgress: return self.state.progress - def Start(self): + def Start(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """See base class.""" super().Start(file_size=self.MAX_FILE_SIZE) @@ -370,9 +370,7 @@ def ProcessResponses(self, responses): raise flow_base.FlowError(responses.status.error_message) for response in responses: - result = rdf_client_fs.StatEntry(pathspec=response.stat_entry.pathspec) - - self.SendReply(result) + self.SendReply(response.stat_entry) class HashMultipleFiles(transfer.MultiGetFileLogic, flow_base.FlowBase): @@ -391,7 +389,7 @@ class HashMultipleFiles(transfer.MultiGetFileLogic, flow_base.FlowBase): def GetProgress(self) -> rdf_file_finder.HashMultipleFilesProgress: return self.state.progress - def Start(self): + def Start(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks """See base class.""" super().Start(file_size=self.MAX_FILE_SIZE) diff --git a/grr/server/grr_response_server/flows/file_test.py b/grr/server/grr_response_server/flows/file_test.py index 028375bdc9..03fe207eb4 100644 --- a/grr/server/grr_response_server/flows/file_test.py +++ b/grr/server/grr_response_server/flows/file_test.py @@ -5,6 +5,7 @@ import contextlib import hashlib import os +import time from typing import List from unittest import mock @@ -637,15 
+638,16 @@ def _GetCondition(condition_type): class TestStatMultipleFiles(flow_test_lib.FlowTestsBaseclass): - def setUp(self): super().setUp() self.client_id = self.SetupClient(0) self.client_mock = action_mocks.CollectMultipleFilesClientMock() def testReturnsSingleFileStat(self): + timestamp_before_file_creation = rdfvalue.RDFDatetimeSeconds(time.time()) + temp_bar_file_content = b"bar" temp_bar_file = self.create_tempfile() - temp_bar_file.write_bytes(b"bar") + temp_bar_file.write_bytes(temp_bar_file_content) file_bar_path = temp_bar_file.full_path flow_id = flow_test_lib.TestFlowHelper( @@ -658,20 +660,44 @@ def testReturnsSingleFileStat(self): results = flow_test_lib.GetFlowResults(self.client_id, flow_id) + flow_finished_timestamp = rdfvalue.RDFDatetimeSeconds(time.time()) + self.assertLen(results, 1) self.assertEqual( results[0].pathspec.pathtype, rdf_paths.PathSpec.PathType.OS, ) self.assertEqual(file_bar_path, results[0].pathspec.path) + self.assertEqual(results[0].st_size, len(temp_bar_file_content)) + + self.assertBetween( + results[0].st_atime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) + + self.assertBetween( + results[0].st_mtime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) + + self.assertBetween( + results[0].st_ctime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) def testReturnsMultipleFileStats(self): + timestamp_before_file_creation = rdfvalue.RDFDatetimeSeconds(time.time()) + temp_bar_file_content = b"bar" temp_bar_file = self.create_tempfile() - temp_bar_file.write_bytes(b"bar") + temp_bar_file.write_bytes(temp_bar_file_content) file_bar_path = temp_bar_file.full_path + temp_foo_file_content = b"foo" temp_foo_file = self.create_tempfile() - temp_foo_file.write_bytes(b"foo") + temp_foo_file.write_bytes(temp_foo_file_content) file_foo_path = temp_foo_file.full_path flow_id = flow_test_lib.TestFlowHelper( @@ -684,6 +710,8 @@ def testReturnsMultipleFileStats(self): results = flow_test_lib.GetFlowResults(self.client_id, flow_id) + flow_finished_timestamp = rdfvalue.RDFDatetimeSeconds(time.time()) + self.assertLen(results, 2) self.assertEqual(results[0].pathspec.path, file_bar_path) @@ -691,12 +719,48 @@ def testReturnsMultipleFileStats(self): results[0].pathspec.pathtype, rdf_paths.PathSpec.PathType.OS, ) + self.assertEqual(results[0].st_size, len(temp_bar_file_content)) + self.assertBetween( + results[0].st_atime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) + + self.assertBetween( + results[0].st_mtime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) + + self.assertBetween( + results[0].st_ctime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) self.assertEqual(results[1].pathspec.path, file_foo_path) self.assertEqual( results[1].pathspec.pathtype, rdf_paths.PathSpec.PathType.OS, ) + self.assertEqual(results[1].st_size, len(temp_foo_file_content)) + self.assertBetween( + results[1].st_atime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) + + self.assertBetween( + results[1].st_mtime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) + + self.assertBetween( + results[1].st_ctime, + timestamp_before_file_creation, + flow_finished_timestamp, + ) def testFileNotFound(self): temp_dir = self.create_tempdir() diff --git a/grr/server/grr_response_server/flows/general/administrative.py b/grr/server/grr_response_server/flows/general/administrative.py index f471d5afc4..fe36a36cf6 100644 ---
a/grr/server/grr_response_server/flows/general/administrative.py +++ b/grr/server/grr_response_server/flows/general/administrative.py @@ -1,7 +1,6 @@ #!/usr/bin/env python """Administrative flows for managing the clients state.""" import logging -import os import shlex import time from typing import Optional, Text, Tuple, Type @@ -69,7 +68,7 @@ class ClientCrashHandler(events.EventListener):

GRR client crash report.

Client {{ client_id }} ({{ hostname }}) just crashed while executing an action. -Click <a href='{{ admin_ui }}/#/clients/{{ client_id }}'>here</a> to access this machine. +Click <a href='{{ admin_ui }}/v2/clients/{{ client_id }}'>here</a> to access this machine.

Thanks,

{{ signature }}

@@ -77,7 +76,8 @@ class ClientCrashHandler(events.EventListener): {{ nanny_msg }} """, - autoescape=True) + autoescape=True, + ) def ProcessEvents(self, msgs=None, publisher_username=None): """Processes this event.""" @@ -298,42 +298,6 @@ def Done(self, responses): self.Log(response.data) -class UninstallArgs(rdf_structs.RDFProtoStruct): - protobuf = flows_pb2.UninstallArgs - - -class Uninstall(flow_base.FlowBase): - """Removes the persistence mechanism which the client uses at boot. - - For Windows and OSX, this will disable the service, and then stop the service. - For Linux this flow will fail as we haven't implemented it yet :) - """ - - category = "/Administrative/" - args_type = UninstallArgs - - def Start(self): - """Start the flow and determine OS support.""" - system = self.client_os - - if system == "Darwin" or system == "Windows": - self.CallClient(server_stubs.Uninstall, next_state=self.Kill.__name__) - else: - self.Log("Unsupported platform for Uninstall") - - def Kill(self, responses): - """Call the kill function on the client.""" - if not responses.success: - self.Log("Failed to uninstall client.") - elif self.args.kill: - self.CallClient(server_stubs.Kill, next_state=self.Confirmation.__name__) - - def Confirmation(self, responses): - """Confirmation of kill.""" - if not responses.success: - self.Log("Kill failed on the client.") - - class Kill(flow_base.FlowBase): """Terminate a running client (does not disable, just kill).""" @@ -506,14 +470,15 @@ class OnlineNotification(flow_base.FlowBase):

Client {{ client_id }} ({{ hostname }}) just came online. Click - <a href='{{ admin_ui }}/#/clients/{{ client_id }}'>here</a> to access this machine. + <a href='{{ admin_ui }}/v2/clients/{{ client_id }}'>here</a> to access this machine.
This notification was created by {{ creator }}.

Thanks,

{{ signature }}

""", - autoescape=True) + autoescape=True, + ) args_type = OnlineNotificationArgs @@ -558,69 +523,6 @@ def SendMail(self, responses): self.args.email, "grr-noreply", subject, body, is_html=True) -class UpdateClientArgs(rdf_structs.RDFProtoStruct): - protobuf = flows_pb2.UpdateClientArgs - rdf_deps = [] - - -class UpdateClient(RecursiveBlobUploadMixin, flow_base.FlowBase): - """Updates the GRR client to a new version replacing the current client. - - This will execute the specified installer on the client and then run - an Interrogate flow. - - The new installer's binary has to be uploaded to GRR (as a binary, not as - a Python hack) and must be signed using the exec signing key. - - Signing and upload of the file is done with grr_config_updater or through - the API. - """ - - category = "/Administrative/" - - args_type = UpdateClientArgs - - def GenerateUploadRequest( - self, offset: int, file_size: int, blob: rdf_crypto.SignedBlob - ) -> Tuple[rdf_structs.RDFProtoStruct, Type[server_stubs.ClientActionStub]]: - request = rdf_client_action.ExecuteBinaryRequest( - executable=blob, - offset=offset, - write_path=self.state.write_path, - more_data=(offset + len(blob.data) < file_size), - use_client_env=False) - - return request, server_stubs.UpdateAgent - - @property - def _binary_id(self): - return rdf_objects.SignedBinaryID( - binary_type=rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE, - path=self.args.binary_path) - - def Start(self): - """Start.""" - if not self.args.binary_path: - raise flow_base.FlowError("Installer binary path is not specified.") - - self.state.write_path = "%d_%s" % (int( - time.time()), os.path.basename(self.args.binary_path)) - - self.StartBlobsUpload(self._binary_id, self.Interrogate.__name__) - - def Interrogate(self, responses): - if not responses.success: - raise flow_base.FlowError("Installer reported an error: %s" % - responses.status) - - self.Log("Installer completed.") - self.CallFlow(discovery.Interrogate.__name__, next_state=self.End.__name__) - - def End(self, responses): - if not responses.success: - raise flow_base.FlowError(responses.status) - - class NannyMessageHandlerMixin(object): """A listener for nanny messages.""" @@ -632,12 +534,13 @@ class NannyMessageHandlerMixin(object):
{{ message }}
-Click <a href='{{ admin_ui }}/#/clients/{{ client_id }}'>here</a> to access this machine. +Click <a href='{{ admin_ui }}/v2/clients/{{ client_id }}'>here</a> to access this machine.

{{ signature }}

""", - autoescape=True) + autoescape=True, + ) subject = "GRR nanny message received from %s." @@ -704,12 +607,13 @@ class ClientAlertHandlerMixin(NannyMessageHandlerMixin):
{{ message }}
-Click <a href='{{ admin_ui }}/#/clients/{{ client_id }}'>here</a> to access this machine. +Click <a href='{{ admin_ui }}/v2/clients/{{ client_id }}'>here</a> to access this machine.

{{ signature }}

""", - autoescape=True) + autoescape=True, + ) subject = "GRR client message received from %s." @@ -754,6 +658,13 @@ def WriteClientStartupInfo(self, client_id, new_si): index = client_index.ClientIndex() index.AddClientLabels(client_id, labels) + # Reset foreman rules check so active hunts can match against the new + # data + data_store.REL_DB.WriteClientMetadata( + client_id, + last_foreman=rdfvalue.RDFDatetime.EarliestDatabaseSafeValue(), + ) + except db.UnknownClientError: # On first contact with a new client, this write will fail. logging.info("Can't write StartupInfo for unknown client %s", client_id) @@ -803,45 +714,6 @@ def _IsInterrogateNeeded(self, client_id: str, return True -class KeepAliveArgs(rdf_structs.RDFProtoStruct): - protobuf = flows_pb2.KeepAliveArgs - rdf_deps = [ - rdfvalue.DurationSeconds, - ] - - -class KeepAlive(flow_base.FlowBase): - """Requests that the clients stays alive for a period of time.""" - - category = "/Administrative/" - behaviours = flow_base.BEHAVIOUR_BASIC - - sleep_time = 60 - args_type = KeepAliveArgs - - def Start(self): - self.state.end_time = self.args.duration.Expiry() - self.CallStateInline(next_state=self.SendMessage.__name__) - - def SendMessage(self, responses): - if not responses.success: - self.Log(responses.status.error_message) - raise flow_base.FlowError(responses.status.error_message) - - self.CallClient( - server_stubs.Echo, data="Wake up!", next_state=self.Sleep.__name__) - - def Sleep(self, responses): - if not responses.success: - self.Log(responses.status.error_message) - raise flow_base.FlowError(responses.status.error_message) - - if rdfvalue.RDFDatetime.Now() < self.state.end_time - self.sleep_time: - start_time = rdfvalue.RDFDatetime.Now() + self.sleep_time - self.CallState( - next_state=self.SendMessage.__name__, start_time=start_time) - - class LaunchBinaryArgs(rdf_structs.RDFProtoStruct): protobuf = flows_pb2.LaunchBinaryArgs rdf_deps = [ diff --git a/grr/server/grr_response_server/flows/general/administrative_test.py b/grr/server/grr_response_server/flows/general/administrative_test.py index be5474e24e..ccabb810e5 100644 --- a/grr/server/grr_response_server/flows/general/administrative_test.py +++ b/grr/server/grr_response_server/flows/general/administrative_test.py @@ -1,11 +1,10 @@ #!/usr/bin/env python """Tests for administrative flows.""" -import os +import datetime import subprocess import sys import tempfile -import time from unittest import mock from absl import app @@ -54,20 +53,6 @@ def Start(self): action_registry.ACTION_STUB_BY_ID[self.args.action], next_state="End") -class KeepAliveFlowTest(flow_test_lib.FlowTestsBaseclass): - """Tests for the KeepAlive flow.""" - - def testKeepAliveRunsSuccessfully(self): - client_id = self.SetupClient(0) - client_mock = action_mocks.ActionMock(admin.Echo) - flow_test_lib.TestFlowHelper( - administrative.KeepAlive.__name__, - duration=rdfvalue.Duration.From(1, rdfvalue.SECONDS), - client_id=client_id, - client_mock=client_mock, - creator=self.test_username) - - class TestAdministrativeFlows(flow_test_lib.FlowTestsBaseclass, hunt_test_lib.StandardHuntTestMixin): """Tests the administrative flows.""" @@ -169,8 +154,10 @@ def SendEmail(address, sender, title, message, **_): # Make sure the flow state is included in the email message. 
self.assertIn("Host-0.example.com", email_message["message"]) - self.assertIn("http://localhost:8000/#/clients/C.1000000000000000", - email_message["message"]) + self.assertIn( + "http://localhost:8000/v2/clients/C.1000000000000000", + email_message["message"], + ) self.assertIn(client_id, email_message["title"]) rel_flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id) @@ -429,50 +416,6 @@ def Run(self, args): command_line="--bar --baz", creator=self.test_username) - def testUpdateClient(self): - client_mock = action_mocks.UpdateAgentClientMock() - fake_installer = b"FakeGRRDebInstaller" * 20 - upload_path = signed_binary_utils.GetAFF4ExecutablesRoot().Add( - config.CONFIG["Client.platform"]).Add("test.deb") - maintenance_utils.UploadSignedConfigBlob( - fake_installer, aff4_path=upload_path, limit=100) - - blob_list, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( - upload_path) - self.assertLen(list(blob_list), 4) - - acl_test_lib.CreateAdminUser(self.test_username) - - flow_test_lib.TestFlowHelper( - administrative.UpdateClient.__name__, - client_mock, - client_id=self.SetupClient(0, system=""), - binary_path=os.path.join(config.CONFIG["Client.platform"], "test.deb"), - creator=self.test_username) - self.assertEqual(client_mock.GetDownloadedFileContents(), fake_installer) - - def testUpdateClientSingleBlob(self): - client_mock = action_mocks.UpdateAgentClientMock() - fake_installer = b"FakeGRRDebInstaller" * 20 - upload_path = signed_binary_utils.GetAFF4ExecutablesRoot().Add( - config.CONFIG["Client.platform"]).Add("test.deb") - maintenance_utils.UploadSignedConfigBlob( - fake_installer, aff4_path=upload_path, limit=1000) - - blob_list, _ = signed_binary_utils.FetchBlobsForSignedBinaryByURN( - upload_path) - self.assertLen(list(blob_list), 1) - - acl_test_lib.CreateAdminUser(self.test_username) - - flow_test_lib.TestFlowHelper( - administrative.UpdateClient.__name__, - client_mock, - client_id=self.SetupClient(0, system=""), - binary_path=os.path.join(config.CONFIG["Client.platform"], "test.deb"), - creator=self.test_username) - self.assertEqual(client_mock.GetDownloadedFileContents(), fake_installer) - def testGetClientStats(self): client_id = self.SetupClient(0) @@ -589,7 +532,7 @@ def testStartupHandler(self): self.assertNotEqual(new_si.boot_time, si.boot_time) # Now set a new client build time. - build_time = time.strftime("%a %b %d %H:%M:%S %Y") + build_time = datetime.datetime.now(datetime.timezone.utc).isoformat() with test_lib.ConfigOverrider({"Client.build_time": build_time}): # Run it again - this should now update the client info. @@ -837,6 +780,26 @@ def SendEmail(address, sender, title, message, **_): self._CheckAlertEmail(client_id, client_message, email_dict) + def testForemanTimeIsResetOnClientStartupInfoWrite(self): + client_id = self.SetupClient(0) + reset_time = rdfvalue.RDFDatetime.EarliestDatabaseSafeValue() + later_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600) + + data_store.REL_DB.WriteClientMetadata(client_id, last_foreman=later_time) + self._RunSendStartupInfo(client_id) + + md = data_store.REL_DB.ReadClientMetadata(client_id) + self.assertIsNotNone(md.last_foreman_time) + self.assertEqual(md.last_foreman_time, reset_time) + + # Run it again - this should not update any record. 
+ data_store.REL_DB.WriteClientMetadata(client_id, last_foreman=later_time) + self._RunSendStartupInfo(client_id) + + md = data_store.REL_DB.ReadClientMetadata(client_id) + self.assertIsNotNone(md.last_foreman_time) + self.assertEqual(md.last_foreman_time, later_time) + def main(argv): # Run the full test suite diff --git a/grr/server/grr_response_server/flows/general/apple_firmware.py b/grr/server/grr_response_server/flows/general/apple_firmware.py deleted file mode 100644 index 2fa844fdfb..0000000000 --- a/grr/server/grr_response_server/flows/general/apple_firmware.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python -"""A flow to collect eficheck output.""" - -from grr_response_core.lib.parsers import eficheck_parser -from grr_response_core.lib.rdfvalues import apple_firmware as rdf_apple_firmware -from grr_response_server import flow_base -from grr_response_server import server_stubs -from grr_response_server.flows.general import transfer - - -class CollectEfiHashes(flow_base.FlowBase): - """Collect the hashes of the EFI volumes (MacOS only). - - This flow will run the eficheck binary on the host to extract a list of - hashes for each volume on the flash. This flow provides a fast method - to verify the system firmware. However, it does not provide further data - should some hashes not match. In this case, please use the DumpEfiImage - flow to retrieve the full firmware image and perform further investigation. - """ - - category = "/Collectors/" - args_type = rdf_apple_firmware.EficheckFlowArgs - result_types = (rdf_apple_firmware.EfiCollection,) - behaviours = flow_base.BEHAVIOUR_BASIC - - def Start(self): - """Call the CollectEfiHash client action.""" - self.CallClient( - server_stubs.EficheckCollectHashes, - cmd_path=self.args.cmd_path, - next_state=self.CollectedHashes.__name__) - - def CollectedHashes(self, responses): - """Process the output of eficheck.""" - if not responses.success: - raise flow_base.FlowError("Unable to collect the hashes: %s" % - responses.status) - elif not responses: - raise flow_base.FlowError("No hash collected.") - else: - for collect_response in responses: - exec_response = collect_response.response - if exec_response.exit_status: - self.Log(exec_response.stdout) - self.Log(exec_response.stderr) - err_msg = ("Unable to collect the hashes. " - "Exit status = %d") % exec_response.exit_status - raise flow_base.FlowError(err_msg) - parser = eficheck_parser.EficheckCmdParser() - for result in parser.Parse("eficheck", ["--show-hashes"], - exec_response.stdout, exec_response.stderr, - exec_response.exit_status, None): - result.boot_rom_version = collect_response.boot_rom_version - result.eficheck_version = collect_response.eficheck_version - self.SendReply(result) - - -class DumpEfiImage(flow_base.FlowBase): - """Dump the Flash Image (MacOS only). - - This flow will use eficheck to extract a copy of the flash image from the - host. For a quick verification, consider using the CollectEfiHashes flow - first. 
- """ - - category = "/Collectors/" - args_type = rdf_apple_firmware.EficheckFlowArgs - result_types = (rdf_apple_firmware.DumpEfiImageResponse,) - behaviours = flow_base.BEHAVIOUR_BASIC - - def Start(self): - """Call the DumpEficheckImage client action.""" - self.CallClient( - server_stubs.EficheckDumpImage, - cmd_path=self.args.cmd_path, - next_state=self.CollectedImage.__name__) - - def CollectedImage(self, responses): - """Process the output of eficheck.""" - if not responses.success: - raise flow_base.FlowError("Unable to create the flash image: %s" % - responses.status) - for img_response in responses: - exec_response = img_response.response - if exec_response.stdout: - self.Log("stdout = %s" % exec_response.stdout) - if exec_response.stderr: - self.Log("stderr = %s" % exec_response.stderr) - if exec_response.exit_status: - err_msg = ("Unable to dump the flash image. " - "Exit status = %d") % exec_response.exit_status - raise flow_base.FlowError(err_msg) - if img_response.path: - image_path = img_response.path - self.SendReply(img_response) - self.CallFlow( - transfer.MultiGetFile.__name__, - pathspecs=[image_path], - next_state=self.DeleteTemporaryDir.__name__) - - def DeleteTemporaryDir(self, responses): - """Remove the temporary image from the client.""" - if not responses.success: - raise flow_base.FlowError("Unable to collect the flash image: %s" % - responses.status) - response = responses.First() - if not response.pathspec: - raise flow_base.FlowError("Empty pathspec: %s" % str(response)) - - # Clean up the temporary image from the client. - self.CallClient( - server_stubs.DeleteGRRTempFiles, - response.pathspec, - next_state=self.TemporaryImageRemoved.__name__) - - def TemporaryImageRemoved(self, responses): - """Verify that the temporary image has been removed successfully.""" - if not responses.success: - raise flow_base.FlowError("Unable to delete the temporary flash image: " - "%s" % responses.status) diff --git a/grr/server/grr_response_server/flows/general/apple_firmware_test.py b/grr/server/grr_response_server/flows/general/apple_firmware_test.py deleted file mode 100644 index 90d56f964b..0000000000 --- a/grr/server/grr_response_server/flows/general/apple_firmware_test.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python -"""Tests for eficheck flows.""" - -from absl import app - -from grr_response_client.client_actions import standard -from grr_response_client.client_actions import tempfiles -from grr_response_core.lib.rdfvalues import apple_firmware as rdf_apple_firmware -from grr_response_core.lib.rdfvalues import client_action as rdf_client_action -from grr_response_server.flows.general import apple_firmware -from grr.test_lib import action_mocks -from grr.test_lib import flow_test_lib -from grr.test_lib import test_lib - - -class CollectEfiHashesMock(action_mocks.ActionMock): - - def EficheckCollectHashes(self, args): - stdout = ( - b"01:00:00:00190048:00003c5f:" - b"4d37da42-3a0c-4eda-b9eb-bc0e1db4713b:" - b"03a3fb4ca9b65be048b04e44ab5d1dd8e1af1ca9d1f53a5e96e8ae0125a02bb2") - exec_response = rdf_client_action.ExecuteBinaryResponse( - stdout=stdout, exit_status=0) - response = rdf_apple_firmware.CollectEfiHashesResponse( - eficheck_version="1.9.6", - boot_rom_version="MBP101.B00", - response=exec_response) - return [response] - - -class CollectEfiHashesFailMock(CollectEfiHashesMock): - - def EficheckCollectHashes(self, args): - stderr = b"Unable to collect the hashes" - exec_response = rdf_client_action.ExecuteBinaryResponse( - stderr=stderr, exit_status=-1) - 
response = rdf_apple_firmware.CollectEfiHashesResponse( - response=exec_response) - return [response] - - -class CollectEfiNoHashesMock(CollectEfiHashesMock): - - def EficheckCollectHashes(self, args): - return [] - - -class CollectEfiHashesTest(flow_test_lib.FlowTestsBaseclass): - - def setUp(self): - super().setUp() - self.client_id = self.SetupClient(0, system="Darwin") - - def testCollectHashes(self): - """Tests Collect hashes.""" - client_mock = CollectEfiHashesMock() - - flow_id = flow_test_lib.TestFlowHelper( - apple_firmware.CollectEfiHashes.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username) - - results = flow_test_lib.GetFlowResults(self.client_id, flow_id) - - self.assertLen(results, 1) - efi, = results - self.assertEqual(efi.boot_rom_version, "MBP101.B00") - self.assertEqual(efi.eficheck_version, "1.9.6") - self.assertLen(efi.entries, 1) - self.assertEqual(efi.entries[0].guid, - "4d37da42-3a0c-4eda-b9eb-bc0e1db4713b") - - def testCollectHashesError(self): - """Tests fail collection.""" - client_mock = CollectEfiHashesFailMock() - - with self.assertRaises(RuntimeError) as err: - with test_lib.SuppressLogs(): - flow_test_lib.TestFlowHelper( - apple_firmware.CollectEfiHashes.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username) - - self.assertIn("Unable to collect the hashes.", str(err.exception)) - - def testCollectNoHashesError(self): - """Tests exception when no results is returned.""" - client_mock = CollectEfiNoHashesMock() - - with self.assertRaises(RuntimeError) as err: - with test_lib.SuppressLogs(): - flow_test_lib.TestFlowHelper( - apple_firmware.CollectEfiHashes.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username) - - self.assertIn("No hash collected.", str(err.exception)) - - -class DumpEfiImageMock(action_mocks.ActionMock): - - def __init__(self, *args, **kwargs): - super().__init__(standard.HashBuffer, standard.HashFile, - standard.GetFileStat, standard.TransferBuffer, - tempfiles.DeleteGRRTempFiles) - - def EficheckDumpImage(self, args): - flash_fd, flash_path = tempfiles.CreateGRRTempFileVFS() - flash_fd.close() - stdout = "Image successfully written to firmware.bin." - exec_response = rdf_client_action.ExecuteBinaryResponse( - stdout=stdout.encode("utf-8"), exit_status=0) - response = rdf_apple_firmware.DumpEfiImageResponse( - eficheck_version="1.9.6", response=exec_response, path=flash_path) - return [response] - - -class DumpEfiImageFailMock(action_mocks.ActionMock): - - def EficheckDumpImage(self, args): - stderr = "Unable to connect to the kernel driver." - exec_response = rdf_client_action.ExecuteBinaryResponse( - stderr=stderr.encode("utf-8"), exit_status=1) - response = rdf_apple_firmware.DumpEfiImageResponse( - eficheck_version="1.9.6", response=exec_response) - return [response] - - -class DumpEfiImageTest(flow_test_lib.FlowTestsBaseclass): - - def setUp(self): - super().setUp() - self.client_id = self.SetupClient(0, system="Darwin") - - def testDumpImage(self): - """Tests EFI dump.""" - client_mock = DumpEfiImageMock() - - flow_id = flow_test_lib.TestFlowHelper( - apple_firmware.DumpEfiImage.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username) - - # Check the output of the flow. 
- results = flow_test_lib.GetFlowResults(self.client_id, flow_id) - - self.assertLen(results, 1) - dump_response, = results - self.assertEqual(dump_response.eficheck_version, "1.9.6") - self.assertEqual(dump_response.response.exit_status, 0) - - def testDumpImageFail(self): - """Tests EFI Failed dump.""" - client_mock = DumpEfiImageFailMock() - - with self.assertRaises(RuntimeError) as err: - with test_lib.SuppressLogs(): - flow_test_lib.TestFlowHelper( - apple_firmware.DumpEfiImage.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username) - - self.assertIn("Unable to dump the flash image", str(err.exception)) - - -def main(argv): - # Run the full test suite - test_lib.main(argv) - - -if __name__ == "__main__": - app.run(main) diff --git a/grr/server/grr_response_server/flows/general/collectors_test.py b/grr/server/grr_response_server/flows/general/collectors_test.py index 2ada6c38df..ce41638af9 100644 --- a/grr/server/grr_response_server/flows/general/collectors_test.py +++ b/grr/server/grr_response_server/flows/general/collectors_test.py @@ -5,6 +5,7 @@ into collectors_*_test.py files. """ +import itertools import os import shutil from typing import IO @@ -538,7 +539,7 @@ def _RunListProcesses(self, args): def testGrep2(self): client_id = self.SetupClient(0, system="Linux") - client_mock = action_mocks.FileFinderClientMock() + client_mock = action_mocks.ClientFileFinderClientMock() with temp.AutoTempFilePath() as temp_file_path: with open(temp_file_path, "w") as f: f.write("foo") @@ -553,7 +554,9 @@ def testGrep2(self): results = self._RunClientActionArtifact( client_id, client_mock, ["FakeArtifact"] ) - matches = [r.matches[0].data for r in results] + matches = itertools.chain.from_iterable( + [m.data for m in r.matches] for r in results + ) expected_matches = [b"f", b"oo"] self.assertCountEqual(matches, expected_matches) @@ -608,49 +611,6 @@ def testArtifactFilesDownloaderFlow(self): self.assertLen(results, 1) self.assertEqual(results[0].downloaded_file.pathspec.path, temp_file_path) - def testLegacyFileFinderSetting(self): - """Test that ArtifactCollectorFlow can use the legacy FileFinder flow.""" - - def _RunCollectorFlow(client_id, client_mock): - with vfs_test_lib.FakeTestDataVFSOverrider(): - flow_id = flow_test_lib.TestFlowHelper( - collectors.ArtifactCollectorFlow.__name__, - client_mock, - artifact_list=["TestFilesArtifact"], - creator=self.test_username, - client_id=client_id, - ) - _ = flow_test_lib.GetFlowResults(client_id, flow_id) - - class _ClientMock(action_mocks.ActionMock): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.called_actions = set() - - def FileFinderOS(self, _): - self.called_actions.add("FileFinderOS") - return [] - - def GetFileStat(self, _): - self.called_actions.add("GetFileStat") - return [] - - client_id = self.SetupClient(0, system="Linux") - - client_mock = _ClientMock() - _RunCollectorFlow(client_id, client_mock) - self.assertIn("FileFinderOS", client_mock.called_actions) - self.assertNotIn("GetFileStat", client_mock.called_actions) - - with test_lib.ConfigOverrider( - {"Server.internal_artifactcollector_use_legacy_filefinder": True} - ): - client_mock = _ClientMock() - _RunCollectorFlow(client_id, client_mock) - self.assertIn("GetFileStat", client_mock.called_actions) - self.assertNotIn("FileFinderOS", client_mock.called_actions) - class RelationalTestArtifactCollectors( ArtifactCollectorsTestMixin, test_lib.GRRBaseTest @@ -1254,6 +1214,7 @@ def testLinuxMountCmdArtifact(self): ) 
expected = expected[0] self.assertIsInstance(expected, rdf_client_action.ExecuteResponse) + self.assertEqual(expected.exit_status, 0) # Run the ClientArtifactCollector to get the actual result. results = self._RunFlow( @@ -1264,8 +1225,9 @@ def testLinuxMountCmdArtifact(self): ) artifact_response = results[0] self.assertIsInstance(artifact_response, rdf_client_action.ExecuteResponse) + self.assertEqual(artifact_response.exit_status, 0) - self.assertEqual(artifact_response, expected) + self.assertEqual(artifact_response.stdout, expected.stdout) def testBasicRegistryKeyArtifact(self): """Test that a registry key artifact can be collected.""" diff --git a/grr/server/grr_response_server/flows/general/crowdstrike.py b/grr/server/grr_response_server/flows/general/crowdstrike.py new file mode 100644 index 0000000000..f73afb89d4 --- /dev/null +++ b/grr/server/grr_response_server/flows/general/crowdstrike.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +"""Flows related to CrowdStrike security software.""" +import binascii +import re + +from grr_response_core.lib import rdfvalue +from grr_response_core.lib.rdfvalues import client as rdf_client +from grr_response_core.lib.rdfvalues import client_action as rdf_client_action +from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs +from grr_response_core.lib.rdfvalues import paths as rdf_paths +from grr_response_core.lib.rdfvalues import structs as rdf_structs +from grr_response_proto import crowdstrike_pb2 +from grr_response_server import data_store +from grr_response_server import flow_base +from grr_response_server import flow_responses +from grr_response_server import server_stubs +from grr_response_server.rdfvalues import objects as rdf_objects + + +class GetCrowdstrikeAgentIdResult(rdf_structs.RDFProtoStruct): + protobuf = crowdstrike_pb2.GetCrowdstrikeAgentIdResult + rdf_deps = [] + + +class GetCrowdStrikeAgentID(flow_base.FlowBase): + """Flow that retrieves the identifier of the CrowdStrike agent.""" + + friendly_name = "Get CrowdStrike agent identifier" + category = "/Collectors/" + + result_types = (GetCrowdstrikeAgentIdResult,) + + def Start(self) -> None: + if self.client_os == "Linux": + return self._StartLinux() + elif self.client_os == "Windows": + return self._StartWindows() + elif self.client_os == "Darwin": + return self._StartMacOS() + else: + raise flow_base.FlowError(f"Unexpected system: {self.client_os}") + + def _StartLinux(self) -> None: + args = rdf_client_action.ExecuteRequest() + args.cmd = "/opt/CrowdStrike/falconctl" + args.args = ["-g", "--cid", "--aid"] + + self.CallClient( + server_stubs.ExecuteCommand, + args, + next_state=self._OnLinuxResponse.__name__, + ) + + def _StartWindows(self) -> None: + # TODO: There is no dedicated action for obtaining registry + # values. The existing artifact collector uses `GetFileStat` action for this + # which is horrible. + args = rdf_client_action.GetFileStatRequest() + args.pathspec.pathtype = rdf_paths.PathSpec.PathType.REGISTRY + args.pathspec.path = ( + r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\CSAgent\Sim\AG" + ) + + self.CallClient( + server_stubs.GetFileStat, + args, + next_state=self._OnWindowsResponse.__name__, + ) + + def _StartMacOS(self) -> None: + # The agent identifier is stored in the first 16 bytes of the file so we + # request only as much. 
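+ # TransferBuffer does not return the bytes inline: it uploads the
+ # requested region to the transfer store, and _OnMacOSResponse then reads
+ # it back from the blob store and hex-encodes it.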
+ args = rdf_client.BufferReference()
+ args.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS
+ args.pathspec.path = "/Library/CS/registry.base"
+ args.offset = 0
+ args.length = 16
+
+ self.CallClient(
+ server_stubs.TransferBuffer,
+ args,
+ next_state=self._OnMacOSResponse.__name__,
+ )
+
+ def _OnLinuxResponse(self, responses: flow_responses.Responses) -> None:
+ if not responses.success:
+ self.Log("Failed to retrieve agent identifier: %s", responses.status)
+ return
+
+ if len(responses) != 1:
+ raise flow_base.FlowError(f"Unexpected response count: {len(responses)}")
+
+ response = responses.First()
+ if not isinstance(response, rdf_client_action.ExecuteResponse):
+ raise flow_base.FlowError(f"Unexpected response type: {type(response)!r}")
+
+ stdout = response.stdout.decode("utf-8", errors="ignore")
+ if (match := _LINUX_AID_REGEX.search(stdout)) is None:
+ self.Log("Malformed `falconctl` output: %s", stdout)
+ return
+
+ result = GetCrowdstrikeAgentIdResult()
+ result.agent_id = match.group("aid")
+ self.SendReply(result)
+
+ def _OnWindowsResponse(self, responses: flow_responses.Responses) -> None:
+ if not responses.success:
+ self.Log("Failed to retrieve agent identifier: %s", responses.status)
+ return
+
+ if len(responses) != 1:
+ raise flow_base.FlowError(f"Unexpected response count: {len(responses)}")
+
+ response = responses.First()
+ if not isinstance(response, rdf_client_fs.StatEntry):
+ raise flow_base.FlowError(f"Unexpected response type: {type(response)!r}")
+
+ agent_id_bytes = response.registry_data.data
+
+ result = GetCrowdstrikeAgentIdResult()
+ result.agent_id = binascii.hexlify(agent_id_bytes).decode("ascii")
+ self.SendReply(result)
+
+ def _OnMacOSResponse(self, responses: flow_responses.Responses) -> None:
+ assert data_store.BLOBS is not None
+
+ if not responses.success:
+ self.Log("Failed to retrieve agent identifier: %s", responses.status)
+ return
+
+ if len(responses) != 1:
+ raise flow_base.FlowError(f"Unexpected response count: {len(responses)}")
+
+ response = responses.First()
+ if not isinstance(response, rdf_client.BufferReference):
+ raise flow_base.FlowError(f"Unexpected response type: {type(response)!r}")
+
+ blob_id = rdf_objects.BlobID(response.data)
+ blob = data_store.BLOBS.ReadAndWaitForBlob(blob_id, _BLOB_WAIT_TIMEOUT)
+ if blob is None:
+ raise flow_base.FlowError(f"Blob {blob_id!r} not found")
+
+ result = GetCrowdstrikeAgentIdResult()
+ result.agent_id = binascii.hexlify(blob).decode("ascii")
+ self.SendReply(result)
+
+
+_LINUX_AID_REGEX = re.compile(r"aid=\"(?P<aid>[0-9A-Fa-f]+)\"")
+_BLOB_WAIT_TIMEOUT = rdfvalue.Duration.From(30, rdfvalue.SECONDS)
diff --git a/grr/server/grr_response_server/flows/general/crowdstrike_test.py b/grr/server/grr_response_server/flows/general/crowdstrike_test.py
new file mode 100644
index 0000000000..f11c4b6b30
--- /dev/null
+++ b/grr/server/grr_response_server/flows/general/crowdstrike_test.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python
+import binascii
+import hashlib
+import os
+
+from absl.testing import absltest
+
+from grr_response_client import actions
+from grr_response_core.lib import rdfvalue
+from grr_response_core.lib.rdfvalues import client as rdf_client
+from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
+from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
+from grr_response_core.lib.rdfvalues import paths as rdf_paths
+from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
+from grr_response_server import data_store
+from 
grr_response_server import server_stubs +from grr_response_server.databases import db_test_utils +from grr_response_server.flows.general import crowdstrike +from grr_response_server.rdfvalues import objects as rdf_objects +from grr.test_lib import action_mocks +from grr.test_lib import flow_test_lib +from grr.test_lib import testing_startup + + +class GetCrowdStrikeAgentID(flow_test_lib.FlowTestsBaseclass): + + @classmethod + def setUpClass(cls): + super().setUpClass() + testing_startup.TestInit() + + def testLinux(self): + assert data_store.REL_DB is not None + + agent_id = os.urandom(16) + agent_id_hex = binascii.hexlify(agent_id).decode("ascii") + + client_id = db_test_utils.InitializeClient(data_store.REL_DB) + + client_snapshot = rdf_objects.ClientSnapshot() + client_snapshot.client_id = client_id + client_snapshot.knowledge_base.os = "Linux" + data_store.REL_DB.WriteClientSnapshot(client_snapshot) + + class ExecuteCommandMock(actions.ActionPlugin): + in_rdfvalue = rdf_client_action.ExecuteRequest + out_rdfvalues = [rdf_client_action.ExecuteResponse] + + def Run(self, args: rdf_client_action.ExecuteRequest) -> None: + del args # Unused. + + stdout = f'cid="4815162342",aid="{agent_id_hex}"' + + result = rdf_client_action.ExecuteResponse() + result.stdout = stdout.encode("ascii") + self.SendReply(result) + + flow_id = flow_test_lib.StartAndRunFlow( + crowdstrike.GetCrowdStrikeAgentID, + client_mock=action_mocks.ActionMock.With({ + server_stubs.ExecuteCommand.__name__: ExecuteCommandMock, + }), + client_id=client_id, + ) + + results = flow_test_lib.GetFlowResults(client_id, flow_id) + self.assertLen(results, 1) + + result = results[0] + self.assertIsInstance(result, crowdstrike.GetCrowdstrikeAgentIdResult) + self.assertEqual(result.agent_id, agent_id_hex) + + def testLinuxMalformedOutput(self): + assert data_store.REL_DB is not None + + client_id = db_test_utils.InitializeClient(data_store.REL_DB) + + client_snapshot = rdf_objects.ClientSnapshot() + client_snapshot.client_id = client_id + client_snapshot.knowledge_base.os = "Linux" + data_store.REL_DB.WriteClientSnapshot(client_snapshot) + + class ExecuteCommandMock(actions.ActionPlugin): + in_rdfvalue = rdf_client_action.ExecuteRequest + out_rdfvalues = [rdf_client_action.ExecuteResponse] + + def Run(self, args: rdf_client_action.ExecuteRequest) -> None: + del args # Unused. 
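+ # The output below deliberately lacks an aid="..." entry, so the flow
+ # should log the malformed `falconctl` output and return no results.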
+ + stdout = 'cid="4815162342"' + + result = rdf_client_action.ExecuteResponse() + result.stdout = stdout.encode("ascii") + self.SendReply(result) + + flow_id = flow_test_lib.StartAndRunFlow( + crowdstrike.GetCrowdStrikeAgentID, + client_mock=action_mocks.ActionMock.With({ + server_stubs.ExecuteCommand.__name__: ExecuteCommandMock, + }), + client_id=client_id, + ) + + results = flow_test_lib.GetFlowResults(client_id, flow_id) + self.assertEmpty(results) + + self.assertFlowLoggedRegex( + client_id, + flow_id, + "malformed `falconctl` output", + ) + + def testWindows(self): + assert data_store.REL_DB is not None + + agent_id = os.urandom(16) + agent_id_hex = binascii.hexlify(agent_id).decode("ascii") + + client_id = db_test_utils.InitializeClient(data_store.REL_DB) + + client_snapshot = rdf_objects.ClientSnapshot() + client_snapshot.client_id = client_id + client_snapshot.knowledge_base.os = "Windows" + data_store.REL_DB.WriteClientSnapshot(client_snapshot) + + class GetFileStatMock(actions.ActionPlugin): + in_rdfvalue = rdf_client_action.GetFileStatRequest + out_rdfvalues = [rdf_client_fs.StatEntry] + + def Run(self, args: rdf_client_action.GetFileStatRequest) -> None: + del args # Unused. + + result = rdf_client_fs.StatEntry() + result.registry_data.data = agent_id + self.SendReply(result) + + flow_id = flow_test_lib.StartAndRunFlow( + crowdstrike.GetCrowdStrikeAgentID, + client_mock=action_mocks.ActionMock.With({ + server_stubs.GetFileStat.__name__: GetFileStatMock, + }), + client_id=client_id, + ) + + results = flow_test_lib.GetFlowResults(client_id, flow_id) + self.assertLen(results, 1) + + result = results[0] + self.assertIsInstance(result, crowdstrike.GetCrowdstrikeAgentIdResult) + self.assertEqual(result.agent_id, agent_id_hex) + + def testMacOS(self): + assert data_store.REL_DB is not None + + agent_id = os.urandom(16) + agent_id_hex = binascii.hexlify(agent_id).decode("ascii") + + client_id = db_test_utils.InitializeClient(data_store.REL_DB) + + client_snapshot = rdf_objects.ClientSnapshot() + client_snapshot.client_id = client_id + client_snapshot.knowledge_base.os = "Darwin" + data_store.REL_DB.WriteClientSnapshot(client_snapshot) + + class TransferBufferMock(actions.ActionPlugin): + in_rdfvalue = rdf_client.BufferReference + out_rdfvalues = [rdf_client.BufferReference] + + TRANSFER_STORE = rdfvalue.SessionID(flow_name="TransferStore") + + def Run(self, args: rdf_client.BufferReference) -> None: + del args # Unused. 
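+ # Emulates the real TransferBuffer action: the payload goes to the
+ # transfer store, and the reply is a BufferReference whose `data` field
+ # carries the SHA-256 digest that the flow treats as the blob id.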
+ + blob = rdf_protodict.DataBlob() + blob.data = agent_id + self.SendReply(blob, session_id=self.TRANSFER_STORE) + + result = rdf_client.BufferReference() + result.offset = 0 + result.length = len(blob.data) + result.data = hashlib.sha256(blob.data).digest() + result.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS + result.pathspec.path = r"/Library/CS/registry.base" + self.SendReply(result) + + flow_id = flow_test_lib.StartAndRunFlow( + crowdstrike.GetCrowdStrikeAgentID, + client_mock=action_mocks.ActionMock.With({ + server_stubs.TransferBuffer.__name__: TransferBufferMock, + }), + client_id=client_id, + ) + + results = flow_test_lib.GetFlowResults(client_id, flow_id) + self.assertLen(results, 1) + + result = results[0] + self.assertIsInstance(result, crowdstrike.GetCrowdstrikeAgentIdResult) + self.assertEqual(result.agent_id, agent_id_hex) + + +if __name__ == "__main__": + absltest.main() diff --git a/grr/server/grr_response_server/flows/general/discovery.py b/grr/server/grr_response_server/flows/general/discovery.py index ccfe202fba..c848ad8c83 100644 --- a/grr/server/grr_response_server/flows/general/discovery.py +++ b/grr/server/grr_response_server/flows/general/discovery.py @@ -3,13 +3,10 @@ import logging from typing import Any -from typing import List -from typing import Sequence from google.protobuf import any_pb2 from grr_response_core import config from grr_response_core.lib import rdfvalue -from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs from grr_response_core.lib.rdfvalues import cloud as rdf_cloud @@ -29,6 +26,7 @@ from grr_response_server import server_stubs from grr_response_server.databases import db from grr_response_server.flows.general import collectors +from grr_response_server.flows.general import crowdstrike from grr_response_server.rdfvalues import objects as rdf_objects from grr_response_proto import rrg_pb2 from grr_response_proto.rrg import os_pb2 as rrg_os_pb2 @@ -112,13 +110,11 @@ def Start(self): server_stubs.EnumerateFilesystems, next_state=self.EnumerateFilesystems.__name__) - flow_args_cls = rdf_artifacts.ArtifactCollectorFlowArgs - if config.CONFIG["Artifacts.edr_agents"]: + if config.CONFIG["Interrogate.collect_crowdstrike_agent_id"]: self.CallFlow( - collectors.ArtifactCollectorFlow.__name__, - artifact_list=config.CONFIG["Artifacts.edr_agents"], - dependencies=flow_args_cls.Dependency.IGNORE_DEPS, - next_state=self.ProcessEdrAgents.__name__) + crowdstrike.GetCrowdStrikeAgentID.__name__, + next_state=self.ProcessGetCrowdStrikeAgentID.__name__, + ) @flow_base.UseProto2AnyResponses def HandleRRGGetSystemMetadata( @@ -432,19 +428,23 @@ def ClientLibraries(self, responses): for k, v in response.items(): self.state.client.library_versions.Append(key=k, value=str(v)) - def ProcessEdrAgents(self, responses: Sequence[Any]) -> None: + def ProcessGetCrowdStrikeAgentID( + self, + responses: flow_responses.Responses[Any], + ) -> None: if not responses.success: + status = responses.status + self.Log("failed to obtain CrowdStrike agent identifier: %s", status) return - edr_agents: List[rdf_client.EdrAgent] = [] - for response in responses: - if not isinstance(response, rdf_client.EdrAgent): - raise TypeError(f"Unexpected EDR agent response type: {type(response)}") - - edr_agents.append(response) + if not isinstance(response, crowdstrike.GetCrowdstrikeAgentIdResult): + raise TypeError(f"Unexpected response 
type: {type(response)}") - self.state.client.edr_agents = edr_agents + edr_agent = rdf_client.EdrAgent() + edr_agent.name = "CrowdStrike" + edr_agent.agent_id = response.agent_id + self.state.client.edr_agents.append(edr_agent) def NotifyAboutEnd(self): notification.Notify( @@ -480,6 +480,12 @@ def End(self, responses): data_store.REL_DB.AddClientLabels(self.state.client.client_id, "GRR", labels) + # Reset foreman rules check so active hunts can match against the new data + data_store.REL_DB.WriteClientMetadata( + self.client_id, + last_foreman=rdfvalue.RDFDatetime.EarliestDatabaseSafeValue(), + ) + class EnrolmentInterrogateEvent(events.EventListener): """An event handler which will schedule interrogation on client enrollment.""" diff --git a/grr/server/grr_response_server/flows/general/discovery_test.py b/grr/server/grr_response_server/flows/general/discovery_test.py index c081cfff58..b8f956145d 100644 --- a/grr/server/grr_response_server/flows/general/discovery_test.py +++ b/grr/server/grr_response_server/flows/general/discovery_test.py @@ -1,18 +1,17 @@ #!/usr/bin/env python """Tests for Interrogate.""" +import binascii import datetime +import os import platform import socket -from typing import Iterable -from typing import Iterator from unittest import mock from absl import app from grr_response_client.client_actions import admin from grr_response_core import config -from grr_response_core.lib import parsers -from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts +from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import client_action as rdf_client_action from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs @@ -31,6 +30,7 @@ from grr_response_server.databases import db_test_utils from grr_response_server.flows.general import discovery from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects +from grr_response_server.rdfvalues import objects as rdf_objects from grr.test_lib import acl_test_lib from grr.test_lib import action_mocks from grr.test_lib import db_test_lib @@ -411,63 +411,55 @@ def testFleetspeakClient_OnlyGRRLabels(self, mock_labels_fn): ] self.assertCountEqual([l.name for l in rdf_labels], expected_labels) - def testEdrAgentCollection(self): + def testCrowdStrikeAgentIDCollection(self): + agent_id = binascii.hexlify(os.urandom(16)).decode("ascii") client_id = db_test_utils.InitializeClient(data_store.REL_DB) - artifact_source = rdf_artifacts.ArtifactSource() - artifact_source.type = rdf_artifacts.ArtifactSource.SourceType.COMMAND - artifact_source.attributes = {"cmd": "/bin/echo", "args": ["1337"]} + client_snapshot = rdf_objects.ClientSnapshot() + client_snapshot.client_id = client_id + client_snapshot.knowledge_base.os = "Linux" + data_store.REL_DB.WriteClientSnapshot(client_snapshot) - artifact = rdf_artifacts.Artifact() - artifact.name = "Foo" - artifact.doc = "Lorem ipsum." 
- artifact.sources = [artifact_source] - - class FooParser(parsers.SingleResponseParser): - - supported_artifacts = ["Foo"] - - def ParseResponse( - self, - knowledge_base: rdf_client.KnowledgeBase, - response: rdf_client_action.ExecuteResponse, - ) -> Iterator[rdf_client.EdrAgent]: - edr_agent = rdf_client.EdrAgent() - edr_agent.name = "echo" - edr_agent.agent_id = response.stdout.decode("utf-8") - - yield edr_agent - - class EchoActionMock(action_mocks.InterrogatedClient): + class ClientMock(action_mocks.InterrogatedClient): def ExecuteCommand( self, args: rdf_client_action.ExecuteRequest, - ) -> Iterable[rdf_client_action.ExecuteResponse]: - response = rdf_client_action.ExecuteResponse() - response.stdout = " ".join(args.args).encode("utf-8") - response.exit_status = 0 - - return [response] - - with mock.patch.object(artifact_registry, "REGISTRY", - artifact_registry.ArtifactRegistry()) as registry: - registry.RegisterArtifact(artifact) - - with test_lib.ConfigOverrider({"Artifacts.edr_agents": ["Foo"]}): - with parser_test_lib._ParserContext("Foo", FooParser): - flow_test_lib.TestFlowHelper( - discovery.Interrogate.__name__, - client_mock=EchoActionMock(), - client_id=client_id, - creator=self.test_username) - - flow_test_lib.FinishAllFlowsOnClient(client_id) + ) -> rdf_client_action.ExecuteResponse: + del args # Unused. + + stdout = f'cid="4815162342",aid="{agent_id}"' + + result = rdf_client_action.ExecuteResponse() + result.stdout = stdout.encode("ascii") + yield result + + # Without clearing the artifact registry, the flow gets stuck. It is most + # likely caused by some artifact waiting for something to be initialized or + # other terrible dependency but I am too tired of trying to figure out what + # exactly is the issue. + with mock.patch.object( + artifact_registry, + "REGISTRY", + artifact_registry.ArtifactRegistry(), + ): + with test_lib.ConfigOverrider({ + "Interrogate.collect_crowdstrike_agent_id": True, + "Artifacts.knowledge_base": [], + "Artifacts.knowledge_base_additions": [], + "Artifacts.non_kb_interrogate_artifacts": [], + }): + flow_test_lib.TestFlowHelper( + discovery.Interrogate.__name__, + client_mock=ClientMock(), + client_id=client_id, + ) + flow_test_lib.FinishAllFlowsOnClient(client_id) - snapshot = data_store.REL_DB.ReadClientSnapshot(client_id) - self.assertLen(snapshot.edr_agents, 1) - self.assertEqual(snapshot.edr_agents[0].name, "echo") - self.assertEqual(snapshot.edr_agents[0].agent_id, "1337") + client_snapshot = data_store.REL_DB.ReadClientSnapshot(client_id) + self.assertLen(client_snapshot.edr_agents, 1) + self.assertEqual(client_snapshot.edr_agents[0].name, "CrowdStrike") + self.assertEqual(client_snapshot.edr_agents[0].agent_id, agent_id) @parser_test_lib.WithAllParsers def testSourceFlowIdIsSet(self): @@ -669,6 +661,29 @@ def testStartBothAgents(self, db: abstract_db.Database): self.assertTrue(_HasClientActionRequest(flow, server_stubs.GetClientInfo)) self.assertTrue(_HasRRGRequest(flow, rrg_pb2.Action.GET_SYSTEM_METADATA)) + @parser_test_lib.WithAllParsers + def testForemanTimeIsResetOnClientSnapshotWrite(self): + client_id = self._SetupMinimalClient() + data_store.REL_DB.WriteClientMetadata( + client_id, + last_foreman=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600), + ) + client_mock = action_mocks.InterrogatedClient() + client_mock.InitializeClient() + with test_lib.SuppressLogs(): + flow_test_lib.TestFlowHelper( + discovery.Interrogate.__name__, + client_mock, + creator=self.test_username, + client_id=client_id, + ) + + md = 
data_store.REL_DB.ReadClientMetadata(client_id)
+ self.assertIsNotNone(md.last_foreman_time)
+ self.assertEqual(
+ md.last_foreman_time, rdfvalue.RDFDatetime.EarliestDatabaseSafeValue()
+ )
+

def _HasClientActionRequest(
flow: discovery.Interrogate,
@@ -677,8 +692,8 @@ def _HasClientActionRequest(
"""Checks whether the given flow has a request for the given action."""
action_id = action_registry.ID_BY_ACTION_STUB[action]

- def IsAction(request: rdf_flows.ClientActionRequest) -> bool:
- return request.action_identifier == action_id
+ def IsAction(request: rdf_flows.GrrMessage) -> bool:
+ return request.name == action_id

return any(map(IsAction, flow.client_action_requests))
diff --git a/grr/server/grr_response_server/flows/general/dummy.py b/grr/server/grr_response_server/flows/general/dummy.py
new file mode 100644
index 0000000000..a02143687f
--- /dev/null
+++ b/grr/server/grr_response_server/flows/general/dummy.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""Flow that sends a message to the client and back as an example."""
+
+from grr_response_core.lib.rdfvalues import dummy as rdf_dummy
+from grr_response_core.lib.rdfvalues import structs as rdf_structs
+from grr_response_proto import dummy_pb2
+from grr_response_server import flow_base
+from grr_response_server import flow_responses
+from grr_response_server import server_stubs
+
+
+class DummyArgs(rdf_structs.RDFProtoStruct):
+ """Request for Dummy action."""
+
+ protobuf = dummy_pb2.DummyArgs
+ rdf_deps = []
+
+
+class DummyFlowResult(rdf_structs.RDFProtoStruct):
+ """Result for Dummy action."""
+
+ protobuf = dummy_pb2.DummyFlowResult
+ rdf_deps = []
+
+
+class Dummy(flow_base.FlowBase):
+ """A mechanism to send a string to the client and back.
+
+ Returns to parent flow:
+ A DummyFlowResult with a modified string.
+ """
+
+ friendly_name = "Dummy Example Flow"
+ category = "/Administrative/"
+ behaviours = flow_base.BEHAVIOUR_DEBUG
+
+ args_type = DummyArgs
+ result_types = (DummyFlowResult,)
+
+ def Start(self):
+ """Schedules the action in the client (Dummy ClientAction)."""
+
+ if not self.args.flow_input:
+ raise ValueError("args.flow_input is empty, cannot proceed!")
+
+ request = rdf_dummy.DummyRequest(
+ action_input=f"args.flow_input: '{self.args.flow_input}'"
+ )
+ self.CallClient(
+ server_stubs.Dummy,
+ request,
+ next_state=self.ReceiveActionOutput.__name__,
+ )
+
+ self.Log("Finished Start.")
+
+ def ReceiveActionOutput(
+ self, responses: flow_responses.Responses[rdf_dummy.DummyResult]
+ ):
+ """Receives the action output and processes it."""
+ # Checks the "Status" of the action, attaching information to the flow.
+ if not responses.success:
+ raise flow_base.FlowError(responses.status)
+
+ if len(responses) != 1:
+ raise flow_base.FlowError(
+ "Oops, something weird happened. Expected a single response, but"
+ f" got {list(responses)}"
+ )
+
+ result = DummyFlowResult(
+ flow_output=(
+ f"responses.action_output: '{list(responses)[0].action_output}'"
+ )
+ )
+ self.SendReply(result)
+
+ self.Log("Finished ReceiveActionOutput.")
diff --git a/grr/server/grr_response_server/flows/general/dummy_test.py b/grr/server/grr_response_server/flows/general/dummy_test.py
new file mode 100644
index 0000000000..0c332a5edc
--- /dev/null
+++ b/grr/server/grr_response_server/flows/general/dummy_test.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+"""Tests for dummy flow."""
+
+from absl import app
+
+from grr_response_client import actions
+from grr_response_core.lib.rdfvalues import dummy as rdf_dummy
+from grr_response_server.flows.general import dummy
+from grr.test_lib import action_mocks
+from grr.test_lib import flow_test_lib
+from grr.test_lib import test_lib
+
+# pylint:mode=test
+
+
+# Mocks the Dummy Client Action.
+class DummyActionReturnsOnce(actions.ActionPlugin):
+ """Sends a single Reply (like real action would)."""
+
+ in_rdfvalue = rdf_dummy.DummyRequest
+ out_rdfvalues = [rdf_dummy.DummyResult]
+
+ def Run(self, args: rdf_dummy.DummyRequest) -> None:
+ self.SendReply(rdf_dummy.DummyResult(action_output="single"))
+
+
+# Mocks the Dummy Client Action, sending two replies.
+class DummyActionReturnsTwice(actions.ActionPlugin):
+ """Sends more than one Reply."""
+
+ in_rdfvalue = rdf_dummy.DummyRequest
+ out_rdfvalues = [rdf_dummy.DummyResult]
+
+ def Run(self, args: rdf_dummy.DummyRequest) -> None:
+ self.SendReply(rdf_dummy.DummyResult(action_output="first"))
+ self.SendReply(rdf_dummy.DummyResult(action_output="second"))
+
+
+class DummyTest(flow_test_lib.FlowTestsBaseclass):
+ """Test the Dummy Flow."""
+
+ def setUp(self):
+ super().setUp()
+ # We need a Client where we can execute the Flow/call the Action.
+ self.client_id = self.SetupClient(0)
+
+ def testHasInput(self):
+ """Test that the Dummy flow works."""
+
+ flow_id = flow_test_lib.TestFlowHelper(
+ dummy.Dummy.__name__,
+ # Uses mocked implementation.
+ action_mocks.ActionMock.With({"Dummy": DummyActionReturnsOnce}),
+ creator=self.test_username,
+ client_id=self.client_id,
+ # Flow arguments
+ flow_input="batata",
+ )
+
+ results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
+ self.assertLen(results, 1)
+ self.assertEqual(
+ "responses.action_output: 'single'",
+ results[0].flow_output,
+ )
+
+ def testFailsIfEmptyFlowInput(self):
+ """Test that the Dummy flow fails when there's no input."""
+
+ with self.assertRaisesRegex(
+ RuntimeError, r"args.flow_input is empty, cannot proceed!"
+ ):
+ flow_test_lib.TestFlowHelper(
+ dummy.Dummy.__name__,
+ # Should fail before calling the client
+ None,
+ creator=self.test_username,
+ client_id=self.client_id,
+ # Flow arguments are empty
+ )
+
+ def testFailsIfMultipleActionOutputs(self):
+ """Test that the Dummy flow fails when the action replies more than once."""
+
+ with self.assertRaisesRegex(
+ RuntimeError, r".*Oops, something weird happened.*"
+ ):
+ flow_test_lib.TestFlowHelper(
+ dummy.Dummy.__name__,
+ # Uses mocked implementation.
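+ # DummyActionReturnsTwice sends two replies, which trips the
+ # single-response check in Dummy.ReceiveActionOutput.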
+ action_mocks.ActionMock.With({"Dummy": DummyActionReturnsTwice}), + creator=self.test_username, + client_id=self.client_id, + # Flow arguments + flow_input="banana", + ) + + +def main(argv): + test_lib.main(argv) + + +if __name__ == "__main__": + app.run(main) diff --git a/grr/server/grr_response_server/flows/general/file_finder.py b/grr/server/grr_response_server/flows/general/file_finder.py index 43d29aeb74..b3d08b5554 100644 --- a/grr/server/grr_response_server/flows/general/file_finder.py +++ b/grr/server/grr_response_server/flows/general/file_finder.py @@ -5,6 +5,7 @@ from typing import Collection, Optional, Sequence, Set, Tuple from grr_response_core.lib import artifact_utils +from grr_response_core.lib import interpolation from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client_action as rdf_client_action from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs @@ -23,20 +24,31 @@ from grr_response_server.rdfvalues import objects as rdf_objects -class FileFinder(transfer.MultiGetFileLogic, fingerprint.FingerprintFileLogic, - filesystem.GlobLogic, flow_base.FlowBase): +class LegacyFileFinder( + transfer.MultiGetFileLogic, + fingerprint.FingerprintFileLogic, + filesystem.GlobLogic, + flow_base.FlowBase, +): """This flow looks for files matching given criteria and acts on them. - FileFinder searches for files that match glob expressions. The "action" + LegacyFileFinder searches for files that match glob expressions. The "action" (e.g. Download) is applied to files that match all given "conditions". Matches are then written to the results collection. If there are no "conditions" specified, "action" is just applied to all found files. + + TODO: remove by EOY2024. + + This flow is scheduled for removal and is no longer tested (all file finder + related tests are using the ClientFileFinder or FileFinder, which is now + an alias to ClientFileFinder). """ - friendly_name = "File Finder" + + friendly_name = "Legacy File Finder (deprecated)" category = "/Filesystem/" args_type = rdf_file_finder.FileFinderArgs result_types = (rdf_file_finder.FileFinderResult,) - behaviours = flow_base.BEHAVIOUR_BASIC + behaviours = flow_base.BEHAVIOUR_DEBUG # Will be used by FingerprintFileLogic. fingerprint_file_mixin_client_action = server_stubs.HashFile @@ -453,6 +465,12 @@ def Start(self): """Issue the find request.""" super().Start() + # Do not do anything if no paths are specified in the arguments. + if not self.args.paths: + self.Log("No paths provided, finishing.") + self.state.files_found = 0 + return + if self.args.pathtype == rdf_paths.PathSpec.PathType.OS: stub = server_stubs.FileFinderOS else: @@ -472,15 +490,22 @@ def Start(self): def _InterpolatePaths(self, globs: Sequence[str]) -> Optional[Sequence[str]]: kb = self.client_knowledge_base - if kb is None: - self.Error("No knowledgebase available for path interpolation") - return None - paths = list() missing_attrs = list() unknown_attrs = list() for glob in globs: + # Only fail hard on missing knowledge base if there's actual + # interpolation to be done. 
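+ # For example, "%%users.homedir%%/.ssh/*" requires knowledge base data,
+ # while a literal glob like "/var/log/*.log" can run without it.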
+ if kb is None: + interpolator = interpolation.Interpolator(str(glob)) + if interpolator.Vars() or interpolator.Scopes(): + self.Log( + f"Skipping glob '{glob}': can't interpolate with an " + "empty knowledge base" + ) + continue + try: paths.extend(artifact_utils.InterpolateKbAttributes(str(glob), kb)) except artifact_utils.KbInterpolationMissingAttributesError as error: @@ -497,6 +522,13 @@ def _InterpolatePaths(self, globs: Sequence[str]) -> Optional[Sequence[str]]: self.Error(f"Unknown knowledgebase attributes: {unknown_attrs}") return None + if not paths: + self.Error( + "All globs skipped, as there's no knowledgebase available for" + " interpolation" + ) + return None + return paths def StoreResultsWithoutBlobs( @@ -518,7 +550,7 @@ def StoreResultsWithoutBlobs( elif response.HasField("stat_entry"): stat_entry_responses.append(response) - self._WriteStatEntries([r.stat_entry for r in stat_entry_responses]) + filesystem.WriteFileFinderResults(stat_entry_responses, self.client_id) for r in stat_entry_responses: self.SendReply(r) @@ -649,11 +681,16 @@ def _WriteFilesContent( return client_path_hash_id - def _WriteStatEntries(self, stat_entries): - filesystem.WriteStatEntries(stat_entries, client_id=self.client_id) - def End(self, responses): super().End(responses) if self.rdf_flow.flow_state != flows_pb2.Flow.ERROR: self.Log("Found and processed %d files.", self.state.files_found) + + +# TODO decide on the FileFinder name and remove the legacy alias. +class FileFinder(ClientFileFinder): + """An alias for ClientFileFinder.""" + + friendly_name = "File Finder" + behaviours = flow_base.BEHAVIOUR_BASIC diff --git a/grr/server/grr_response_server/flows/general/file_finder_test.py b/grr/server/grr_response_server/flows/general/file_finder_test.py index 5d9db53784..aa4481b638 100644 --- a/grr/server/grr_response_server/flows/general/file_finder_test.py +++ b/grr/server/grr_response_server/flows/general/file_finder_test.py @@ -5,20 +5,22 @@ import hashlib import io import os +import shutil import stat import struct +import time from typing import Any, List, Optional, Sequence from unittest import mock from absl import app from grr_response_client import vfs +from grr_response_client.client_actions.file_finder_utils import uploading from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_core.lib.rdfvalues import paths as rdf_paths -from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_core.lib.util import temp from grr_response_proto import flows_pb2 from grr_response_server import data_store @@ -49,19 +51,16 @@ def FilenameToPathComponents(self, fname): path = os.path.join(self.base_path, "searching", fname).lstrip("/") return tuple(path.split(os.path.sep)) - EXPECTED_HASHES = { - "auth.log": ("67b8fc07bd4b6efc3b2dce322e8ddf609b540805", - "264eb6ff97fc6c37c5dd4b150cb0a797", - "91c8d6287a095a6fa6437dac50ffe3fe5c5e0d06dff" - "3ae830eedfce515ad6451"), - "dpkg.log": ("531b1cfdd337aa1663f7361b2fd1c8fe43137f4a", - "26973f265ce5ecc1f86bc413e65bfc1d", - "48303a1e7ceec679f6d417b819f42779575ffe8eabf" - "9c880d286a1ee074d8145"), - "dpkg_false.log": ("a2c9cc03c613a44774ae97ed6d181fe77c13e01b", - "ab48f3548f311c77e75ac69ac4e696df", - "a35aface4b45e3f1a95b0df24efc50e14fbedcaa6a7" - "50ba32358eaaffe3c4fb0") + EXPECTED_SHA256_HASHES = { + "auth.log": ( + 
"91c8d6287a095a6fa6437dac50ffe3fe5c5e0d06dff3ae830eedfce515ad6451" + ), + "dpkg.log": ( + "48303a1e7ceec679f6d417b819f42779575ffe8eabf9c880d286a1ee074d8145" + ), + "dpkg_false.log": ( + "a35aface4b45e3f1a95b0df24efc50e14fbedcaa6a750ba32358eaaffe3c4fb0" + ), } def CheckFilesHashed(self, fnames): @@ -69,7 +68,7 @@ def CheckFilesHashed(self, fnames): for fname in fnames: try: - file_hashes = self.EXPECTED_HASHES[fname] + file_hash = self.EXPECTED_SHA256_HASHES[fname] except KeyError: raise RuntimeError("Can't check unexpected result for correct " "hashes: %s" % fname) @@ -80,9 +79,7 @@ def CheckFilesHashed(self, fnames): components=self.FilenameToPathComponents(fname)) hash_obj = path_info.hash_entry - self.assertEqual(str(hash_obj.sha1), file_hashes[0]) - self.assertEqual(str(hash_obj.md5), file_hashes[1]) - self.assertEqual(str(hash_obj.sha256), file_hashes[2]) + self.assertEqual(str(hash_obj.sha256), file_hash) def CheckFilesNotHashed(self, fnames): for fname in fnames: @@ -127,61 +124,69 @@ def CheckFilesNotDownloaded(self, fnames): rdf_objects.PathInfo.PathType.OS, components=self.FilenameToPathComponents(fname))) self.fail("Found downloaded file: %s" % fname) - except file_store.FileHasNoContentError: + except (file_store.FileHasNoContentError, file_store.FileNotFoundError): pass - def CheckFiles(self, fnames, results): - if fnames is None: + def CheckFiles(self, expected_fnames, skipped_fnames, results): + if expected_fnames is None: self.assertFalse(results) return # If results are expected, check that they are present in the results. # Also check that there are no other files. - self.assertLen(fnames, len(fnames)) + self.assertLen(results, len(set(expected_fnames + skipped_fnames))) for r in results: self.assertIsInstance(r, rdf_file_finder.FileFinderResult) self.assertCountEqual( - [os.path.basename(r.stat_entry.pathspec.path) for r in results], fnames) + [os.path.basename(r.stat_entry.pathspec.path) for r in results], + expected_fnames + skipped_fnames, + ) - def CheckReplies(self, replies, action, expected_files): + def CheckReplies(self, replies, action, expected_files, skipped_files): reply_count = 0 for reply in replies: self.assertIsInstance(reply, rdf_file_finder.FileFinderResult) + is_skipped = reply.stat_entry.pathspec.Basename() in skipped_files + reply_count += 1 if action == rdf_file_finder.FileFinderAction.Action.STAT: self.assertTrue(reply.stat_entry) self.assertFalse(reply.hash_entry) elif action == rdf_file_finder.FileFinderAction.Action.DOWNLOAD: self.assertTrue(reply.stat_entry) - self.assertTrue(reply.hash_entry) + if not is_skipped: + self.assertTrue(reply.hash_entry) elif action == rdf_file_finder.FileFinderAction.Action.HASH: self.assertTrue(reply.stat_entry) - self.assertTrue(reply.hash_entry) + if not is_skipped: + self.assertTrue(reply.hash_entry) - if action != rdf_file_finder.FileFinderAction.Action.STAT: + if ( + action != rdf_file_finder.FileFinderAction.Action.STAT + and not is_skipped + ): # Check that file's hash is correct. 
file_basename = reply.stat_entry.pathspec.Basename() try: - file_hashes = self.EXPECTED_HASHES[file_basename] + file_hash = self.EXPECTED_SHA256_HASHES[file_basename] except KeyError: raise RuntimeError("Can't check unexpected result for correct " "hashes: %s" % file_basename) - self.assertEqual(str(reply.hash_entry.sha1), file_hashes[0]) - self.assertEqual(str(reply.hash_entry.md5), file_hashes[1]) - self.assertEqual(str(reply.hash_entry.sha256), file_hashes[2]) + self.assertEqual(str(reply.hash_entry.sha256), file_hash) - self.assertEqual(reply_count, len(expected_files)) + # Skipped files are reported, but not collected/hashed (i.e. the action is + # skipped). + self.assertEqual(reply_count, len(expected_files) + len(skipped_files)) def RunFlow( self, paths: Optional[List[str]] = None, conditions: Optional[List[rdf_file_finder.FileFinderCondition]] = None, action: Optional[rdf_file_finder.FileFinderAction] = None, - implementation_type: Optional[rdf_structs.EnumNamedValue] = None ) -> Sequence[Any]: self.last_session_id = flow_test_lib.TestFlowHelper( file_finder.FileFinder.__name__, @@ -189,10 +194,10 @@ def RunFlow( client_id=self.client_id, paths=paths or [self.path], pathtype=rdf_paths.PathSpec.PathType.OS, - implementation_type=implementation_type, action=action, conditions=conditions, - creator=self.test_username) + creator=self.test_username, + ) return flow_test_lib.GetFlowResults(self.client_id, self.last_session_id) def RunFlowAndCheckResults( @@ -201,7 +206,9 @@ def RunFlowAndCheckResults( action=rdf_file_finder.FileFinderAction.Action.STAT, expected_files=None, non_expected_files=None, - paths=None): + skipped_files=None, + paths=None, + ): if not isinstance(action, rdf_file_finder.FileFinderAction): action = rdf_file_finder.FileFinderAction(action_type=action) action_type = action.action_type @@ -209,30 +216,38 @@ def RunFlowAndCheckResults( conditions = conditions or [] expected_files = expected_files or [] non_expected_files = non_expected_files or [] + skipped_files = skipped_files or [] results = self.RunFlow(paths=paths, conditions=conditions, action=action) - self.CheckReplies(results, action_type, expected_files) + self.CheckReplies(results, action_type, expected_files, skipped_files) - self.CheckFiles(expected_files, results) + self.CheckFiles(expected_files, skipped_files, results) if action_type == rdf_file_finder.FileFinderAction.Action.STAT: - self.CheckFilesNotDownloaded(expected_files + non_expected_files) - self.CheckFilesNotHashed(expected_files + non_expected_files) + self.CheckFilesNotDownloaded( + expected_files + non_expected_files + skipped_files + ) + self.CheckFilesNotHashed( + expected_files + non_expected_files + skipped_files + ) elif action_type == rdf_file_finder.FileFinderAction.Action.DOWNLOAD: self.CheckFilesHashed(expected_files) - self.CheckFilesNotHashed(non_expected_files) + self.CheckFilesNotHashed(non_expected_files + skipped_files) self.CheckFilesDownloaded(expected_files) - self.CheckFilesNotDownloaded(non_expected_files) + self.CheckFilesNotDownloaded(non_expected_files + skipped_files) # Downloaded files are hashed to allow for deduping. 
elif action_type == rdf_file_finder.FileFinderAction.Action.HASH: - self.CheckFilesNotDownloaded(expected_files + non_expected_files) + self.CheckFilesNotDownloaded( + expected_files + non_expected_files + skipped_files + ) self.CheckFilesHashed(expected_files) - self.CheckFilesNotHashed(non_expected_files) + self.CheckFilesNotHashed(non_expected_files + skipped_files) return results def setUp(self): super().setUp() - self.client_mock = action_mocks.FileFinderClientMockWithTimestamps() + self.client_mock = action_mocks.ClientFileFinderClientMock() + self.fixture_path = os.path.join(self.base_path, "searching") self.path = os.path.join(self.fixture_path, "*.log") self.client_id = self.SetupClient(0) @@ -388,7 +403,9 @@ def testLiteralMatchConditionWithDifferentActions(self): # Check that the results' matches fields are correctly filled. self.assertLen(results, 1) self.assertLen(results[0].matches, 1) - self.assertEqual(results[0].matches[0].offset, 350) + # The match is at offset 350, but we have bytes_before=10, so the offset + # of the returned BufferReference is 350-10=340. + self.assertEqual(results[0].matches[0].offset, 340) self.assertEqual(results[0].matches[0].data, b"session): session opened for user dearjohn by (uid=0") @@ -439,7 +456,9 @@ def testRegexMatchConditionWithDifferentActions(self): self.assertLen(results, 1) self.assertLen(results[0].matches, 1) - self.assertEqual(results[0].matches[0].offset, 350) + # The match is at offset 350, but we have bytes_before=10, so the offset + # of the returned BufferReference is 350-10=340. + self.assertEqual(results[0].matches[0].offset, 340) self.assertEqual(results[0].matches[0].data, b"session): session opened for user dearjohn by (uid=0") @@ -475,10 +494,14 @@ def testTwoRegexMatchConditionsWithDifferentActions1(self): self.assertLen(results, 1) self.assertLen(results[0].matches, 2) - self.assertEqual(results[0].matches[0].offset, 350) + # The match is at offset 350, but we have bytes_before=10, so the offset + # of the returned BufferReference is 350-10=340. + self.assertEqual(results[0].matches[0].offset, 340) self.assertEqual(results[0].matches[0].data, b"session): session opened for user dearjohn by (uid=0") - self.assertEqual(results[0].matches[1].offset, 513) + # The match is at offset 513, but we have bytes_before=10, so the offset + # of the returned BufferReference is 513-10=503. + self.assertEqual(results[0].matches[1].offset, 503) self.assertEqual(results[0].matches[1].data, b"rong line format.... should not be he") @@ -512,7 +535,9 @@ def testTwoRegexMatchConditionsWithDifferentActions2(self): self.assertLen(results, 1) self.assertLen(results[0].matches, 2) - self.assertEqual(results[0].matches[0].offset, 350) + # The match is at offset 350, but we have bytes_before=10, so the offset + # of the returned BufferReference is 350-10=340. 
+ self.assertEqual(results[0].matches[0].offset, 340) self.assertEqual(results[0].matches[0].data, b"session): session opened for user dearjohn by (uid=0") self.assertEqual(results[0].matches[1].offset, 0) @@ -541,7 +566,7 @@ def testSizeConditionWithDifferentActions(self): def testDownloadAndHashActionSizeLimitWithSkipPolicy(self): expected_files = ["dpkg.log", "dpkg_false.log"] - non_expected_files = ["auth.log"] + skipped_files = ["auth.log"] sizes = [ os.stat(os.path.join(self.fixture_path, f)).st_size for f in expected_files @@ -557,7 +582,8 @@ def testDownloadAndHashActionSizeLimitWithSkipPolicy(self): paths=[self.path], action=action, expected_files=expected_files, - non_expected_files=non_expected_files) + skipped_files=skipped_files, + ) def testDownloadAndHashActionSizeLimitWithHashTruncatedPolicy(self): image_path = os.path.join(self.base_path, "test_img.dd") @@ -599,7 +625,7 @@ def testDownloadActionSizeLimitWithDownloadTruncatedPolicy(self): with io.open(image_path, "rb") as fd: expected_data = fd.read(expected_size) - d = hashlib.sha1() + d = hashlib.sha256() d.update(expected_data) expected_hash = d.digest() @@ -608,21 +634,24 @@ def testDownloadActionSizeLimitWithDownloadTruncatedPolicy(self): self.assertEqual(data, expected_data) path_info = self._ReadTestPathInfo( - ["test_img.dd"], path_type=rdf_objects.PathInfo.PathType.OS) - self.assertEqual(path_info.hash_entry.sha1, expected_hash) + ["test_img.dd"], path_type=rdf_objects.PathInfo.PathType.OS + ) self.assertEqual(path_info.hash_entry.num_bytes, expected_size) + self.assertEqual(path_info.hash_entry.sha256, expected_hash) - # Setting MIN_CALL_TO_FILE_STORE to a smaller value emulates MultiGetFile's - # behavior when dealing with large files. - @mock.patch.object(file_finder.FileFinder, "MIN_CALL_TO_FILE_STORE", 1) def testDownloadActionWithMultipleAttemptsWithMultipleSizeLimits(self): total_num_chunks = 10 - total_size = total_num_chunks * file_finder.FileFinder.CHUNK_SIZE + total_size = ( + total_num_chunks * uploading.TransferStoreUploader.DEFAULT_CHUNK_SIZE + ) path = os.path.join(self.temp_dir, "test_big.txt") with io.open(path, "wb") as fd: for i in range(total_num_chunks): - fd.write(struct.pack("b", i) * file_finder.FileFinder.CHUNK_SIZE) + fd.write( + struct.pack("b", i) + * uploading.TransferStoreUploader.DEFAULT_CHUNK_SIZE + ) da = rdf_file_finder.FileFinderDownloadActionOptions @@ -633,8 +662,9 @@ def testDownloadActionWithMultipleAttemptsWithMultipleSizeLimits(self): # of the current run was much bigger than the size of the previously # fetched file. 
action = rdf_file_finder.FileFinderAction.Download( - max_size=2 * file_finder.FileFinder.CHUNK_SIZE, - oversized_file_policy=da.OversizedFilePolicy.DOWNLOAD_TRUNCATED) + max_size=2 * uploading.TransferStoreUploader.DEFAULT_CHUNK_SIZE, + oversized_file_policy=da.OversizedFilePolicy.DOWNLOAD_TRUNCATED, + ) self.RunFlow(paths=[path], action=action) action = rdf_file_finder.FileFinderAction.Download( @@ -695,55 +725,162 @@ def testSizeAndRegexConditionsWithDifferentActions(self): def testModificationTimeConditionWithDifferentActions(self): expected_files = ["dpkg.log", "dpkg_false.log"] non_expected_files = ["auth.log"] - change_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1444444440) - modification_time_condition = rdf_file_finder.FileFinderCondition( - condition_type=rdf_file_finder.FileFinderCondition.Type - .MODIFICATION_TIME, - modification_time=rdf_file_finder.FileFinderModificationTimeCondition( - min_last_modified_time=change_time)) - for action in self.CONDITION_TESTS_ACTIONS: - self.RunFlowAndCheckResults( - action=action, - conditions=[modification_time_condition], - expected_files=expected_files, - non_expected_files=non_expected_files) + with temp.AutoTempDirPath(remove_non_empty=True) as tempdir: + os.mkdir(os.path.join(tempdir, "searching")) + for fname in expected_files + non_expected_files: + shutil.copyfile( + os.path.join(self.fixture_path, fname), + os.path.join(tempdir, "searching", fname), + ) + + # TODO: the complexity of these tests and their reliance on + # shared state is horrible. All these tests should be rewritten. + self.base_path = tempdir + self.fixture_path = os.path.join(self.base_path, "searching") + + os.utime( + os.path.join(self.fixture_path, "dpkg.log"), + times=( + change_time.AsSecondsSinceEpoch() + 1, + change_time.AsSecondsSinceEpoch() + 1, + ), + ) + os.utime( + os.path.join(self.fixture_path, "dpkg_false.log"), + times=( + change_time.AsSecondsSinceEpoch() + 2, + change_time.AsSecondsSinceEpoch() + 2, + ), + ) + os.utime( + os.path.join(self.fixture_path, "auth.log"), + times=( + change_time.AsSecondsSinceEpoch() - 1, + change_time.AsSecondsSinceEpoch() - 1, + ), + ) + + modification_time_condition = rdf_file_finder.FileFinderCondition( + condition_type=rdf_file_finder.FileFinderCondition.Type.MODIFICATION_TIME, + modification_time=rdf_file_finder.FileFinderModificationTimeCondition( + min_last_modified_time=change_time + ), + ) + + for action in self.CONDITION_TESTS_ACTIONS: + with self.subTest(action): + self.RunFlowAndCheckResults( + paths=[os.path.join(self.fixture_path, "*.log")], + action=action, + conditions=[modification_time_condition], + expected_files=expected_files, + non_expected_files=non_expected_files, + ) def testAccessTimeConditionWithDifferentActions(self): expected_files = ["dpkg.log", "dpkg_false.log"] non_expected_files = ["auth.log"] - change_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1444444440) - access_time_condition = rdf_file_finder.FileFinderCondition( - condition_type=rdf_file_finder.FileFinderCondition.Type.ACCESS_TIME, - access_time=rdf_file_finder.FileFinderAccessTimeCondition( - min_last_access_time=change_time)) - for action in self.CONDITION_TESTS_ACTIONS: - self.RunFlowAndCheckResults( - action=action, - conditions=[access_time_condition], - expected_files=expected_files, - non_expected_files=non_expected_files) + with temp.AutoTempDirPath(remove_non_empty=True) as tempdir: + os.mkdir(os.path.join(tempdir, "searching")) + for fname in expected_files + non_expected_files: + shutil.copyfile( + 
os.path.join(self.fixture_path, fname),
+ os.path.join(tempdir, "searching", fname),
+ )
+
+ # TODO: the complexity of these tests and their reliance on
+ # shared state is horrible. All these tests should be rewritten.
+ self.base_path = tempdir
+ self.fixture_path = os.path.join(self.base_path, "searching")
+
+ os.utime(
+ os.path.join(self.fixture_path, "dpkg.log"),
+ times=(
+ change_time.AsSecondsSinceEpoch() + 1,
+ change_time.AsSecondsSinceEpoch() + 1,
+ ),
+ )
+ os.utime(
+ os.path.join(self.fixture_path, "dpkg_false.log"),
+ times=(
+ change_time.AsSecondsSinceEpoch() + 2,
+ change_time.AsSecondsSinceEpoch() + 2,
+ ),
+ )
+ os.utime(
+ os.path.join(self.fixture_path, "auth.log"),
+ times=(
+ change_time.AsSecondsSinceEpoch() - 1,
+ change_time.AsSecondsSinceEpoch() - 1,
+ ),
+ )
+
+ access_time_condition = rdf_file_finder.FileFinderCondition(
+ condition_type=rdf_file_finder.FileFinderCondition.Type.ACCESS_TIME,
+ access_time=rdf_file_finder.FileFinderAccessTimeCondition(
+ min_last_access_time=change_time
+ ),
+ )
+
+ for action in self.CONDITION_TESTS_ACTIONS:
+ with self.subTest(action):
+ self.RunFlowAndCheckResults(
+ paths=[os.path.join(self.fixture_path, "*.log")],
+ action=action,
+ conditions=[access_time_condition],
+ expected_files=expected_files,
+ non_expected_files=non_expected_files,
+ )

 def testInodeChangeTimeConditionWithDifferentActions(self):
 expected_files = ["dpkg.log", "dpkg_false.log"]
 non_expected_files = ["auth.log"]

- change_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1444444440)
- inode_change_time_condition = rdf_file_finder.FileFinderCondition(
- condition_type=rdf_file_finder.FileFinderCondition.Type
- .INODE_CHANGE_TIME,
- inode_change_time=rdf_file_finder.FileFinderInodeChangeTimeCondition(
- min_last_inode_change_time=change_time))
+ with temp.AutoTempDirPath(remove_non_empty=True) as tempdir:
+ os.mkdir(os.path.join(tempdir, "searching"))
+ for fname in non_expected_files + expected_files:
+ time.sleep(0.1)
+ shutil.copyfile(
+ os.path.join(self.fixture_path, fname),
+ os.path.join(tempdir, "searching", fname),
+ )

- for action in self.CONDITION_TESTS_ACTIONS:
- self.RunFlowAndCheckResults(
- action=action,
- conditions=[inode_change_time_condition],
- expected_files=expected_files,
- non_expected_files=non_expected_files)
+ # In the loop above auth.log is written first, so if we take a timestamp
+ # that's right after its inode change time, it should filter out auth.log,
+ # but keep dpkg.log and dpkg_false.log, as they would match the condition.
+ auth_log_ctime_micros = int(
+ os.stat(os.path.join(tempdir, "searching", "auth.log")).st_ctime_ns
+ * 1e-3
+ )
+ change_time = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(
+ auth_log_ctime_micros + 1
+ )
+
+ # TODO: the complexity of these tests and their reliance on
+ # shared state is horrible. All these tests should be rewritten.
+ self.base_path = tempdir + self.fixture_path = os.path.join(self.base_path, "searching") + + inode_change_time_condition = rdf_file_finder.FileFinderCondition( + condition_type=rdf_file_finder.FileFinderCondition.Type.INODE_CHANGE_TIME, + inode_change_time=rdf_file_finder.FileFinderInodeChangeTimeCondition( + min_last_inode_change_time=change_time + ), + ) + + for action in self.CONDITION_TESTS_ACTIONS: + with self.subTest(action): + self.RunFlowAndCheckResults( + paths=[os.path.join(self.fixture_path, "*.log")], + action=action, + conditions=[inode_change_time_condition], + expected_files=expected_files, + non_expected_files=non_expected_files, + ) def _RunTSKFileFinder(self, paths): image_path = os.path.join(self.base_path, "ntfs_img.dd") @@ -758,12 +895,13 @@ def _RunTSKFileFinder(self, paths): with test_lib.SuppressLogs(): flow_test_lib.TestFlowHelper( file_finder.FileFinder.__name__, - self.client_mock, + client_mock=action_mocks.ClientFileFinderWithVFS(), client_id=self.client_id, paths=paths, pathtype=rdf_paths.PathSpec.PathType.TSK, action=rdf_file_finder.FileFinderAction(action_type=action), - creator=self.test_username) + creator=self.test_username, + ) def _ListTestChildPathInfos(self, path_components, @@ -791,44 +929,6 @@ def _ReadTestFile(self, db.ClientPath(self.client_id, path_type, components=tuple(components))) return fd.read(10000000) - def testRecursiveADSHandling(self): - """This tests some more obscure NTFS features - ADSs on directories.""" - self._RunTSKFileFinder(["adstest/**"]) - self._CheckDir() - self._CheckSubdir() - - def testADSHandling(self): - self._RunTSKFileFinder(["adstest/*"]) - self._CheckDir() - - def _CheckDir(self): - children = self._ListTestChildPathInfos(["ntfs_img.dd:32256", "adstest"]) - - # There should be four entries: - # one file, one directory, and one ADS for each. - self.assertLen(children, 4) - - data = self._ReadTestFile(["ntfs_img.dd:32256", "adstest", "a.txt"]) - self.assertEqual(data, b"This is a.txt") - data = self._ReadTestFile(["ntfs_img.dd:32256", "adstest", "a.txt:ads.txt"]) - self.assertEqual(data, b"This is the ads for a.txt") - data = self._ReadTestFile(["ntfs_img.dd:32256", "adstest", "dir:ads.txt"]) - self.assertEqual(data, b"This is the dir ads") - - def _CheckSubdir(self): - base_components = ["ntfs_img.dd:32256", "adstest", "dir"] - children = self._ListTestChildPathInfos(base_components) - - # There should be three entries: two files, one has an ADS. 
-    self.assertLen(children, 3)
-
-    data = self._ReadTestFile(base_components + ["b.txt"])
-    self.assertEqual(data, b"This is b.txt")
-    data = self._ReadTestFile(base_components + ["b.txt:ads.txt"])
-    self.assertEqual(data, b"This is the ads for b.txt")
-    data = self._ReadTestFile(base_components + ["no_ads.txt"])
-    self.assertEqual(data, b"This file has no ads")
-
   def testEmptyPathListDoesNothing(self):
     flow_test_lib.TestFlowHelper(
         file_finder.FileFinder.__name__,
@@ -974,34 +1074,6 @@ def testLinksAndContent(self):
     self.assertLen(results, 2)


-class TestFileFinderFlowWithImplementationType(TestFileFinderFlow):
-
-  def RunFlow(
-      self,
-      paths: Optional[List[str]] = None,
-      conditions: Optional[List[rdf_file_finder.FileFinderCondition]] = None,
-      action: Optional[rdf_file_finder.FileFinderAction] = None,
-      implementation_type: Optional[rdf_structs.EnumNamedValue] = None
-  ) -> Sequence[Any]:
-
-    results = super().RunFlow(
-        paths=paths,
-        conditions=conditions,
-        action=action,
-        implementation_type=rdf_paths.PathSpec.ImplementationType.DIRECT)
-
-    for result in results:
-      if result.HasField("stat_entry"):
-        self.assertEqual(result.stat_entry.pathspec.implementation_type,
-                         rdf_paths.PathSpec.ImplementationType.DIRECT)
-      for match in result.matches:
-        if match.HasField("pathspec"):
-          self.assertEqual(match.pathspec.implementation_type,
-                           rdf_paths.PathSpec.ImplementationType.DIRECT)
-
-    return results
-
-
 class TestClientFileFinderFlow(flow_test_lib.FlowTestsBaseclass):
   """Test the ClientFileFinder flow."""

@@ -1326,8 +1398,16 @@ def testLinksAndContent(self):

     results, _ = self._RunCFF([path_glob], action, conditions=[condition])

-    self.assertLen(results, 1)
-    self.assertEqual(results[0].stat_entry.pathspec.path, path)
+    # ClientFileFinder follows links that point to regular files by default,
+    # hence 2 results: one for the link and one for the file.
+    self.assertLen(results, 2)
+    self.assertCountEqual(
+        [
+            results[0].stat_entry.pathspec.path,
+            results[1].stat_entry.pathspec.path,
+        ],
+        [path, lnk_path],
+    )

   def testInterpolationMissingAttributes(self):
     creator = db_test_utils.InitializeUser(data_store.REL_DB)
@@ -1389,7 +1469,36 @@ def testInterpolationUnknownAttributes(self):
     self.assertIn("foo", log_entries[0].message)
     self.assertIn("bar", log_entries[1].message)

-  def testInterpolationNoKnowledgebase(self):
+  def testSkipsGlobsWithInterpolationWhenNoKnowledgeBase(self):
+    creator = db_test_utils.InitializeUser(data_store.REL_DB)
+    client_id = db_test_utils.InitializeClient(data_store.REL_DB)
+
+    # We do not write any snapshot, so that there is no knowledgebase for the
+    # client.
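+    # With a snapshot, %%os%% would interpolate to a concrete value such as
+    # "Linux"; with an empty knowledgebase there is nothing to substitute, so
+    # the glob below is expected to be skipped while the literal /var/foo path
+    # is still processed.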
+
+    flow_args = rdf_file_finder.FileFinderArgs()
+    flow_args.action = rdf_file_finder.FileFinderAction.Stat()
+    flow_args.paths = ["/var/foo", "%%os%%"]
+
+    flow_id = flow_test_lib.StartFlow(
+        file_finder.ClientFileFinder,
+        creator=creator,
+        client_id=client_id,
+        flow_args=flow_args,
+    )
+
+    flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
+    self.assertEqual(flow_obj.flow_state, flows_pb2.Flow.RUNNING)
+    log_entries = data_store.REL_DB.ReadFlowLogEntries(
+        client_id=client_id, flow_id=flow_id, offset=0, count=1024
+    )
+    self.assertLen(log_entries, 1)
+    self.assertIn(
+        "Skipping glob '%%os%%': can't interpolate with an empty knowledge"
+        " base",
+        log_entries[0].message,
+    )
+
+  def testFailsIfAllGlobsAreSkippedDueToNoKnowledgeBase(self):
     creator = db_test_utils.InitializeUser(data_store.REL_DB)
     client_id = db_test_utils.InitializeClient(data_store.REL_DB)

@@ -1407,7 +1516,11 @@ def testInterpolationNoKnowledgebase(self):

     flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
     self.assertEqual(flow_obj.flow_state, flows_pb2.Flow.ERROR)
-    self.assertIn("No knowledgebase available", flow_obj.error_message)
+    self.assertIn(
+        "All globs skipped, as there's no knowledgebase available for"
+        " interpolation",
+        flow_obj.error_message,
+    )


 def main(argv):
diff --git a/grr/server/grr_response_server/flows/general/filesystem.py b/grr/server/grr_response_server/flows/general/filesystem.py
index 800d293bb6..467615a873 100644
--- a/grr/server/grr_response_server/flows/general/filesystem.py
+++ b/grr/server/grr_response_server/flows/general/filesystem.py
@@ -10,6 +10,7 @@
 from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
 from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
 from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
+from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
 from grr_response_core.lib.rdfvalues import paths as rdf_paths
 from grr_response_core.lib.rdfvalues import structs as rdf_structs
 from grr_response_proto import flows_pb2
@@ -96,6 +97,47 @@ def WriteStatEntries(stat_entries, client_id):
       _FilterOutPathInfoDuplicates(path_infos))


+def WriteFileFinderResults(
+    file_finder_results: Iterable[rdf_file_finder.FileFinderResult],
+    client_id: str,
+) -> None:
+  """Persists information about file finder results.
+
+  Args:
+    file_finder_results: A list of `FileFinderResult` instances.
+    client_id: The id of the client the results come from.
+  """
+
+  path_infos = []
+  for r in file_finder_results:
+    if r.stat_entry.pathspec.last.stream_name:
+      # This is an ADS. In this case we always need to create a file or
+      # we won't be able to access the data. New clients send the correct mode
+      # already but to make sure, we set this to a regular file anyways.
+      # Clear all file type bits:
+      r.stat_entry.st_mode &= ~stat_type_mask
+      r.stat_entry.st_mode |= stat.S_IFREG
+
+    path_info = rdf_objects.PathInfo.FromStatEntry(r.stat_entry)
+    if r.HasField("hash_entry"):
+      path_info.hash_entry = r.hash_entry
+    path_infos.append(path_info)
+
+  # NOTE: TSK may return duplicate entries. This may be either due to
+  # a bug in the TSK implementation, or due to the fact that TSK is capable
+  # of returning deleted files' information. Our VFS data model only supports
+  # storing multiple versions of the files when we collect the versions
+  # ourselves. At the moment we can't store multiple versions of the files
+  # "as returned by TSK".
+  #
+  # Current behaviour is to simply drop excessive versions before the
+  # WritePathInfos call. This way files returned by TSK will still make it
+  # into the flow's results, but not into the VFS data.
+  data_store.REL_DB.WritePathInfos(
+      client_id, _FilterOutPathInfoDuplicates(path_infos)
+  )
+
+
 class ListDirectoryArgs(rdf_structs.RDFProtoStruct):
   protobuf = flows_pb2.ListDirectoryArgs
   rdf_deps = [
diff --git a/grr/server/grr_response_server/flows/general/find.py b/grr/server/grr_response_server/flows/general/find.py
deleted file mode 100644
index 4604fae585..0000000000
--- a/grr/server/grr_response_server/flows/general/find.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-"""Find files on the client."""
-
-from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
-from grr_response_core.lib.rdfvalues import structs as rdf_structs
-from grr_response_proto import flows_pb2
-from grr_response_server import data_store
-from grr_response_server import flow_base
-from grr_response_server import server_stubs
-from grr_response_server.rdfvalues import objects as rdf_objects
-
-
-class FindFilesArgs(rdf_structs.RDFProtoStruct):
-  protobuf = flows_pb2.FindFilesArgs
-  rdf_deps = [
-      rdf_client_fs.FindSpec,
-  ]
-
-  def Validate(self):
-    """Ensure that the request is sane."""
-    self.findspec.Validate()
-
-
-class FindFiles(flow_base.FlowBase):
-  r"""Find files on the client.
-
-  The logic is:
-  - Find files under "Path"
-  - Filter for files with os.path.basename matching "Path Regular Expression"
-  - Filter for files with sizes between min and max limits
-  - Filter for files that contain "Data Regular Expression" in the first 1MB
-    of file data
-  - Return a StatEntry rdfvalue for each of the results
-
-  Path and data regexes, and file size limits are optional. Don"t encode path
-  information in the regex. See correct usage below.
-
-  Example:
-
-  Path="/usr/local"
-  Path Regular Expression="admin"
-
-  Match: "/usr/local/bin/admin" (file)
-  Match: "/usr/local/admin" (directory)
-  No Match: "/usr/admin/local/blah"
-
-  The result from this flow is a list of StatEntry objects, one for
-  each file matching the criteria. Matching files will not be
-  downloaded by this flow, only the metadata of the file is fetched.
-
-  Returns to parent flow:
-    rdf_client_fs.StatEntry objects for each found file.
-  """
-
-  category = "/Filesystem/"
-  args_type = FindFilesArgs
-  friendly_name = "Find Files"
-
-  MAX_FILES_TO_CHECK = 10000000
-
-  def Start(self):
-    """Issue the find request to the client."""
-
-    # In newer clients, this action is not an iterator anymore so this field is
-    # unused. We set it anyways for legacy clients.
-    # TODO(amoser): Remove this no later than April, 2022.
-    self.args.findspec.iterator.number = self.MAX_FILES_TO_CHECK
-
-    # Convert the filename glob to a regular expression.
-    if self.args.findspec.path_glob:
-      self.args.findspec.path_regex = self.args.findspec.path_glob.AsRegEx()
-
-    # Call the client with it
-    self.CallClient(
-        server_stubs.Find,
-        self.args.findspec,
-        next_state=self.StoreResults.__name__)
-
-  def StoreResults(self, responses):
-    """Stores the results returned from the client."""
-    if not responses.success:
-      raise IOError(responses.status)
-
-    for response in responses:
-
-      # TODO(amoser): FindSpec is only returned by legacy clients. Remove this
-      # no later than April, 2022.
- if isinstance(response, rdf_client_fs.FindSpec): - response = response.hit - - path_info = rdf_objects.PathInfo.FromStatEntry(response) - data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) - - # Send the stat to the parent flow. - self.SendReply(response) diff --git a/grr/server/grr_response_server/flows/general/find_test.py b/grr/server/grr_response_server/flows/general/find_test.py deleted file mode 100644 index ace75f3fbb..0000000000 --- a/grr/server/grr_response_server/flows/general/find_test.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python -"""Tests for the Find flow.""" - -import re - -from absl import app - -from grr_response_client.client_actions import searching -from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs -from grr_response_core.lib.rdfvalues import paths as rdf_paths -from grr_response_server.flows.general import find -from grr.test_lib import action_mocks -from grr.test_lib import flow_test_lib -from grr.test_lib import test_lib -from grr.test_lib import vfs_test_lib - - -class TestFindFlow(flow_test_lib.FlowTestsBaseclass): - """Test the interrogate flow.""" - - def setUp(self): - super().setUp() - vfs_overrider = vfs_test_lib.VFSOverrider( - rdf_paths.PathSpec.PathType.OS, vfs_test_lib.ClientVFSHandlerFixture) - vfs_overrider.Start() - self.addCleanup(vfs_overrider.Stop) - self.client_id = self.SetupClient(0) - - def testInvalidFindSpec(self): - """Test that its impossible to produce an invalid findspec.""" - # The regular expression is not valid. - with self.assertRaises(re.error): - rdf_client_fs.FindSpec(path_regex="[") - - def testFindFiles(self): - """Test that the Find flow works with files.""" - client_mock = action_mocks.ActionMock(searching.Find) - - # Prepare a findspec. - findspec = rdf_client_fs.FindSpec( - path_regex="bash", - pathspec=rdf_paths.PathSpec( - path="/", pathtype=rdf_paths.PathSpec.PathType.OS)) - - session_id = flow_test_lib.TestFlowHelper( - find.FindFiles.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username, - findspec=findspec) - - # Check the results. - results = flow_test_lib.GetFlowResults(self.client_id, session_id) - - # Should match ["bash" and "rbash"]. - matches = set([x.pathspec.Basename() for x in results]) - self.assertCountEqual(matches, ["bash", "rbash"]) - - self.assertLen(results, 4) - for child in results: - self.assertEndsWith(child.pathspec.Basename(), "bash") - self.assertIsInstance(child, rdf_client_fs.StatEntry) - - def testFindFilesWithGlob(self): - """Test that the Find flow works with glob.""" - client_mock = action_mocks.ActionMock(searching.Find) - - # Prepare a findspec. - findspec = rdf_client_fs.FindSpec( - path_glob="bash*", - pathspec=rdf_paths.PathSpec( - path="/", pathtype=rdf_paths.PathSpec.PathType.OS)) - - session_id = flow_test_lib.TestFlowHelper( - find.FindFiles.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username, - findspec=findspec) - - # Check the results. - results = flow_test_lib.GetFlowResults(self.client_id, session_id) - - # Make sure that bash is a file. - matches = set([x.pathspec.Basename() for x in results]) - self.assertEqual(matches, set(["bash"])) - - self.assertLen(results, 2) - for child in results: - self.assertEndsWith(child.pathspec.Basename(), "bash") - self.assertIsInstance(child, rdf_client_fs.StatEntry) - - def testFindDirectories(self): - """Test that the Find flow works with directories.""" - - client_mock = action_mocks.ActionMock(searching.Find) - - # Prepare a findspec. 
- findspec = rdf_client_fs.FindSpec( - path_regex="bin", - pathspec=rdf_paths.PathSpec( - path="/", pathtype=rdf_paths.PathSpec.PathType.OS)) - - session_id = flow_test_lib.TestFlowHelper( - find.FindFiles.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username, - findspec=findspec) - - # Check the results. - results = flow_test_lib.GetFlowResults(self.client_id, session_id) - - # Make sure that bin is a directory - self.assertLen(results, 2) - for child in results: - self.assertEqual(child.__class__.__name__, "StatEntry") - self.assertIn("bin", child.pathspec.CollapsePath()) - - def testCollectionOverwriting(self): - """Test we overwrite the collection every time the flow is executed.""" - - client_mock = action_mocks.ActionMock(searching.Find) - - # Prepare a findspec. - findspec = rdf_client_fs.FindSpec() - findspec.path_regex = "bin" - findspec.pathspec.path = "/" - findspec.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS - - session_id = flow_test_lib.TestFlowHelper( - find.FindFiles.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username, - findspec=findspec) - - # Check the results. - results = flow_test_lib.GetFlowResults(self.client_id, session_id) - - self.assertLen(results, 2) - - # Now find a new result, should overwrite the collection - findspec.path_regex = "dd" - session_id = flow_test_lib.TestFlowHelper( - find.FindFiles.__name__, - client_mock, - client_id=self.client_id, - creator=self.test_username, - findspec=findspec) - - # Check the results. - results = flow_test_lib.GetFlowResults(self.client_id, session_id) - self.assertLen(results, 1) - - -def main(argv): - # Run the full test suite - test_lib.main(argv) - - -if __name__ == "__main__": - app.run(main) diff --git a/grr/server/grr_response_server/flows/general/fingerprint.py b/grr/server/grr_response_server/flows/general/fingerprint.py index 821c8d3ce1..f543c4491e 100644 --- a/grr/server/grr_response_server/flows/general/fingerprint.py +++ b/grr/server/grr_response_server/flows/general/fingerprint.py @@ -1,33 +1,14 @@ #!/usr/bin/env python """Invoke the fingerprint client action on a file.""" -from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client_action as rdf_client_action -from grr_response_core.lib.rdfvalues import crypto as rdf_crypto -from grr_response_core.lib.rdfvalues import paths as rdf_paths -from grr_response_core.lib.rdfvalues import structs as rdf_structs -from grr_response_proto import flows_pb2 from grr_response_server import data_store from grr_response_server import flow_base from grr_response_server import server_stubs from grr_response_server.rdfvalues import objects as rdf_objects -class FingerprintFileArgs(rdf_structs.RDFProtoStruct): - protobuf = flows_pb2.FingerprintFileArgs - rdf_deps = [ - rdf_paths.PathSpec, - ] - - -class FingerprintFileResult(rdf_structs.RDFProtoStruct): - protobuf = flows_pb2.FingerprintFileResult - rdf_deps = [ - rdf_crypto.Hash, - rdfvalue.RDFURN, - ] - - +# TODO: Remove this mixin when FileFinder no longer exists. 
class FingerprintFileLogic(object): """Retrieve all fingerprints of a file.""" @@ -91,27 +72,3 @@ def _ProcessFingerprint(self, responses): def ReceiveFileFingerprint(self, urn, hash_obj, request_data=None): """This method will be called with the new urn and the received hash.""" - - -class FingerprintFile(FingerprintFileLogic, flow_base.FlowBase): - """Retrieve all fingerprints of a file.""" - - category = "/Filesystem/" - args_type = FingerprintFileArgs - behaviours = flow_base.BEHAVIOUR_ADVANCED - - def Start(self): - """Issue the fingerprinting request.""" - super().Start() - - self.FingerprintFile(self.args.pathspec) - - def ReceiveFileFingerprint(self, urn, hash_obj, request_data=None): - # Notify any parent flows. - self.SendReply(FingerprintFileResult(file_urn=urn, hash_entry=hash_obj)) - - def End(self, responses): - """Finalize the flow.""" - super().End(responses) - - self.Log("Finished fingerprinting %s", self.args.pathspec.path) diff --git a/grr/server/grr_response_server/flows/general/fingerprint_test.py b/grr/server/grr_response_server/flows/general/fingerprint_test.py deleted file mode 100644 index bc12cc8f21..0000000000 --- a/grr/server/grr_response_server/flows/general/fingerprint_test.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python -"""Tests for the Fingerprint flow.""" - -import os - -from absl import app - -from grr_response_client.client_actions import file_fingerprint -from grr_response_core.lib.rdfvalues import paths as rdf_paths -from grr_response_server import data_store -from grr_response_server.flows.general import fingerprint as flows_fingerprint -from grr_response_server.rdfvalues import objects as rdf_objects -from grr.test_lib import action_mocks -from grr.test_lib import flow_test_lib -from grr.test_lib import test_lib - - -class TestFingerprintFlow(flow_test_lib.FlowTestsBaseclass): - """Test the Fingerprint flow.""" - - def testFingerprintPresence(self): - client_id = self.SetupClient(0) - - path = os.path.join(self.base_path, "winexec_img.dd") - pathspec = rdf_paths.PathSpec( - pathtype=rdf_paths.PathSpec.PathType.OS, path=path) - - pathspec.Append( - path="/winpmem-amd64.sys", pathtype=rdf_paths.PathSpec.PathType.TSK) - - client_mock = action_mocks.ActionMock(file_fingerprint.FingerprintFile) - session_id = flow_test_lib.TestFlowHelper( - flows_fingerprint.FingerprintFile.__name__, - client_mock, - creator=self.test_username, - client_id=client_id, - pathspec=pathspec) - - results = flow_test_lib.GetFlowResults(client_id, session_id) - self.assertLen(results, 1) - for reply in results: - self.assertIsInstance(reply, flows_fingerprint.FingerprintFileResult) - self.assertTrue( - str(reply.file_urn).endswith( - "test_data/winexec_img.dd/winpmem-amd64.sys")) - - self.assertEqual( - str(reply.hash_entry.sha256), - "40ac571d6d85d669a9a19d498d9f926525481430056ff65746f" - "baf36bee8855f") - self.assertEqual( - str(reply.hash_entry.sha1), - "6e17df1a1020a152f2bf4445d1004b192ae8e42d") - self.assertEqual( - str(reply.hash_entry.md5), "12be1109aa3d3b46c9398972af2008e1") - - path_info = rdf_objects.PathInfo.FromPathSpec(pathspec) - path_info = data_store.REL_DB.ReadPathInfo( - client_id, path_info.path_type, components=tuple(path_info.components)) - - hash_obj = path_info.hash_entry - - self.assertEqual(hash_obj.pecoff_sha1, - "1f32fa4eedfba023653c094143d90999f6b9bc4f") - - self.assertEqual(hash_obj.signed_data[0].revision, 512) - - -def main(argv): - # Run the full test suite - test_lib.main(argv) - - -if __name__ == "__main__": - app.run(main) diff --git 
a/grr/server/grr_response_server/flows/general/osquery.py b/grr/server/grr_response_server/flows/general/osquery.py index 0caa443199..4e25d2aff9 100644 --- a/grr/server/grr_response_server/flows/general/osquery.py +++ b/grr/server/grr_response_server/flows/general/osquery.py @@ -211,7 +211,7 @@ def _StatForFileArrived( self.StartFileFetch(stat_entry.pathspec) - def Start(self): + def Start(self): # pytype: disable=signature-mismatch # overriding-parameter-count-checks super(OsqueryFlow, self).Start(file_size=FILE_COLLECTION_MAX_SINGLE_FILE_BYTES) self.state.progress = rdf_osquery.OsqueryProgress() diff --git a/grr/server/grr_response_server/flows/general/registry.py b/grr/server/grr_response_server/flows/general/registry.py index a993810350..c682c7e22f 100644 --- a/grr/server/grr_response_server/flows/general/registry.py +++ b/grr/server/grr_response_server/flows/general/registry.py @@ -63,13 +63,20 @@ def _ConditionsToFileFinderConditions(conditions): return result -class RegistryFinder(flow_base.FlowBase): - """This flow looks for registry items matching given criteria.""" +class LegacyRegistryFinder(flow_base.FlowBase): + """This flow looks for registry items matching given criteria. - friendly_name = "Registry Finder" + TODO: remove by EOY2024. + + This flow is scheduled for removal and is no longer tested (all registry + finder related tests are using the ClientRegistryFinder or RegistryFinder, + which is now an alias to ClientRegistryFinder). + """ + + friendly_name = "Legacy Registry Finder (deprecated)" category = "/Registry/" args_type = RegistryFinderArgs - behaviours = flow_base.BEHAVIOUR_BASIC + behaviours = flow_base.BEHAVIOUR_DEBUG @classmethod def GetDefaultArgs(cls, username=None): @@ -127,6 +134,13 @@ def Done(self, responses): self.SendReply(response) +class RegistryFinder(ClientRegistryFinder): + """Legacy alias for ClientRegistryFinder.""" + + friendly_name = "Registry Finder" + behaviours = flow_base.BEHAVIOUR_DEBUG + + class CollectRunKeyBinaries(flow_base.FlowBase): """Collect the binaries used by Run and RunOnce keys on the system. diff --git a/grr/server/grr_response_server/flows/general/registry_finder_test.py b/grr/server/grr_response_server/flows/general/registry_finder_test.py index 2bb86324e5..b5a0b7e8b5 100644 --- a/grr/server/grr_response_server/flows/general/registry_finder_test.py +++ b/grr/server/grr_response_server/flows/general/registry_finder_test.py @@ -20,7 +20,7 @@ def setUp(self): self.addCleanup(registry_stubber.Stop) def _RunRegistryFinder(self, paths=None): - client_mock = action_mocks.GlobClientMock() + client_mock = action_mocks.ClientFileFinderWithVFS() client_id = self.SetupClient(0) @@ -62,40 +62,49 @@ def testRegistryFinder(self): self.assertEqual(results[idx].stat_entry.registry_data.GetValue(), "DefaultValue") - def testListingRegistryKeysDoesNotYieldMTimes(self): - # Just listing all keys does not generate a full stat entry for each of + def testListingRegistryKeysDoesYieldMTimes(self): + # Just listing all keys does generate a full stat entry for each of # the results. results = self._RunRegistryFinder( ["HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest/*"]) - self.assertLen(results, 2) - for result in results: - st = result.stat_entry - self.assertIsNone(st.st_mtime) + results = sorted(results, key=lambda x: x.stat_entry.pathspec.path) - # Explicitly calling RegistryFinder on a value does though. + # We expect 2 results: Value1 and Value2. 
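+      # Unlike the old GlobClientMock-based run, ClientFileFinder stats every
+      # matched value, so each result carries a full stat entry, including
+      # st_mtime.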
+ self.assertLen(results, 2) + self.assertEqual( + results[0].stat_entry.pathspec.path, + "/HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest/Value1", + ) + self.assertEqual(results[0].stat_entry.st_mtime, 110) + self.assertEqual( + results[1].stat_entry.pathspec.path, + "/HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest/Value2", + ) + self.assertEqual(results[1].stat_entry.st_mtime, 120) + + # Explicitly calling RegistryFinder on a value does that as well. results = self._RunRegistryFinder([ "HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest/Value1", "HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest/Value2", ]) + results = sorted(results, key=lambda x: x.stat_entry.pathspec.path) + # We expect 2 results: Value1 and Value2. self.assertLen(results, 2) - for result in results: - st = result.stat_entry - path = str(st.pathspec.path) - if "Value1" in path: - self.assertEqual(st.st_mtime, 110) - elif "Value2" in path: - self.assertEqual(st.st_mtime, 120) - else: - self.fail("Unexpected value: %s" % path) - - def testListingRegistryHivesWorksCorrectly(self): - results = self._RunRegistryFinder(["*"]) - self.assertLen(results, 2) - self.assertTrue( - [r for r in results if r.stat_entry.pathspec.pathtype == "REGISTRY"]) - self.assertCountEqual([r.stat_entry.pathspec.path for r in results], - ["/HKEY_LOCAL_MACHINE", "/HKEY_USERS"]) + self.assertEqual( + results[0].stat_entry.pathspec.path, + "/HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest/Value1", + ) + self.assertEqual(results[0].stat_entry.st_mtime, 110) + self.assertEqual( + results[1].stat_entry.pathspec.path, + "/HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest/Value2", + ) + self.assertEqual(results[1].stat_entry.st_mtime, 120) + + def testListingRegistryHivesRaises(self): + with self.assertRaisesRegex(RuntimeError, "is not absolute"): + self._RunRegistryFinder(["*"]) def main(argv): diff --git a/grr/server/grr_response_server/flows/general/registry_init.py b/grr/server/grr_response_server/flows/general/registry_init.py index 1e9f039b45..b2b77fc276 100644 --- a/grr/server/grr_response_server/flows/general/registry_init.py +++ b/grr/server/grr_response_server/flows/general/registry_init.py @@ -5,15 +5,14 @@ # These imports populate the Flow registry from grr_response_server.flows import file from grr_response_server.flows.general import administrative -from grr_response_server.flows.general import apple_firmware from grr_response_server.flows.general import artifact_fallbacks from grr_response_server.flows.general import collectors +from grr_response_server.flows.general import crowdstrike from grr_response_server.flows.general import discovery +from grr_response_server.flows.general import dummy from grr_response_server.flows.general import export from grr_response_server.flows.general import file_finder from grr_response_server.flows.general import filesystem -from grr_response_server.flows.general import find -from grr_response_server.flows.general import fingerprint from grr_response_server.flows.general import hardware from grr_response_server.flows.general import large_file from grr_response_server.flows.general import memory diff --git a/grr/server/grr_response_server/flows/general/registry_test.py b/grr/server/grr_response_server/flows/general/registry_test.py index 25d4dd85a4..4399f097de 100644 --- a/grr/server/grr_response_server/flows/general/registry_test.py +++ b/grr/server/grr_response_server/flows/general/registry_test.py @@ -5,7 +5,6 @@ from absl import app -from grr_response_client.client_actions import searching from grr_response_core.lib import rdfvalue from 
grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder @@ -46,10 +45,7 @@ def RunFlow(self, client_id, keys_paths=None, conditions=None): if conditions is None: conditions = [] - client_mock = action_mocks.ActionMock( - searching.Find, - searching.Grep, - ) + client_mock = action_mocks.ClientFileFinderWithVFS() session_id = flow_test_lib.TestFlowHelper( registry.RegistryFinder.__name__, @@ -150,7 +146,9 @@ def testFindsKeyIfItMatchesLiteralMatchCondition(self): self.assertLen(results, 1) self.assertLen(results[0].matches, 1) - self.assertEqual(results[0].matches[0].offset, 15) + # The matching fragment is at offset 15 and bytes_before is 10. Hence, + # the offset is 5. + self.assertEqual(results[0].matches[0].offset, 5) self.assertEqual(results[0].matches[0].data, b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun") @@ -190,7 +188,9 @@ def testFindsKeyIfItMatchesRegexMatchCondition(self): self.assertLen(results, 1) self.assertLen(results[0].matches, 1) - self.assertEqual(results[0].matches[0].offset, 15) + # The matching fragment is at offset 15 and bytes_before is 10. Hence, + # the offset is 5. + self.assertEqual(results[0].matches[0].offset, 5) self.assertEqual(results[0].matches[0].data, b"ramFiles%\\Windows Sidebar\\Sidebar.exe /autoRun") diff --git a/grr/server/grr_response_server/flows/general/transfer.py b/grr/server/grr_response_server/flows/general/transfer.py index e384d4ac85..026de8db84 100644 --- a/grr/server/grr_response_server/flows/general/transfer.py +++ b/grr/server/grr_response_server/flows/general/transfer.py @@ -176,6 +176,8 @@ class GetFile(flow_base.FlowBase): """ category = "/Filesystem/" + friendly_name = "GetFile (deprecated)" + behaviours = flow_base.BEHAVIOUR_DEBUG args_type = GetFileArgs @@ -419,10 +421,9 @@ class MultiGetFileLogic(object): # allows us to amortize file store round trips and increases throughput. MIN_CALL_TO_FILE_STORE = 200 - def Start(self, - file_size=0, - maximum_pending_files=1000, - use_external_stores=False): + def Start( + self, file_size=0, maximum_pending_files=1000, use_external_stores=True + ): """Initialize our state.""" super().Start() @@ -1046,6 +1047,9 @@ class MultiGetFile(MultiGetFileLogic, flow_base.FlowBase): progress_type = MultiGetFileProgress result_types = (rdf_client_fs.StatEntry,) + category = "/Filesystem/" + behaviours = flow_base.BEHAVIOUR_DEBUG + def GetProgress(self) -> MultiGetFileProgress: return MultiGetFileProgress( num_pending_hashes=len(self.state.pending_hashes), @@ -1193,30 +1197,3 @@ def ProcessMessages( blobs.append(data) data_store.BLOBS.WriteBlobsWithUnknownHashes(blobs) - - -class SendFile(flow_base.FlowBase): - """This flow sends a file to remote listener. - - To use this flow, choose a key and an IV in hex format (if run from the GUI, - there will be a pregenerated pair key and iv for you to use) and run a - listener on the server you want to use like this: - - nc -l | openssl aes-128-cbc -d -K -iv > - - Returns to parent flow: - A rdf_client_fs.StatEntry of the sent file. 
- """ - - category = "/Filesystem/" - args_type = rdf_client_action.SendFileRequest - - def Start(self): - """This issues the sendfile request.""" - self.CallClient( - server_stubs.SendFile, self.args, next_state=self.Done.__name__) - - def Done(self, responses): - if not responses.success: - self.Log(responses.status.error_message) - raise flow_base.FlowError(responses.status.error_message) diff --git a/grr/server/grr_response_server/flows/general/webhistory_test.py b/grr/server/grr_response_server/flows/general/webhistory_test.py index 103bd54a35..b35d7cd2e0 100644 --- a/grr/server/grr_response_server/flows/general/webhistory_test.py +++ b/grr/server/grr_response_server/flows/general/webhistory_test.py @@ -58,7 +58,7 @@ def setUp(self): ] self.client_id = self.SetupClient(0, system="Linux", users=users) - self.client_mock = action_mocks.FileFinderClientMock() + self.client_mock = action_mocks.ClientFileFinderWithVFS() def testChromeHistoryFetch(self): """Test that downloading the Chrome history works.""" diff --git a/grr/server/grr_response_server/foreman.py b/grr/server/grr_response_server/foreman.py index 4f6cea1011..38d5cf8981 100644 --- a/grr/server/grr_response_server/foreman.py +++ b/grr/server/grr_response_server/foreman.py @@ -54,46 +54,36 @@ def _RunAction(self, rule, client_id): try: if self._CheckIfHuntTaskWasAssigned(client_id, rule.hunt_id): - logging.info( - "Foreman: ignoring hunt %s on client %s: was started " - "here before", rule.hunt_id, client_id) - else: - try: - hunt.StartHuntFlowOnClient(client_id, rule.hunt_id) - logging.info("Foreman: Started hunt %s on client %s.", rule.hunt_id, - client_id) - except flow.CanNotStartFlowWithExistingIdError: - logging.info( - "Foreman: ignoring hunt %s on client %s: was started " - "here before", rule.hunt_id, client_id) - - actions_count += 1 + raise flow.CanNotStartFlowWithExistingIdError(client_id, rule.hunt_id) + + hunt.StartHuntFlowOnClient(client_id, rule.hunt_id) + logging.info( + "Foreman: Started hunt %s on client %s.", rule.hunt_id, client_id + ) + actions_count += 1 + + except flow.CanNotStartFlowWithExistingIdError: + logging.info( + "Foreman: ignoring hunt %s on client %s: was started here before", + rule.hunt_id, + client_id, + ) # There could be all kinds of errors we don't know about when starting the # hunt so we catch everything here. except Exception as e: # pylint: disable=broad-except - logging.exception("Failure running foreman action on client %s: %s", - rule.hunt_id, e) + logging.exception( + "Failure running hunt %s on client %s: %s", rule.hunt_id, client_id, e + ) return actions_count def _GetLastForemanRunTime(self, client_id): md = data_store.REL_DB.ReadClientMetadata(client_id) - # TODO: we shouldn't care about - # last_fleetspeak_validation_info here. The WriteClientMetadata method - # should have a more predictable behavior and not nullify - # last_fleetspeak_validation_info if it's passed as None. 
- lfvi = None - if md.HasField("last_fleetspeak_validation_info"): - lfvi = md.last_fleetspeak_validation_info.ToStringDict() - return md.last_foreman_time or rdfvalue.RDFDatetime(0), lfvi - - def _SetLastForemanRunTime(self, client_id, latest_rule, - fleetspeak_validation_info): - data_store.REL_DB.WriteClientMetadata( - client_id, - last_foreman=latest_rule, - fleetspeak_validation_info=fleetspeak_validation_info) + return md.last_foreman_time or rdfvalue.RDFDatetime(0) + + def _SetLastForemanRunTime(self, client_id, latest_rule): + data_store.REL_DB.WriteClientMetadata(client_id, last_foreman=latest_rule) def AssignTasksToClient(self, client_id): """Examines our rules and starts up flows based on the client. @@ -108,8 +98,7 @@ def AssignTasksToClient(self, client_id): if not rules: return 0 - last_foreman_run, fleetspeak_validation_info = self._GetLastForemanRunTime( - client_id) + last_foreman_run = self._GetLastForemanRunTime(client_id) latest_rule_creation_time = max(rule.creation_time for rule in rules) @@ -117,8 +106,7 @@ def AssignTasksToClient(self, client_id): return 0 # Update the latest checked rule on the client. - self._SetLastForemanRunTime(client_id, latest_rule_creation_time, - fleetspeak_validation_info) + self._SetLastForemanRunTime(client_id, latest_rule_creation_time) relevant_rules = [] expired_rules = False diff --git a/grr/server/grr_response_server/frontend_lib.py b/grr/server/grr_response_server/frontend_lib.py index f6424f500e..86c3ecdd57 100644 --- a/grr/server/grr_response_server/frontend_lib.py +++ b/grr/server/grr_response_server/frontend_lib.py @@ -87,7 +87,7 @@ def EnrollFleetspeakClientIfNeeded( now = rdfvalue.RDFDatetime.Now() data_store.REL_DB.WriteClientMetadata( - client_id, first_seen=now, last_ping=now + client_id, first_seen=now, last_ping=now, fleetspeak_validation_info={} ) # Publish the client enrollment message. @@ -234,8 +234,18 @@ def ReceiveRRGResponse( flow_response = rdf_flow_objects.FlowResponse() flow_response.any_payload = packed_result elif response.HasField("log"): - # TODO: Add support for logs. 
- logging.warning("Dropping a log from '%s': %s", client_id, response) + log = response.log + + timestamp = rdfvalue.RDFDatetime.FromProtoTimestamp(log.timestamp) + level = rrg_pb2.Log.Level.Name(log.level) + + flow_log_entry = rdf_flow_objects.FlowLogEntry() + flow_log_entry.client_id = client_id + flow_log_entry.flow_id = f"{response.flow_id:016X}" + flow_log_entry.timestamp = timestamp + flow_log_entry.message = f"[RRG:{level}] {log.message}" + + data_store.REL_DB.WriteFlowLogEntry(flow_log_entry) return else: raise ValueError(f"Unexpected response: {response}") diff --git a/grr/server/grr_response_server/frontend_lib_test.py b/grr/server/grr_response_server/frontend_lib_test.py index 5782171895..7336792580 100644 --- a/grr/server/grr_response_server/frontend_lib_test.py +++ b/grr/server/grr_response_server/frontend_lib_test.py @@ -264,6 +264,31 @@ def testReceiveRRGResponseResult(self, db: abstract_db.Database): string.ParseFromString(flow_response.any_payload.value) self.assertEqual(string.value, "foobar") + @db_test_lib.WithDatabase + def testReceiveRRGResponseLog(self, db: abstract_db.Database): + client_id = db_test_utils.InitializeClient(db) + flow_id = db_test_utils.InitializeFlow(db, client_id) + + flow_request = rdf_flow_objects.FlowRequest() + flow_request.client_id = client_id + flow_request.flow_id = flow_id + flow_request.request_id = 1337 + db.WriteFlowRequests([flow_request]) + + response = rrg_pb2.Response() + response.flow_id = int(flow_id, 16) + response.request_id = 1337 + response.log.level = rrg_pb2.Log.Level.INFO + response.log.timestamp.GetCurrentTime() + response.log.message = "lorem ipsum dolor sit amet" + + self.server.ReceiveRRGResponse(client_id, response) + + logs = db.ReadFlowLogEntries(client_id, flow_id, offset=0, count=1024) + self.assertLen(logs, 1) + self.assertEqual(logs[0].message, "[RRG:INFO] lorem ipsum dolor sit amet") + self.assertGreater(logs[0].timestamp, rdfvalue.RDFDatetime(0)) + @db_test_lib.WithDatabase def testReceiveRRGResponseUnexpected(self, db: abstract_db.Database): client_id = db_test_utils.InitializeClient(db) diff --git a/grr/server/grr_response_server/gui/api_call_router.py b/grr/server/grr_response_server/gui/api_call_router.py index 8190759e31..8648257659 100644 --- a/grr/server/grr_response_server/gui/api_call_router.py +++ b/grr/server/grr_response_server/gui/api_call_router.py @@ -347,15 +347,6 @@ def ListClientCrashes(self, args, context=None): raise NotImplementedError() - @Category("Clients") - @ArgsType(api_client.ApiListClientActionRequestsArgs) - @ResultType(api_client.ApiListClientActionRequestsResult) - @Http("GET", "/api/clients//action-requests") - def ListClientActionRequests(self, args, context=None): - """List pending action requests for a given client.""" - - raise NotImplementedError() - @Category("Clients") @ArgsType(api_client.ApiGetClientLoadStatsArgs) @ResultType(api_client.ApiGetClientLoadStatsResult) diff --git a/grr/server/grr_response_server/gui/api_call_router_with_approval_checks.py b/grr/server/grr_response_server/gui/api_call_router_with_approval_checks.py index 1027aa99db..3ae9309ed7 100644 --- a/grr/server/grr_response_server/gui/api_call_router_with_approval_checks.py +++ b/grr/server/grr_response_server/gui/api_call_router_with_approval_checks.py @@ -40,8 +40,6 @@ administrative.ExecuteCommand, administrative.ExecutePythonHack, administrative.LaunchBinary, - administrative.Uninstall, - administrative.UpdateClient, administrative.UpdateConfiguration, ] @@ -94,45 +92,53 @@ def _CheckAccess(self, 
username, subject_id, approval_type): subject = approval_checks.BuildLegacySubject(subject_id, approval_type) if not errors: raise access_control.UnauthorizedAccess( - "No approval found.", subject=subject) + "No approval found.", subject=subject + ) else: raise access_control.UnauthorizedAccess( - " ".join(str(e) for e in errors), subject=subject) + " ".join(str(e) for e in errors), subject=subject + ) def CheckClientAccess(self, context, client_id): """Checks whether a given user can access given client.""" context.approval = self._CheckAccess( - context.username, str(client_id), - rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CLIENT) + context.username, + str(client_id), + rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CLIENT, + ) def CheckHuntAccess(self, context, hunt_id): """Checks whether a given user can access given hunt.""" context.approval = self._CheckAccess( - context.username, str(hunt_id), - rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_HUNT) + context.username, + str(hunt_id), + rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_HUNT, + ) def CheckCronJobAccess(self, context, cron_job_id): """Checks whether a given user can access given cron job.""" context.approval = self._CheckAccess( - context.username, str(cron_job_id), - rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CRON_JOB) + context.username, + str(cron_job_id), + rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CRON_JOB, + ) def CheckIfCanStartClientFlow(self, username, flow_name): """Checks whether a given user can start a given flow.""" flow_cls = registry.FlowRegistry.FLOW_REGISTRY.get(flow_name) - if flow_cls is None or not hasattr(flow_cls, - "category") or not flow_cls.category: + if flow_cls is None or not flow_cls.CanUseViaAPI(): raise access_control.UnauthorizedAccess( - "Flow %s can't be started via the API." % flow_name) + "Flow %s can't be started via the API." 
% flow_name + ) if flow_cls in RESTRICTED_FLOWS: try: self.CheckIfHasAccessToRestrictedFlows(username) except access_control.UnauthorizedAccess as e: raise access_control.UnauthorizedAccess( - "Not enough permissions to access restricted " - f"flow {flow_name}") from e + f"Not enough permissions to access restricted flow {flow_name}" + ) from e def CheckIfHasAccessToRestrictedFlows(self, username): """Checks whether a given user can access restricted (sensitive) flows.""" @@ -276,11 +282,6 @@ def ListClientCrashes(self, args, context=None): return self.delegate.ListClientCrashes(args, context=context) - def ListClientActionRequests(self, args, context=None): - self.access_checker.CheckClientAccess(context, args.client_id) - - return self.delegate.ListClientActionRequests(args, context=context) - def GetClientLoadStats(self, args, context=None): self.access_checker.CheckClientAccess(context, args.client_id) diff --git a/grr/server/grr_response_server/gui/api_call_router_with_approval_checks_test.py b/grr/server/grr_response_server/gui/api_call_router_with_approval_checks_test.py index c3b48ed981..f8767babaf 100644 --- a/grr/server/grr_response_server/gui/api_call_router_with_approval_checks_test.py +++ b/grr/server/grr_response_server/gui/api_call_router_with_approval_checks_test.py @@ -6,6 +6,7 @@ from absl import app from grr_response_server import access_control +from grr_response_server import data_store from grr_response_server import flow from grr_response_server.flows.general import osquery from grr_response_server.flows.general import timeline @@ -20,7 +21,7 @@ from grr_response_server.gui.api_plugins import timeline as api_timeline from grr_response_server.gui.api_plugins import user as api_user from grr_response_server.gui.api_plugins import vfs as api_vfs - +from grr_response_server.rdfvalues import objects as rdf_objects from grr.test_lib import flow_test_lib from grr.test_lib import hunt_test_lib from grr.test_lib import test_lib @@ -102,7 +103,6 @@ def CheckMethodIsNotAccessChecked(self, method, args=None, context=None): ACCESS_CHECKED_METHODS.extend([ "InterrogateClient", "ListClientCrashes", - "ListClientActionRequests", "GetClientLoadStats", ]) @@ -620,6 +620,46 @@ def testAllOtherMethodsAreNotAccessChecked(self): self.CheckMethodIsNotAccessChecked(getattr(self.router, method_name)) +class AccessCheckerTest(test_lib.GRRBaseTest): + """Tests for AccessChecker.""" + + def setUp(self): + super().setUp() + self.context = api_call_context.ApiCallContext("test") + self.checker = api_router.AccessChecker( + params=api_router.ApiCallRouterWithApprovalCheckParams() + ) + + def testCheckIfCanStartClientFlowNotRegistered(self): + with self.assertRaisesRegex( + access_control.UnauthorizedAccess, + "Flow NotThere can't be started via the API.", + ): + self.checker.CheckIfCanStartClientFlow(self.context.username, "NotThere") + + def testCheckIfCanStartClientFlow_RestrictedFlow_NormalUser(self): + data_store.REL_DB.WriteGRRUser("restricted") + + with self.assertRaisesRegex( + access_control.UnauthorizedAccess, + "Not enough permissions to access restricted flow LaunchBinary", + ): + self.checker.CheckIfCanStartClientFlow("restricted", "LaunchBinary") + + def testCheckIfCanStartClientFlow_RestrictedFlow_AdminUser(self): + data_store.REL_DB.WriteGRRUser( + "admin", user_type=rdf_objects.GRRUser.UserType.USER_TYPE_ADMIN + ) + # Shouldn't raise if it is allowed. 
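+    # USER_TYPE_ADMIN satisfies CheckIfHasAccessToRestrictedFlows, so starting
+    # a restricted flow such as LaunchBinary must not raise.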
+ self.checker.CheckIfCanStartClientFlow("admin", "LaunchBinary") + + def testCheckIfCanStartMultiGetFile(self): + # Shouldn't raise if it is allowed. + self.checker.CheckIfCanStartClientFlow( + self.context.username, "MultiGetFile" + ) + + def main(argv): test_lib.main(argv) diff --git a/grr/server/grr_response_server/gui/api_call_router_without_checks.py b/grr/server/grr_response_server/gui/api_call_router_without_checks.py index 7195a38e4f..8deb739cce 100644 --- a/grr/server/grr_response_server/gui/api_call_router_without_checks.py +++ b/grr/server/grr_response_server/gui/api_call_router_without_checks.py @@ -71,9 +71,6 @@ def GetLastClientIPAddress(self, args, context=None): def ListClientCrashes(self, args, context=None): return api_client.ApiListClientCrashesHandler() - def ListClientActionRequests(self, args, context=None): - return api_client.ApiListClientActionRequestsHandler() - def GetClientLoadStats(self, args, context=None): return api_client.ApiGetClientLoadStatsHandler() diff --git a/grr/server/grr_response_server/gui/api_integration_tests/hunt_test.py b/grr/server/grr_response_server/gui/api_integration_tests/hunt_test.py index 47f5b2a18c..576d115889 100644 --- a/grr/server/grr_response_server/gui/api_integration_tests/hunt_test.py +++ b/grr/server/grr_response_server/gui/api_integration_tests/hunt_test.py @@ -97,6 +97,7 @@ def testStopHunt(self): h = self.api.Hunt(hunt_id).Get() self.assertEqual(h.data.state, h.data.STOPPED) + # TODO: Stop relying on default hunt constants. def testListResults(self): self.client_ids = self.SetupClients(5) with test_lib.FakeTime(42): @@ -110,7 +111,7 @@ def testListResults(self): self.assertEqual(client_ids, set(self.client_ids)) for r in results: self.assertEqual(r.timestamp, 42000000) - self.assertEqual(r.payload.pathspec.path, "/tmp/evil.txt") + self.assertEqual(r.payload.stat_entry.pathspec.path, "/tmp/evil.txt") def testListLogsWithoutClientIds(self): hunt_id = self.StartHunt() diff --git a/grr/server/grr_response_server/gui/api_plugins/client.py b/grr/server/grr_response_server/gui/api_plugins/client.py index 6932a95146..a644425800 100644 --- a/grr/server/grr_response_server/gui/api_plugins/client.py +++ b/grr/server/grr_response_server/gui/api_plugins/client.py @@ -12,13 +12,11 @@ from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs from grr_response_core.lib.rdfvalues import client_network as rdf_client_network from grr_response_core.lib.rdfvalues import cloud as rdf_cloud -from grr_response_core.lib.rdfvalues import flows as rdf_flows from grr_response_core.lib.rdfvalues import search as rdf_search from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_core.lib.util import collection from grr_response_core.lib.util import precondition from grr_response_proto.api import client_pb2 -from grr_response_server import action_registry from grr_response_server import client_index from grr_response_server import data_store from grr_response_server import fleetspeak_connector @@ -220,15 +218,6 @@ def ObjectReference(self): client=rdf_objects.ClientReference(client_id=str(self.client_id))) -class ApiClientActionRequest(rdf_structs.RDFProtoStruct): - protobuf = client_pb2.ApiClientActionRequest - rdf_deps = [ - rdf_flows.GrrMessage, - rdfvalue.RDFDatetime, - rdfvalue.RDFURN, - ] - - class ApiSearchClientsArgs(rdf_structs.RDFProtoStruct): protobuf = client_pb2.ApiSearchClientsArgs @@ -658,6 +647,12 @@ def Handle(self, args, context=None): idx = client_index.ClientIndex() 
idx.MultiAddClientLabels(client_ids, args.labels) + # Reset foreman rules check so active hunts can match against the new data + data_store.REL_DB.MultiWriteClientMetadata( + client_ids, + last_foreman=rdfvalue.RDFDatetime.EarliestDatabaseSafeValue(), + ) + class ApiRemoveClientsLabelsArgs(rdf_structs.RDFProtoStruct): protobuf = client_pb2.ApiRemoveClientsLabelsArgs @@ -721,63 +716,6 @@ def Handle(self, args, context=None): return ApiListKbFieldsResult(items=sorted(fields)) -class ApiListClientActionRequestsArgs(rdf_structs.RDFProtoStruct): - protobuf = client_pb2.ApiListClientActionRequestsArgs - rdf_deps = [ - ApiClientId, - ] - - -class ApiListClientActionRequestsResult(rdf_structs.RDFProtoStruct): - protobuf = client_pb2.ApiListClientActionRequestsResult - rdf_deps = [ - ApiClientActionRequest, - ] - - -class ApiListClientActionRequestsHandler(api_call_handler_base.ApiCallHandler): - """Lists pending client action requests.""" - - args_type = ApiListClientActionRequestsArgs - result_type = ApiListClientActionRequestsResult - - REQUESTS_NUM_LIMIT = 1000 - - def Handle(self, args, context=None): - result = ApiListClientActionRequestsResult() - - request_cache = {} - - for r in data_store.REL_DB.ReadAllClientActionRequests(str(args.client_id)): - stub = action_registry.ACTION_STUB_BY_ID[r.action_identifier] - client_action = stub.__name__ - - request = ApiClientActionRequest( - leased_until=r.leased_until, - session_id="%s/%s" % (r.client_id, r.flow_id), - client_action=client_action) - result.items.append(request) - - if not args.fetch_responses: - continue - - if r.flow_id not in request_cache: - req_res = data_store.REL_DB.ReadAllFlowRequestsAndResponses( - str(args.client_id), r.flow_id) - request_cache[r.flow_id] = req_res - - for req, responses in request_cache[r.flow_id]: - if req.request_id == r.request_id: - res = [] - for resp_id in sorted(responses): - m = responses[resp_id].AsLegacyGrrMessage() - res.append(m) - - request.responses = res - - return result - - class ApiGetClientLoadStatsArgs(rdf_structs.RDFProtoStruct): protobuf = client_pb2.ApiGetClientLoadStatsArgs rdf_deps = [ diff --git a/grr/server/grr_response_server/gui/api_plugins/client_regression_test.py b/grr/server/grr_response_server/gui/api_plugins/client_regression_test.py index 53b2c5362a..8a0d372eb4 100644 --- a/grr/server/grr_response_server/gui/api_plugins/client_regression_test.py +++ b/grr/server/grr_response_server/gui/api_plugins/client_regression_test.py @@ -4,16 +4,12 @@ from absl import app from grr_response_core.lib import rdfvalue -from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import client_network as rdf_client_network from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats from grr_response_server import data_store -from grr_response_server import flow -from grr_response_server.flows.general import processes from grr_response_server.gui import api_regression_test_lib from grr_response_server.gui.api_plugins import client as client_plugin -from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects from grr.test_lib import flow_test_lib from grr.test_lib import hunt_test_lib from grr.test_lib import test_lib @@ -180,72 +176,6 @@ def Run(self): replace=replace) -class ApiListClientActionRequestsHandlerRegressionTest( - api_regression_test_lib.ApiRegressionTest, - hunt_test_lib.StandardHuntTestMixin): - - api_method = "ListClientActionRequests" - handler = client_plugin.ApiListClientActionRequestsHandler - - 
def _StartFlow(self, client_id, flow_cls, **kw): - flow_id = flow.StartFlow(flow_cls=flow_cls, client_id=client_id, **kw) - # Lease the client message. - data_store.REL_DB.LeaseClientActionRequests( - client_id, lease_time=rdfvalue.Duration.From(10000, rdfvalue.SECONDS)) - # Write some responses. In the relational db, the client queue will be - # cleaned up as soon as all responses are available. Therefore we cheat - # here and make it look like the request needs more responses so it's not - # considered complete. - - # Write the status first. This will mark the request as waiting for 2 - # responses. - status = rdf_flow_objects.FlowStatus( - client_id=client_id, flow_id=flow_id, request_id=1, response_id=2) - data_store.REL_DB.WriteFlowResponses([status]) - - # Now we read the request, adjust the number, and write it back. - reqs = data_store.REL_DB.ReadAllFlowRequestsAndResponses(client_id, flow_id) - req = reqs[0][0] - - req.nr_responses_expected = 99 - - data_store.REL_DB.WriteFlowRequests([req]) - - # This response now won't trigger any deletion of client messages. - response = rdf_flow_objects.FlowResponse( - client_id=client_id, - flow_id=flow_id, - request_id=1, - response_id=1, - payload=rdf_client.Process(name="test_process")) - data_store.REL_DB.WriteFlowResponses([response]) - - # This is not strictly needed as we don't display this information in the - # UI. - req.nr_responses_expected = 2 - data_store.REL_DB.WriteFlowRequests([req]) - - return flow_id - - def Run(self): - client_id = self.SetupClient(0) - - with test_lib.FakeTime(42): - flow_id = self._StartFlow(client_id, processes.ListProcesses) - - replace = api_regression_test_lib.GetFlowTestReplaceDict(client_id, flow_id) - - self.Check( - "ListClientActionRequests", - args=client_plugin.ApiListClientActionRequestsArgs(client_id=client_id), - replace=replace) - self.Check( - "ListClientActionRequests", - args=client_plugin.ApiListClientActionRequestsArgs( - client_id=client_id, fetch_responses=True), - replace=replace) - - class ApiGetClientLoadStatsHandlerRegressionTest( api_regression_test_lib.ApiRegressionTest): diff --git a/grr/server/grr_response_server/gui/api_plugins/client_test.py b/grr/server/grr_response_server/gui/api_plugins/client_test.py index 656a096a9e..423d731e5b 100644 --- a/grr/server/grr_response_server/gui/api_plugins/client_test.py +++ b/grr/server/grr_response_server/gui/api_plugins/client_test.py @@ -136,6 +136,25 @@ def testAddsTwoLabelsToTwoClients(self): self.assertFalse(data_store.REL_DB.ReadClientLabels(self.client_ids[2])) + def testForemanTimeIsResetOnLabelAdd(self): + cid = self.client_ids[0] + + data_store.REL_DB.WriteClientMetadata( + cid, + last_foreman=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600), + ) + + self.handler.Handle( + client_plugin.ApiAddClientsLabelsArgs(client_ids=[cid], labels=["foo"]), + context=self.context, + ) + + md = data_store.REL_DB.ReadClientMetadata(cid) + self.assertIsNotNone(md.last_foreman_time) + self.assertEqual( + md.last_foreman_time, rdfvalue.RDFDatetime.EarliestDatabaseSafeValue() + ) + class ApiRemoveClientsLabelsHandlerTest(api_test_lib.ApiCallHandlerTest): """Test for ApiRemoveClientsLabelsHandler.""" diff --git a/grr/server/grr_response_server/gui/api_plugins/flow.py b/grr/server/grr_response_server/gui/api_plugins/flow.py index 5defce8b96..74bd9b84e4 100644 --- a/grr/server/grr_response_server/gui/api_plugins/flow.py +++ b/grr/server/grr_response_server/gui/api_plugins/flow.py @@ -1281,8 +1281,8 @@ def Handle(self, args, context=None): result = 
[] for name, cls in sorted(registry.FlowRegistry.FLOW_REGISTRY.items()): - # Flows without a category do not show up in the GUI. - if not getattr(cls, "category", None): + # Skip if it is not visible to GUI/API. + if not cls.CanUseViaAPI(): continue # Only show flows that the user is allowed to start. diff --git a/grr/server/grr_response_server/gui/api_plugins/flow_regression_test.py b/grr/server/grr_response_server/gui/api_plugins/flow_regression_test.py index a9ca8ab105..028fd78c13 100644 --- a/grr/server/grr_response_server/gui/api_plugins/flow_regression_test.py +++ b/grr/server/grr_response_server/gui/api_plugins/flow_regression_test.py @@ -7,7 +7,7 @@ from grr_response_core.lib import rdfvalue from grr_response_core.lib import registry from grr_response_core.lib.rdfvalues import client as rdf_client -from grr_response_core.lib.rdfvalues import paths as rdf_paths +from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_server import access_control from grr_response_server import data_store from grr_response_server import flow @@ -15,7 +15,6 @@ from grr_response_server.flows.general import discovery from grr_response_server.flows.general import file_finder from grr_response_server.flows.general import processes -from grr_response_server.flows.general import transfer from grr_response_server.gui import api_regression_test_lib from grr_response_server.gui.api_plugins import flow as flow_plugin from grr_response_server.output_plugins import email_plugin @@ -172,17 +171,17 @@ class ApiListFlowResultsHandlerRegressionTest( handler = flow_plugin.ApiListFlowResultsHandler def _RunFlow(self, client_id): - flow_args = transfer.GetFileArgs( - pathspec=rdf_paths.PathSpec( - path="/tmp/evil.txt", pathtype=rdf_paths.PathSpec.PathType.OS)) + flow_args = rdf_file_finder.FileFinderArgs() + flow_args.paths = ["/tmp/evil.txt"] client_mock = hunt_test_lib.SampleHuntMock(failrate=2) with test_lib.FakeTime(42): return flow_test_lib.StartAndRunFlow( - transfer.GetFile, + file_finder.ClientFileFinder, client_id=client_id, client_mock=client_mock, - flow_args=flow_args) + flow_args=flow_args, + ) def Run(self): acl_test_lib.CreateUser(self.test_username) diff --git a/grr/server/grr_response_server/gui/api_plugins/hunt.py b/grr/server/grr_response_server/gui/api_plugins/hunt.py index 62f07ac2a6..d64601e68e 100644 --- a/grr/server/grr_response_server/gui/api_plugins/hunt.py +++ b/grr/server/grr_response_server/gui/api_plugins/hunt.py @@ -1669,7 +1669,7 @@ def _ArgsToHuntArgs( variable=rdf_hunt_objects.HuntArgumentsVariable( flow_groups=flow_groups)) - def Handle(self, args: ApiCreatePerClientFileCollectionHuntArgs, + def Handle(self, args: ApiCreatePerClientFileCollectionHuntArgs, # pytype: disable=signature-mismatch # overriding-parameter-count-checks context: api_call_context.ApiCallContext): if len(args.per_client_args) > self.MAX_CLIENTS: raise ValueError(f"At most {self.MAX_CLIENTS} clients can be specified " diff --git a/grr/server/grr_response_server/gui/api_plugins/user.py b/grr/server/grr_response_server/gui/api_plugins/user.py index 07face9ea2..e9a539a4a6 100644 --- a/grr/server/grr_response_server/gui/api_plugins/user.py +++ b/grr/server/grr_response_server/gui/api_plugins/user.py @@ -23,7 +23,6 @@ from grr_response_server import flow from grr_response_server import notification as notification_lib from grr_response_server.databases import db -from grr_response_server.flows.general import administrative from grr_response_server.gui import api_call_handler_base 
from grr_response_server.gui import approval_checks @@ -912,18 +911,6 @@ class ApiCreateClientApprovalHandler(ApiCreateApprovalHandlerBase): approval_notification_type = ( rdf_objects.UserNotification.Type.TYPE_CLIENT_APPROVAL_REQUESTED) - def Handle(self, args, context=None): - result = super().Handle(args, context=context) - - if args.keep_client_alive: - flow.StartFlow( - client_id=str(args.client_id), - flow_cls=administrative.KeepAlive, - creator=context.username, - duration=3600) - - return result - class ApiGetClientApprovalArgs(ApiClientApprovalArgsBase): protobuf = api_user_pb2.ApiGetClientApprovalArgs diff --git a/grr/server/grr_response_server/gui/api_plugins/user_test.py b/grr/server/grr_response_server/gui/api_plugins/user_test.py index 54d77e4d08..6d31c2b2c6 100644 --- a/grr/server/grr_response_server/gui/api_plugins/user_test.py +++ b/grr/server/grr_response_server/gui/api_plugins/user_test.py @@ -462,16 +462,6 @@ def setUp(self): self.args.approval.notified_users = ["approver"] self.args.approval.email_cc_addresses = ["test@example.com"] - def testKeepAliveFlowIsStartedWhenFlagIsSet(self): - self.args.keep_client_alive = True - - self.handler.Handle(self.args, self.context) - - flows = data_store.REL_DB.ReadAllFlowObjects( - client_id=str(self.args.client_id)) - flow_class_names = [f.flow_class_name for f in flows] - self.assertEqual(flow_class_names, ["KeepAlive"]) - def testSendsEmailWithApprovalInformation(self): with mock.patch.object(email_alerts.EMAIL_ALERTER, "SendEmail") as send_fn: approval_id = self.handler.Handle(self.args, self.context).id diff --git a/grr/server/grr_response_server/gui/api_plugins/yara.py b/grr/server/grr_response_server/gui/api_plugins/yara.py index 0e4b8bd03c..7b196cdeae 100644 --- a/grr/server/grr_response_server/gui/api_plugins/yara.py +++ b/grr/server/grr_response_server/gui/api_plugins/yara.py @@ -29,7 +29,7 @@ class ApiUploadYaraSignatureHandler(api_call_handler_base.ApiCallHandler): args_type = ApiUploadYaraSignatureArgs result_type = ApiUploadYaraSignatureResult - def Handle( + def Handle( # pytype: disable=signature-mismatch # overriding-parameter-count-checks self, args: ApiUploadYaraSignatureArgs, context: api_call_context.ApiCallContext, diff --git a/grr/server/grr_response_server/gui/api_regression_http.py b/grr/server/grr_response_server/gui/api_regression_http.py index 5dff4e9dd3..6cd14d1f4a 100644 --- a/grr/server/grr_response_server/gui/api_regression_http.py +++ b/grr/server/grr_response_server/gui/api_regression_http.py @@ -150,23 +150,6 @@ def HandleCheck(self, method_metadata, args=None, replace=None): return check_result -# TODO(amoser): Clean up comments and naming. - -# Each mixin below configures a different way for regression tests to run. After -# AFF4 is gone, there will be only 2 mixins left here (http API v1 and http -# API v2). At the moment we have v1 with rel_db, v1 without, v2 with rel_db, -# and v2 without. -# -# Duplicated test methods are added to these classes explicitly to make sure -# they were not misconfigured and REL_DB is enabled in tests that count on -# REL_DB being enabled - this will go away with AFF4 support going away. -# -# output_file_name denotes where golden regression data should be read from - -# the point of REL_DB enabled tests is that they should stay compatible with -# the current API behavior. So we direct them to use same golden files - -# hence the duplication. Again, this will go away soon. 
- - class HttpApiV1RelationalDBRegressionTestMixin(HttpApiRegressionTestMixinBase): """Test class for HTTP v1 protocol API regression test.""" diff --git a/grr/server/grr_response_server/gui/gui_test_lib.py b/grr/server/grr_response_server/gui/gui_test_lib.py index 420fc7f8d7..aa162bf8ed 100644 --- a/grr/server/grr_response_server/gui/gui_test_lib.py +++ b/grr/server/grr_response_server/gui/gui_test_lib.py @@ -23,8 +23,8 @@ from grr_response_core.lib import utils from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs from grr_response_core.lib.rdfvalues import client_network as rdf_client_network - from grr_response_core.lib.rdfvalues import crypto as rdf_crypto +from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_proto import tests_pb2 @@ -33,8 +33,8 @@ from grr_response_server import foreman_rules from grr_response_server import output_plugin from grr_response_server.databases import db +from grr_response_server.flows.general import file_finder from grr_response_server.flows.general import processes -from grr_response_server.flows.general import transfer from grr_response_server.gui import api_auth_manager from grr_response_server.gui import api_call_router_with_approval_checks from grr_response_server.gui import webauth @@ -689,7 +689,8 @@ def setUp(self): # Make the user use the advanced gui so we can test it. data_store.REL_DB.WriteGRRUser( - self.test_username, ui_mode=api_user.GUISettings.UIMode.ADVANCED) + self.test_username, ui_mode=api_user.GUISettings.UIMode.DEBUG + ) artifact_patcher = ar_test_lib.PatchDatastoreOnlyArtifactRegistry() artifact_patcher.start() @@ -763,18 +764,20 @@ def CreateSampleHunt(self, self.hunt_urn = self.StartHunt( flow_runner_args=rdf_flow_runner.FlowRunnerArgs( - flow_name=transfer.GetFile.__name__), - flow_args=transfer.GetFileArgs( - pathspec=rdf_paths.PathSpec( - path=path or "/tmp/evil.txt", - pathtype=rdf_paths.PathSpec.PathType.OS, - )), + flow_name=file_finder.ClientFileFinder.__name__ + ), + flow_args=rdf_file_finder.FileFinderArgs( + paths=[path or "/tmp/evil.txt"], + pathtype=rdf_paths.PathSpec.PathType.OS, + action=rdf_file_finder.FileFinderAction.Download(), + ), client_rule_set=self._CreateForemanClientRuleSet(), output_plugins=output_plugins or [], client_rate=0, client_limit=client_limit, creator=creator or self.test_username, - paused=stopped) + paused=stopped, + ) return self.hunt_urn diff --git a/grr/server/grr_response_server/gui/selenium_tests/flow_copy_test.py b/grr/server/grr_response_server/gui/selenium_tests/flow_copy_test.py index 03dd5e87ed..52f6b8a2ac 100644 --- a/grr/server/grr_response_server/gui/selenium_tests/flow_copy_test.py +++ b/grr/server/grr_response_server/gui/selenium_tests/flow_copy_test.py @@ -178,17 +178,22 @@ def testCopyingHuntFlowWorks(self): self.Open("/legacy#/clients/%s" % self.client_id) self.Click("css=a[grrtarget='client.flows']") - # StartHunt creates a hunt with a GetFile flow, so selecting a GetFile row. - self.Click("css=td:contains('GetFile')") + # StartHunt creates a hunt with a ClientFileFinder flow, so selecting a + # ClientFileFinder row. + self.Click("css=td:contains('ClientFileFinder')") self.Click("css=button[name=copy_flow]") self.Click("css=button:contains('Launch')") # Check that flows list got updated and that the new flow is selected. 
- self.WaitUntil(self.IsElementPresent, - "css=grr-client-flows-list tr:contains('GetFile'):nth(1)") self.WaitUntil( - self.IsElementPresent, "css=grr-client-flows-list " - "tr:contains('GetFile'):nth(0).row-selected") + self.IsElementPresent, + "css=grr-client-flows-list tr:contains('ClientFileFinder'):nth(1)", + ) + self.WaitUntil( + self.IsElementPresent, + "css=grr-client-flows-list " + "tr:contains('ClientFileFinder'):nth(0).row-selected", + ) def testCopyingFlowWithRawBytesWithNonAsciiCharsInArgumentsWorks(self): # Literal is defined simply as "bytes" in its proto definition. We make sure diff --git a/grr/server/grr_response_server/gui/selenium_tests/flow_management_test.py b/grr/server/grr_response_server/gui/selenium_tests/flow_management_test.py index 8220edff2a..d346373e27 100644 --- a/grr/server/grr_response_server/gui/selenium_tests/flow_management_test.py +++ b/grr/server/grr_response_server/gui/selenium_tests/flow_management_test.py @@ -7,11 +7,12 @@ from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs +from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_server import data_store from grr_response_server import flow_base +from grr_response_server.flows.general import file_finder as flows_file_finder from grr_response_server.flows.general import processes as flows_processes -from grr_response_server.flows.general import transfer as flows_transfer from grr_response_server.flows.general import webhistory as flows_webhistory from grr_response_server.gui import api_call_context from grr_response_server.gui import gui_test_lib @@ -48,21 +49,21 @@ def testOpeningManageFlowsOfUnapprovedClientRedirectsToHostInfoPage(self): "You do not have an approval for this client.") def testPageTitleReflectsSelectedFlow(self): - pathspec = rdf_paths.PathSpec( - path=os.path.join(self.base_path, "test.plist"), - pathtype=rdf_paths.PathSpec.PathType.OS) - args = flows_transfer.GetFileArgs(pathspec=pathspec) + args = rdf_file_finder.FileFinderArgs( + paths=[os.path.join(self.base_path, "test.plist")] + ) flow_id = flow_test_lib.StartFlow( - flows_transfer.GetFile, + flows_file_finder.ClientFileFinder, self.client_id, flow_args=args, - creator=self.test_username) + creator=self.test_username, + ) self.Open("/legacy#/clients/%s/flows/" % self.client_id) self.WaitUntilEqual("GRR | %s | Flows" % self.client_id, self.GetPageTitle) - self.Click("css=td:contains('GetFile')") + self.Click("css=td:contains('ClientFileFinder')") self.WaitUntilEqual("GRR | %s | %s" % (self.client_id, flow_id), self.GetPageTitle) @@ -100,18 +101,21 @@ def testFlowManagement(self): self.Click("css=#_Filesystem a") # Wait until the tree has expanded. 
- self.WaitUntil(self.IsTextPresent, flows_transfer.GetFile.__name__) + self.WaitUntil( + self.IsTextPresent, flows_file_finder.ClientFileFinder.friendly_name + ) - self.Click("link=" + flows_transfer.GetFile.__name__) + self.Click("link=" + flows_file_finder.ClientFileFinder.friendly_name) - self.Select("css=.form-group:has(> label:contains('Pathtype')) select", - "OS") - self.Type("css=.form-group:has(> label:contains('Path')) input", - u"/dev/c/msn[1].exe") + self.Select( + "css=.form-group:has(> label:contains('Pathtype')) select", + "OS (default)", + ) + self.Type("css=grr-glob-expressions-list-form input", "/dev/c/msn[1].exe") self.Click("css=button.Launch") - self.WaitUntil(self.IsTextPresent, "Launched Flow GetFile") + self.WaitUntil(self.IsTextPresent, "Launched Flow ClientFileFinder") # Test that recursive tests are shown in a tree table. flow_test_lib.StartFlow( @@ -129,8 +133,10 @@ def testFlowManagement(self): "css=grr-client-flows-list tr:visible:nth(1) td:nth(2)") self.WaitUntilEqual( - flows_transfer.GetFile.__name__, self.GetText, - "css=grr-client-flows-list tr:visible:nth(2) td:nth(2)") + flows_file_finder.ClientFileFinder.__name__, + self.GetText, + "css=grr-client-flows-list tr:visible:nth(2) td:nth(2)", + ) # Click on the first tree_closed to open it. self.Click("css=grr-client-flows-list tr:visible:nth(1) .tree_closed") @@ -140,15 +146,18 @@ def testFlowManagement(self): "css=grr-client-flows-list tr:visible:nth(2) td:nth(2)") # Select the requests tab - self.Click("css=td:contains(GetFile)") + self.Click("css=td:contains(ClientFileFinder)") self.Click("css=li[heading=Requests]") self.WaitUntil(self.IsElementPresent, "css=td:contains(1)") - # Check that a StatFile client action was issued as part of the GetFile - # flow. "Stat" matches the next state that is called. - self.WaitUntil(self.IsElementPresent, - "css=.tab-content td.proto_value:contains(Stat)") + # Check that a StatFile client action was issued as part of the + # ClientFileFinder flow. "StoreResultsWithoutBlobs" matches the next state + # that is called. + self.WaitUntil( + self.IsElementPresent, + "css=.tab-content td.proto_value:contains(StoreResultsWithoutBlobs)", + ) def testOverviewIsShownForNestedFlows(self): flow_test_lib.StartFlow( diff --git a/grr/server/grr_response_server/gui/selenium_tests/hunt_archive_test.py b/grr/server/grr_response_server/gui/selenium_tests/hunt_archive_test.py index bcc25133f2..941fa27ec7 100644 --- a/grr/server/grr_response_server/gui/selenium_tests/hunt_archive_test.py +++ b/grr/server/grr_response_server/gui/selenium_tests/hunt_archive_test.py @@ -285,7 +285,7 @@ def testDisplaysErrorMessageIfSingleHuntFileCanNotBeRead(self): original_result = results[0] payload = original_result.payload.Copy() - payload.pathspec.path += "blah" + payload.stat_entry.pathspec.path += "blah" client_id = self.SetupClients(1)[0] self.AddResultsToHunt(hunt_id, client_id, [payload]) diff --git a/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py b/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py index 0a7dab25c9..260518f11d 100644 --- a/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py +++ b/grr/server/grr_response_server/gui/selenium_tests/hunt_view_test.py @@ -111,8 +111,6 @@ def testHuntClientsView(self): self.RequestAndGrantClientApproval(client_id) - # TODO(user): move the code below outside of if as soon as hunt's - # subflows are properly reported in the REL_DB implementation. 
self.Click("css=tr:contains('%s') td:nth-of-type(2) a" % client_id) self.WaitUntil(self.IsTextPresent, "Flow Information") self.WaitUntil(self.IsTextPresent, self.base_path) @@ -316,6 +314,9 @@ def testHuntNotificationIsShownAndClickable(self): def testLogsTabShowsLogsFromAllClients(self): hunt_id = self.SetupHuntDetailView(failrate=-1) + # Make sure all flows have a log entry. + for client in self.client_ids: + self.AddLogToHunt(hunt_id, client, f"TestLogLine for client {client}") self.Open("/legacy#main=ManageHunts") self.Click("css=td:contains('%s')" % hunt_id) @@ -323,10 +324,7 @@ def testLogsTabShowsLogsFromAllClients(self): for client_id in self.client_ids: self.WaitUntil(self.IsTextPresent, client_id) - # TODO(amoser): Get rid of the aff4 prefix here. - self.WaitUntil( - self.IsTextPresent, "File aff4:/%s/%s transferred successfully." % - (client_id, "fs/os/tmp/evil.txt")) + self.WaitUntil(self.IsTextPresent, f"TestLogLine for client {client_id}") def testLogsTabGetsAutoRefreshed(self): hunt_id = self.CreateSampleHunt() @@ -353,6 +351,9 @@ def testLogsTabGetsAutoRefreshed(self): def testLogsTabFiltersLogsByString(self): hunt_id = self.SetupHuntDetailView(failrate=-1) + # Make sure all flows have a log entry. + for client in self.client_ids: + self.AddLogToHunt(hunt_id, client, f"TestLogLine for client {client}") self.Open("/legacy#main=ManageHunts") self.Click("css=td:contains('%s')" % hunt_id) @@ -362,16 +363,15 @@ def testLogsTabFiltersLogsByString(self): self.Click("css=grr-hunt-log button:contains('Filter')") self.WaitUntil(self.IsTextPresent, self.client_ids[-1]) - # TODO(amoser): Get rid of the aff4 prefix here. self.WaitUntil( - self.IsTextPresent, "File aff4:/%s/%s transferred successfully." % - (self.client_ids[-1], "fs/os/tmp/evil.txt")) + self.IsTextPresent, f"TestLogLine for client {self.client_ids[-1]}" + ) for client_id in self.client_ids[:-1]: self.WaitUntilNot(self.IsTextPresent, client_id) self.WaitUntilNot( - self.IsTextPresent, "File %s/%s transferred successfully." % - (client_id, "fs/os/tmp/evil.txt")) + self.IsTextPresent, f"TestLogLine for client {client_id}" + ) def testLogsTabShowsDatesInUTC(self): hunt_id = self.CreateSampleHunt() @@ -386,6 +386,11 @@ def testLogsTabShowsDatesInUTC(self): def testErrorsTabShowsErrorsFromAllClients(self): hunt_id = self.SetupHuntDetailView(failrate=1) + # Make sure all flows have an error entry. + for client in self.client_ids: + self.AddErrorToHunt( + hunt_id, client, "Client Error", traceback.format_exc() + ) self.Open("/legacy#main=ManageHunts") self.Click("css=td:contains('%s')" % hunt_id) @@ -491,7 +496,7 @@ def testShowsResultsTabForIndividualFlowsOnClients(self): self.Open("/legacy#c=" + self.client_ids[0]) self.Click("css=a:contains('Manage launched flows')") - self.Click("css=grr-client-flows-list tr:contains('GetFile')") + self.Click("css=grr-client-flows-list tr:contains('ClientFileFinder')") self.Click("css=li[heading=Results]") # This is to check that no exceptions happened when we tried to display # results. 
diff --git a/grr/server/grr_response_server/gui/selenium_tests/notifications_test.py b/grr/server/grr_response_server/gui/selenium_tests/notifications_test.py index cc9715e7be..dbde2242cb 100644 --- a/grr/server/grr_response_server/gui/selenium_tests/notifications_test.py +++ b/grr/server/grr_response_server/gui/selenium_tests/notifications_test.py @@ -137,9 +137,9 @@ def testUserSettings(self): mode_selector = "css=.form-group:has(label:contains('Mode')) select" - # Open settings dialog and change mode from BASIC to ADVANCED + # Open settings dialog and change mode from DEBUG to BASIC self.Click("css=grr-user-settings-button") - self.assertEqual("ADVANCED", self.GetSelectedLabel(mode_selector).strip()) + self.assertEqual("DEBUG", self.GetSelectedLabel(mode_selector).strip()) self.Select(mode_selector, "BASIC (default)") self.Click("css=button[name=Proceed]") diff --git a/grr/server/grr_response_server/gui/selenium_tests/v2/file_test.py b/grr/server/grr_response_server/gui/selenium_tests/v2/file_test.py index 9b2ab7d6b9..367b5a4b49 100644 --- a/grr/server/grr_response_server/gui/selenium_tests/v2/file_test.py +++ b/grr/server/grr_response_server/gui/selenium_tests/v2/file_test.py @@ -427,5 +427,420 @@ def testCorrectlyDisplaysMultiGetFileResults(self): f"css=multi-get-file-flow-details td:contains('/somefile{i}')") +class StatMultipleFilesTest(gui_test_lib.GRRSeleniumTest): + """Tests the StatMultipleFiles Flow.""" + + def _GenCollectedResult(self, i: int) -> rdf_client_fs.StatEntry: + return rdf_client_fs.StatEntry( + pathspec=rdf_paths.PathSpec.OS(path=f"/file{i}"), + st_size=i, + ) + + def setUp(self): + super().setUp() + self.client_id = self.SetupClient(0) + self.RequestAndGrantClientApproval(self.client_id) + + def testDisplaysOnlyWhenResultsCollected(self): + flow_args = rdf_file_finder.StatMultipleFilesArgs( + path_expressions=["/file0"] + ) + flow_test_lib.StartFlow( + file.StatMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + self.Open(f"/v2/clients/{self.client_id}") + + self.WaitUntil( + self.IsElementPresent, "css=.flow-title:contains('Stat files')" + ) + + self.assertFalse( + self.IsElementPresent("css=result-accordion file-results-table"), + ) + + def testDisplaysCollectedResult(self): + flow_args = rdf_file_finder.StatMultipleFilesArgs( + path_expressions=["/file0", "/file1", "/file2"] + ) + flow_id = flow_test_lib.StartFlow( + file.StatMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + flow_test_lib.AddResultsToFlow( + self.client_id, flow_id, [self._GenCollectedResult(i) for i in range(3)] + ) + + with flow_test_lib.FlowResultMetadataOverride( + file.StatMultipleFiles, + rdf_flow_objects.FlowResultMetadata( + is_metadata_set=True, + num_results_per_type_tag=[ + rdf_flow_objects.FlowResultCount( + type=rdf_client_fs.StatEntry.__name__, + count=3, + ) + ], + ), + ): + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil( + self.IsElementPresent, + "css=result-accordion:contains('/file0 + 2 more')", + ) + + self.Click("css=result-accordion:contains('/file0 + 2 more')") + + for i in range(3): + self.WaitUntil( + self.IsElementPresent, + f"css=file-results-table:contains('{i} B')", + ) + + def testDownloadButtonFlowFinished(self): + flow_args = rdf_file_finder.StatMultipleFilesArgs( + path_expressions=["/file0"] + ) + flow_id = flow_test_lib.StartFlow( + file.StatMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + 
flow_test_lib.MarkFlowAsFinished(self.client_id, flow_id) + + with flow_test_lib.FlowResultMetadataOverride( + file.StatMultipleFiles, + rdf_flow_objects.FlowResultMetadata( + is_metadata_set=True, + num_results_per_type_tag=[ + rdf_flow_objects.FlowResultCount( + type=rdf_client_fs.StatEntry.__name__, + count=1, + ) + ], + ), + ): + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil( + self.IsElementPresent, + "css=a[mat-stroked-button]:contains('Download')", + ) + + def testFlowError(self): + flow_args = rdf_file_finder.StatMultipleFilesArgs( + path_expressions=["/file0", "/file1"] + ) + flow_id = flow_test_lib.StartFlow( + file.StatMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + flow_test_lib.MarkFlowAsFailed(self.client_id, flow_id) + + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil(self.IsElementPresent, "css=flow-details mat-icon.error") + + def testDisplaysArgumentsAccordion(self): + flow_args = rdf_file_finder.StatMultipleFilesArgs( + path_expressions=["/file0", "/file1"] + ) + flow_test_lib.StartFlow( + file.StatMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil( + self.IsElementPresent, + "css=.flow-title:contains('Stat files')", + ) + + self.Click("css=result-accordion .title:contains('Flow arguments')") + + self.WaitUntil( + self.IsElementPresent, + "css=app-glob-expression-input", + ) + path_input = self.GetElement( + "css=app-glob-expression-input[id=path0] input" + ) + self.assertEqual("/file0", path_input.get_attribute("value")) + + path_input_2 = self.GetElement( + "css=app-glob-expression-input[id=path1] input" + ) + self.assertEqual("/file1", path_input_2.get_attribute("value")) + + def testFlowArgumentForm(self): + self.Open(f"/v2/clients/{self.client_id}") + + self.WaitUntil(self.IsElementPresent, "css=app-flow-picker") + self.Type('css=app-flow-picker input[name="flowSearchBox"]', "stat") + self.Click('css=[role=option]:Contains("Stat files")') + + element = self.WaitUntil( + self.GetVisibleElement, + "css=flow-args-form app-glob-expression-input[id=path0] input", + ) + element.send_keys("/foo/firstpath") + + self.Click('css=flow-form button:contains("Add path expression")') + + element = self.WaitUntil( + self.GetVisibleElement, + "css=flow-args-form app-glob-expression-input[id=path1] input", + ) + element.send_keys("/bar/secondpath") + + self.Click('css=flow-form button:contains("Start")') + + def FlowHasBeenStarted(): + handler = api_flow.ApiListFlowsHandler() + flows = handler.Handle( + api_flow.ApiListFlowsArgs( + client_id=self.client_id, top_flows_only=True + ), + context=api_call_context.ApiCallContext(username=self.test_username), + ).items + return flows[0] if len(flows) == 1 else None + + flow = self.WaitUntil(FlowHasBeenStarted) + + self.assertEqual(flow.name, file.StatMultipleFiles.__name__) + self.assertCountEqual( + flow.args.path_expressions, ["/foo/firstpath", "/bar/secondpath"] + ) + + +class HashMultipleFilesTest(gui_test_lib.GRRSeleniumTest): + """Tests the HashMultipleFiles Flow.""" + + def _GenCollectedResult( + self, i: int + ) -> rdf_file_finder.CollectMultipleFilesResult: + return rdf_file_finder.CollectMultipleFilesResult( + stat=rdf_client_fs.StatEntry( + pathspec=rdf_paths.PathSpec.OS(path=f"/file{i}"), + st_size=i, + ), + hash=rdf_crypto.Hash( + sha256=binascii.unhexlify( + "9e8dc93e150021bb4752029ebbff51394aa36f069cf19901578e4f06017acdb5" + ), + 
sha1=binascii.unhexlify("6dd6bee591dfcb6d75eb705405302c3eab65e21a"), + md5=binascii.unhexlify("8b0a15eefe63fd41f8dc9dee01c5cf9a"), + ), + status=rdf_file_finder.CollectMultipleFilesResult.Status.COLLECTED, + ) + + def _GenFailedResult( + self, + i: int, + ) -> rdf_file_finder.CollectMultipleFilesResult: + return rdf_file_finder.CollectMultipleFilesResult( + stat=rdf_client_fs.StatEntry( + pathspec=rdf_paths.PathSpec.OS(path=f"/file{i}"), + ), + status=rdf_file_finder.CollectMultipleFilesResult.Status.FAILED, + error=f"errormsg{i}", + ) + + def setUp(self): + super().setUp() + self.client_id = self.SetupClient(0) + self.RequestAndGrantClientApproval(self.client_id) + + def testDisplaysOnlyWhenCollected(self): + flow_args = rdf_file_finder.HashMultipleFilesArgs( + path_expressions=["/file0"] + ) + flow_test_lib.StartFlow( + file.HashMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + with flow_test_lib.FlowProgressOverride( + file.HashMultipleFiles, + rdf_file_finder.HashMultipleFilesProgress(num_in_progress=1), + ): + self.Open(f"/v2/clients/{self.client_id}") + + self.WaitUntil( + self.IsElementPresent, + "css=.flow-title:contains('Hash files')", + ) + + self.assertFalse( + self.IsElementPresent("css=result-accordion file-results-table"), + ) + + def testDisplaysCollectedResult(self): + flow_args = rdf_file_finder.HashMultipleFilesArgs( + path_expressions=["/file0", "/file1", "/file2"] + ) + flow_id = flow_test_lib.StartFlow( + file.HashMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + flow_test_lib.AddResultsToFlow( + self.client_id, flow_id, [self._GenCollectedResult(i) for i in range(3)] + ) + + with flow_test_lib.FlowProgressOverride( + file.HashMultipleFiles, + rdf_file_finder.HashMultipleFilesProgress(num_hashed=3), + ): + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil( + self.IsElementPresent, + "css=.flow-title:contains('Hash files')", + ) + + self.Click("css=result-accordion:contains('/file0 + 2 more')") + + for i in range(3): + self.WaitUntil( + self.IsElementPresent, + f"css=result-accordion .results:contains('{i} B')", + ) + + def testDownloadButtonFlowFinished(self): + flow_args = rdf_file_finder.HashMultipleFilesArgs( + path_expressions=["/file0"] + ) + flow_id = flow_test_lib.StartFlow( + file.HashMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + flow_test_lib.MarkFlowAsFinished(self.client_id, flow_id) + + with flow_test_lib.FlowResultMetadataOverride( + file.HashMultipleFiles, + rdf_flow_objects.FlowResultMetadata( + is_metadata_set=True, + num_results_per_type_tag=[ + rdf_flow_objects.FlowResultCount( + type=rdf_file_finder.CollectMultipleFilesResult.__name__, + count=1, + ) + ], + ), + ): + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil( + self.IsElementPresent, + "css=a[mat-stroked-button]:contains('Download')", + ) + + def testFlowError(self): + flow_args = rdf_file_finder.HashMultipleFilesArgs( + path_expressions=["/file0", "/file1"] + ) + flow_id = flow_test_lib.StartFlow( + file.HashMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + flow_test_lib.MarkFlowAsFailed(self.client_id, flow_id) + + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil(self.IsElementPresent, "css=flow-details mat-icon.error") + + def testDisplaysArgumentsAccordion(self): + flow_args = rdf_file_finder.HashMultipleFilesArgs( + 
path_expressions=["/file0", "/file1"] + ) + flow_test_lib.StartFlow( + file.HashMultipleFiles, + creator=self.test_username, + client_id=self.client_id, + flow_args=flow_args, + ) + + self.Open(f"/v2/clients/{self.client_id}") + self.WaitUntil( + self.IsElementPresent, + "css=.flow-title:contains('Hash files')", + ) + + self.Click("css=result-accordion .title:contains('Flow arguments')") + + self.WaitUntil( + self.IsElementPresent, + "css=app-glob-expression-input", + ) + path_input = self.GetElement( + "css=app-glob-expression-input[id=path0] input" + ) + self.assertEqual("/file0", path_input.get_attribute("value")) + + path_input_2 = self.GetElement( + "css=app-glob-expression-input[id=path1] input" + ) + self.assertEqual("/file1", path_input_2.get_attribute("value")) + + def testFlowArgumentForm(self): + self.Open(f"/v2/clients/{self.client_id}") + + self.WaitUntil(self.IsElementPresent, "css=app-flow-picker") + self.Type('css=app-flow-picker input[name="flowSearchBox"]', "hash") + self.Click('css=[role=option]:Contains("Hash files")') + + element = self.WaitUntil( + self.GetVisibleElement, + "css=flow-args-form app-glob-expression-input[id=path0] input", + ) + element.send_keys("/foo/firstpath") + + self.Click('css=flow-form button:contains("Add path expression")') + + element = self.WaitUntil( + self.GetVisibleElement, + "css=flow-args-form app-glob-expression-input[id=path1] input", + ) + element.send_keys("/bar/secondpath") + + self.Click('css=flow-form button:contains("Start")') + + def FlowHasBeenStarted(): + handler = api_flow.ApiListFlowsHandler() + flows = handler.Handle( + api_flow.ApiListFlowsArgs( + client_id=self.client_id, top_flows_only=True + ), + context=api_call_context.ApiCallContext(username=self.test_username), + ).items + return flows[0] if len(flows) == 1 else None + + flow = self.WaitUntil(FlowHasBeenStarted) + + self.assertEqual(flow.name, file.HashMultipleFiles.__name__) + self.assertCountEqual( + flow.args.path_expressions, ["/foo/firstpath", "/bar/secondpath"] + ) + + if __name__ == "__main__": app.run(test_lib.main) diff --git a/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive.js b/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive.js index 193b616a6b..e9fbb3e2ec 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive.js +++ b/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive.js @@ -48,9 +48,6 @@ const RequestApprovalDialogController = class { /** @export {boolean} */ this.useCcAddresses = true; - /** @export {boolean} */ - this.keepClientAlive = true; - this.scope_.$watch('approvalType', this.onApprovalTypeChange_.bind(this)); this.scope_.$watch( 'controller.selectedRecentReason', @@ -133,9 +130,6 @@ const RequestApprovalDialogController = class { if (this.useCcAddresses && this.ccAddresses) { args['approval']['email_cc_addresses'] = this.ccAddresses; } - if (this.scope_['approvalType'] === 'client' && this.keepClientAlive) { - args['keep_client_alive'] = true; - } this.grrApiService_.post(url, args).then( function success() { diff --git a/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive_test.js b/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive_test.js index 206dd54d2c..75e8255b44 100644 --- 
a/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive_test.js +++ b/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog-directive_test.js @@ -149,18 +149,6 @@ describe('request approval dialog', () => { expect($('input[name=acl_reason]', element).attr('disabled')).toBeTruthy(); }); - it('doesn\'t show keep-alive checkbox for "hunt"approval type', () => { - const element = renderTestTemplate('hunt'); - - expect($('input[name=keepalive]', element).length).toBe(0); - }); - - it('shows keep-alive checkbox for "client" approval type', () => { - const element = renderTestTemplate('client'); - - expect($('input[name=keepalive]', element).length).toBe(1); - }); - it('includes approvers into request if CC-checbox is selected', () => { spyOn(grrApiService, 'post').and.returnValue($q.defer().promise); @@ -184,37 +172,6 @@ describe('request approval dialog', () => { notified_users: ['foo'], email_cc_addresses: ['foo@bar.com', 'xyz@example.com'], }, - keep_client_alive: true, - }); - }); - - it('includes keep_client_alive into request if checkbox is selected', () => { - spyOn(grrApiService, 'post').and.returnValue($q.defer().promise); - - const element = - renderTestTemplate('client', 'foo/bar', clientApprovalRequest); - - setApproverInput(element, 'foo'); - - $('input[name=acl_reason]', element).val('bar'); - browserTriggerEvent($('input[name=acl_reason]', element), 'change'); - - browserTriggerEvent( - $('input[name=cc_approval]', element).prop('checked', false), - 'change'); - browserTriggerEvent( - $('input[name=keepalive]', element).prop('checked', true), - 'change'); - - browserTriggerEvent($('button[name=Proceed]', element), 'click'); - - expect(grrApiService.post).toHaveBeenCalledWith('foo/bar', { - client_id: 'C:123456', - approval: { - reason: 'bar', - notified_users: ['foo'], - }, - keep_client_alive: true, }); }); @@ -244,7 +201,6 @@ describe('request approval dialog', () => { reason: 'reason2', notified_users: ['foo'], }, - keep_client_alive: true, }); }); diff --git a/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog.html b/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog.html index ad48277ff0..293f2027cc 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog.html +++ b/grr/server/grr_response_server/gui/static/angular-components/acl/request-approval-dialog.html @@ -58,16 +58,6 @@

Create a new approval request.

-    [removed: the keep-alive checkbox markup (input name="keepalive"); the HTML tags were not preserved in this extract]
- diff --git a/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json b/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json index 1a1185f297..11f6b9d7f2 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json +++ b/grr/server/grr_response_server/gui/static/angular-components/docs/api-docs-examples.json @@ -172,7 +172,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -389,7 +389,7 @@ "subject": { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -1093,26 +1093,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -1280,12 +1295,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -1750,7 +1771,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -1964,7 +1985,7 @@ "subject": { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -2093,7 +2114,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -2307,7 +2328,7 @@ "subject": { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -2394,7 +2415,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -2595,7 +2616,7 @@ "type_stripped_response": { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -2722,7 +2743,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -2927,7 +2948,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": 
"1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -3128,7 +3149,7 @@ { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -3198,7 +3219,7 @@ { "age": 45000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -3286,7 +3307,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -3487,7 +3508,7 @@ { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -3575,7 +3596,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -3776,7 +3797,7 @@ { "age": 45000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -5023,26 +5044,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -5207,12 +5243,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -5379,26 +5421,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -5563,12 +5620,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + 
"paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:567890", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -5700,26 +5763,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -5957,26 +6035,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -6162,12 +6255,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:556677", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -6239,12 +6338,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:DDEEFF", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -6484,26 +6589,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -6725,12 +6845,18 @@ "duration": 1209600, 
"failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:667788", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -7114,26 +7240,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -7321,12 +7462,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -7461,26 +7608,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -7660,12 +7822,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -7798,26 +7966,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": 
"EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -7965,12 +8148,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -10097,7 +10286,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -10311,7 +10500,7 @@ "subject": { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -10674,26 +10863,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -10858,12 +11062,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -11341,22 +11551,6 @@ "url": "/api/artifacts" } ], - "ApiListClientActionRequestsHandler": [ - { - "api_method": "ListClientActionRequests", - "method": "GET", - "response": {}, - "test_class": "ApiListClientActionRequestsHandlerRegressionTest_http_v1", - "url": "/api/clients/C.1000000000000000/action-requests" - }, - { - "api_method": "ListClientActionRequests", - "method": "GET", - "response": {}, - "test_class": "ApiListClientActionRequestsHandlerRegressionTest_http_v1", - "url": "/api/clients/C.1000000000000000/action-requests?fetch_responses=1" - } - ], "ApiListClientApprovalsHandler": [ { "api_method": "ListClientApprovals", @@ -11419,7 +11613,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -11670,7 +11864,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -11888,7 +12082,7 @@ "subject": { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -11974,7 +12168,7 @@ "subject": { "age": 42000000, 
"agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -12107,7 +12301,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -12325,7 +12519,7 @@ "subject": { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -12416,7 +12610,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -12465,7 +12659,7 @@ { "client_id": "aff4:/C.1000000000000000", "client_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -12500,7 +12694,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -12549,7 +12743,7 @@ { "client_id": "aff4:/C.1000000000000000", "client_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -13214,7 +13408,7 @@ }, "doc": { "type": "unicode", - "value": "This flow looks for files matching given criteria and acts on them.\n\n FileFinder searches for files that match glob expressions. The \"action\"\n (e.g. Download) is applied to files that match all given \"conditions\".\n Matches are then written to the results collection. If there are no\n \"conditions\" specified, \"action\" is just applied to all found files.\n \n\n Call Spec:\n flow.StartFlow(client_id=client_id, flow_cls=file_finder.FileFinder, paths=paths, pathtype=pathtype, conditions=conditions, action=action, process_non_regular_files=process_non_regular_files, follow_links=follow_links, xdev=xdev, implementation_type=implementation_type)\n\n Args:\n action\n description: \n type: FileFinderAction\n default: None\n\n conditions\n description: These conditions will be applied to all files that match the path arguments.\n type: \n default: None\n\n follow_links\n description: Should symbolic links be followed in recursive directory listings.\n type: bool\n default: False\n\n implementation_type\n description: Force use of an implementation.\n type: EnumNamedValue\n default: 0\n\n paths\n description: A path to glob that can contain %% expansions.\n type: \n default: None\n\n pathtype\n description: Path type to glob in.\n type: EnumNamedValue\n default: OS\n\n process_non_regular_files\n description: Look both into regular files and non-regular files (devices, named pipes, sockets). 
NOTE: This is very dangerous and should be used with care.\n type: bool\n default: 0\n\n xdev\n description: Behavior when ecountering device boundaries while doing recursive searches.\n type: EnumNamedValue\n default: LOCAL\n" + "value": "An alias for ClientFileFinder.\n\n Call Spec:\n flow.StartFlow(client_id=client_id, flow_cls=file_finder.FileFinder, paths=paths, pathtype=pathtype, conditions=conditions, action=action, process_non_regular_files=process_non_regular_files, follow_links=follow_links, xdev=xdev, implementation_type=implementation_type)\n\n Args:\n action\n description: \n type: FileFinderAction\n default: None\n\n conditions\n description: These conditions will be applied to all files that match the path arguments.\n type: \n default: None\n\n follow_links\n description: Should symbolic links be followed in recursive directory listings.\n type: bool\n default: False\n\n implementation_type\n description: Force use of an implementation.\n type: EnumNamedValue\n default: 0\n\n paths\n description: A path to glob that can contain %% expansions.\n type: \n default: None\n\n pathtype\n description: Path type to glob in.\n type: EnumNamedValue\n default: OS\n\n process_non_regular_files\n description: Look both into regular files and non-regular files (devices, named pipes, sockets). NOTE: This is very dangerous and should be used with care.\n type: bool\n default: 0\n\n xdev\n description: Behavior when ecountering device boundaries while doing recursive searches.\n type: EnumNamedValue\n default: LOCAL\n" }, "friendly_name": { "type": "unicode", @@ -13274,7 +13468,7 @@ ], "category": "Filesystem", "default_args": {}, - "doc": "This flow looks for files matching given criteria and acts on them.\n\n FileFinder searches for files that match glob expressions. The \"action\"\n (e.g. Download) is applied to files that match all given \"conditions\".\n Matches are then written to the results collection. If there are no\n \"conditions\" specified, \"action\" is just applied to all found files.\n \n\n Call Spec:\n flow.StartFlow(client_id=client_id, flow_cls=file_finder.FileFinder, paths=paths, pathtype=pathtype, conditions=conditions, action=action, process_non_regular_files=process_non_regular_files, follow_links=follow_links, xdev=xdev, implementation_type=implementation_type)\n\n Args:\n action\n description: \n type: FileFinderAction\n default: None\n\n conditions\n description: These conditions will be applied to all files that match the path arguments.\n type: \n default: None\n\n follow_links\n description: Should symbolic links be followed in recursive directory listings.\n type: bool\n default: False\n\n implementation_type\n description: Force use of an implementation.\n type: EnumNamedValue\n default: 0\n\n paths\n description: A path to glob that can contain %% expansions.\n type: \n default: None\n\n pathtype\n description: Path type to glob in.\n type: EnumNamedValue\n default: OS\n\n process_non_regular_files\n description: Look both into regular files and non-regular files (devices, named pipes, sockets). 
NOTE: This is very dangerous and should be used with care.\n type: bool\n default: 0\n\n xdev\n description: Behavior when ecountering device boundaries while doing recursive searches.\n type: EnumNamedValue\n default: LOCAL\n", + "doc": "An alias for ClientFileFinder.\n\n Call Spec:\n flow.StartFlow(client_id=client_id, flow_cls=file_finder.FileFinder, paths=paths, pathtype=pathtype, conditions=conditions, action=action, process_non_regular_files=process_non_regular_files, follow_links=follow_links, xdev=xdev, implementation_type=implementation_type)\n\n Args:\n action\n description: \n type: FileFinderAction\n default: None\n\n conditions\n description: These conditions will be applied to all files that match the path arguments.\n type: \n default: None\n\n follow_links\n description: Should symbolic links be followed in recursive directory listings.\n type: bool\n default: False\n\n implementation_type\n description: Force use of an implementation.\n type: EnumNamedValue\n default: 0\n\n paths\n description: A path to glob that can contain %% expansions.\n type: \n default: None\n\n pathtype\n description: Path type to glob in.\n type: EnumNamedValue\n default: OS\n\n process_non_regular_files\n description: Look both into regular files and non-regular files (devices, named pipes, sockets). NOTE: This is very dangerous and should be used with care.\n type: bool\n default: 0\n\n xdev\n description: Behavior when ecountering device boundaries while doing recursive searches.\n type: EnumNamedValue\n default: LOCAL\n", "friendly_name": "File Finder", "name": "FileFinder" }, @@ -13745,66 +13939,71 @@ "type": "ApiFlowResult", "value": { "payload": { - "type": "StatEntry", + "type": "FileFinderResult", "value": { - "pathspec": { - "type": "PathSpec", + "stat_entry": { + "type": "StatEntry", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" + "pathspec": { + "type": "PathSpec", + "value": { + "path": { + "type": "unicode", + "value": "/tmp/evil.txt" + }, + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" + } + } }, - "pathtype": { - "type": "EnumNamedValue", - "value": "OS" + "st_atime": { + "type": "RDFDatetimeSeconds", + "value": 1336469177 + }, + "st_ctime": { + "type": "RDFDatetimeSeconds", + "value": 1336129892 + }, + "st_dev": { + "type": "long", + "value": 64512 + }, + "st_gid": { + "type": "long", + "value": 5000 + }, + "st_ino": { + "type": "long", + "value": 1063090 + }, + "st_mode": { + "type": "StatMode", + "value": 33184 + }, + "st_mtime": { + "type": "RDFDatetimeSeconds", + "value": 1336129892 + }, + "st_nlink": { + "type": "long", + "value": 1 + }, + "st_size": { + "type": "long", + "value": 12 + }, + "st_uid": { + "type": "long", + "value": 139592 } } - }, - "st_atime": { - "type": "RDFDatetimeSeconds", - "value": 1336469177 - }, - "st_ctime": { - "type": "RDFDatetimeSeconds", - "value": 1336129892 - }, - "st_dev": { - "type": "long", - "value": 64512 - }, - "st_gid": { - "type": "long", - "value": 5000 - }, - "st_ino": { - "type": "long", - "value": 1063090 - }, - "st_mode": { - "type": "StatMode", - "value": 33184 - }, - "st_mtime": { - "type": "RDFDatetimeSeconds", - "value": 1336129892 - }, - "st_nlink": { - "type": "long", - "value": 1 - }, - "st_size": { - "type": "long", - "value": 12 - }, - "st_uid": { - "type": "long", - "value": 139592 } } }, "payload_type": { "type": "unicode", - "value": "StatEntry" + "value": "FileFinderResult" }, "timestamp": { "type": "RDFDatetime", @@ -13819,22 +14018,24 @@ "items": [ { "payload": { - "pathspec": { - 
"path": "/tmp/evil.txt", - "pathtype": "OS" - }, - "st_atime": 1336469177, - "st_ctime": 1336129892, - "st_dev": 64512, - "st_gid": 5000, - "st_ino": 1063090, - "st_mode": 33184, - "st_mtime": 1336129892, - "st_nlink": 1, - "st_size": 12, - "st_uid": 139592 + "stat_entry": { + "pathspec": { + "path": "/tmp/evil.txt", + "pathtype": "OS" + }, + "st_atime": 1336469177, + "st_ctime": 1336129892, + "st_dev": 64512, + "st_gid": 5000, + "st_ino": 1063090, + "st_mode": 33184, + "st_mtime": 1336129892, + "st_nlink": 1, + "st_size": 12, + "st_uid": 139592 + } }, - "payload_type": "StatEntry", + "payload_type": "FileFinderResult", "timestamp": 42000000 } ] @@ -14746,26 +14947,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -14934,12 +15150,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -15219,7 +15441,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -15268,7 +15490,7 @@ { "client_id": "aff4:/C.1000000000000000", "client_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -15303,7 +15525,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -15352,7 +15574,7 @@ { "client_id": "aff4:/C.1000000000000000", "client_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ @@ -17975,26 +18197,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -18146,12 
+18383,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -18277,26 +18520,41 @@ "value": 0 }, "flow_args": { - "type": "GetFileArgs", + "type": "FileFinderArgs", "value": { - "pathspec": { - "type": "PathSpec", + "action": { + "type": "FileFinderAction", "value": { - "path": { - "type": "unicode", - "value": "/tmp/evil.txt" - }, - "pathtype": { + "action_type": { "type": "EnumNamedValue", - "value": "OS" + "value": "DOWNLOAD" + }, + "download": { + "type": "FileFinderDownloadActionOptions", + "value": { + "collect_ext_attrs": { + "type": "bool", + "value": false + } + } } } + }, + "paths": [ + { + "type": "GlobExpression", + "value": "/tmp/evil.txt" + } + ], + "pathtype": { + "type": "EnumNamedValue", + "value": "OS" } } }, "flow_name": { "type": "unicode", - "value": "GetFile" + "value": "ClientFileFinder" }, "hunt_id": { "type": "ApiHuntId", @@ -18448,12 +18706,18 @@ "duration": 1209600, "failed_clients_count": 0, "flow_args": { - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "action": { + "action_type": "DOWNLOAD", + "download": { + "collect_ext_attrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flow_name": "GetFile", + "flow_name": "ClientFileFinder", "hunt_id": "H:123456", "hunt_runner_args": { "avg_cpu_seconds_per_client_limit": 60, @@ -18511,7 +18775,7 @@ "value": { "build_time": { "type": "unicode", - "value": "1980-01-01" + "value": "1980-01-01T12:00:00.000000+00:00" }, "client_name": { "type": "unicode", @@ -18712,7 +18976,7 @@ { "age": 42000000, "agent_info": { - "build_time": "1980-01-01", + "build_time": "1980-01-01T12:00:00.000000+00:00", "client_name": "GRR Monitor", "client_version": 1234, "labels": [ diff --git a/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json b/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json index 1b2619e5c9..0a4ff52fbe 100644 --- a/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json +++ b/grr/server/grr_response_server/gui/static/angular-components/docs/api-v2-docs-examples.json @@ -80,7 +80,7 @@ "subject": { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -312,13 +312,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -520,7 +526,7 @@ "subject": { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -610,7 +616,7 @@ "subject": { "age": "42000000", "agentInfo": { - 
"buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -688,7 +694,7 @@ "response": { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -806,7 +812,7 @@ { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -875,7 +881,7 @@ { "age": "45000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -954,7 +960,7 @@ { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -1033,7 +1039,7 @@ { "age": "45000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -1517,13 +1523,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -1605,13 +1617,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:567890", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -1683,13 +1701,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:556677", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -1760,13 +1784,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:DDEEFF", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -1878,13 +1908,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + 
"@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:667788", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -2213,13 +2249,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -2299,13 +2341,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -2383,13 +2431,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -3203,7 +3257,7 @@ "subject": { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -3361,13 +3415,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -3572,22 +3632,6 @@ "url": "/api/v2/artifacts" } ], - "ApiListClientActionRequestsHandler": [ - { - "api_method": "ListClientActionRequests", - "method": "GET", - "response": {}, - "test_class": "ApiListClientActionRequestsHandlerRegressionTest_http_v2", - "url": "/api/v2/clients/C.1000000000000000/action-requests" - }, - { - "api_method": "ListClientActionRequests", - "method": "GET", - "response": {}, - "test_class": "ApiListClientActionRequestsHandlerRegressionTest_http_v2", - "url": "/api/v2/clients/C.1000000000000000/action-requests?fetch_responses=1" - } - ], "ApiListClientApprovalsHandler": [ { "api_method": "ListClientApprovals", @@ -3611,7 +3655,7 @@ "subject": { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": 
"GRR Monitor", "clientVersion": 1234, "labels": [ @@ -3695,7 +3739,7 @@ "subject": { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -3789,7 +3833,7 @@ "subject": { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -3871,7 +3915,7 @@ { "clientId": "aff4:/C.1000000000000000", "clientInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -3898,7 +3942,7 @@ { "clientId": "aff4:/C.1000000000000000", "clientInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -4118,7 +4162,7 @@ "defaultArgs": { "@type": "type.googleapis.com/grr.FileFinderArgs" }, - "doc": "This flow looks for files matching given criteria and acts on them.\n\n FileFinder searches for files that match glob expressions. The \"action\"\n (e.g. Download) is applied to files that match all given \"conditions\".\n Matches are then written to the results collection. If there are no\n \"conditions\" specified, \"action\" is just applied to all found files.\n \n\n Call Spec:\n flow.StartFlow(client_id=client_id, flow_cls=file_finder.FileFinder, paths=paths, pathtype=pathtype, conditions=conditions, action=action, process_non_regular_files=process_non_regular_files, follow_links=follow_links, xdev=xdev, implementation_type=implementation_type)\n\n Args:\n action\n description: \n type: FileFinderAction\n default: None\n\n conditions\n description: These conditions will be applied to all files that match the path arguments.\n type: \n default: None\n\n follow_links\n description: Should symbolic links be followed in recursive directory listings.\n type: bool\n default: False\n\n implementation_type\n description: Force use of an implementation.\n type: EnumNamedValue\n default: 0\n\n paths\n description: A path to glob that can contain %% expansions.\n type: \n default: None\n\n pathtype\n description: Path type to glob in.\n type: EnumNamedValue\n default: OS\n\n process_non_regular_files\n description: Look both into regular files and non-regular files (devices, named pipes, sockets). 
NOTE: This is very dangerous and should be used with care.\n type: bool\n default: 0\n\n xdev\n description: Behavior when ecountering device boundaries while doing recursive searches.\n type: EnumNamedValue\n default: LOCAL\n", + "doc": "An alias for ClientFileFinder.\n\n Call Spec:\n flow.StartFlow(client_id=client_id, flow_cls=file_finder.FileFinder, paths=paths, pathtype=pathtype, conditions=conditions, action=action, process_non_regular_files=process_non_regular_files, follow_links=follow_links, xdev=xdev, implementation_type=implementation_type)\n\n Args:\n action\n description: \n type: FileFinderAction\n default: None\n\n conditions\n description: These conditions will be applied to all files that match the path arguments.\n type: \n default: None\n\n follow_links\n description: Should symbolic links be followed in recursive directory listings.\n type: bool\n default: False\n\n implementation_type\n description: Force use of an implementation.\n type: EnumNamedValue\n default: 0\n\n paths\n description: A path to glob that can contain %% expansions.\n type: \n default: None\n\n pathtype\n description: Path type to glob in.\n type: EnumNamedValue\n default: OS\n\n process_non_regular_files\n description: Look both into regular files and non-regular files (devices, named pipes, sockets). NOTE: This is very dangerous and should be used with care.\n type: bool\n default: 0\n\n xdev\n description: Behavior when encountering device boundaries while doing recursive searches.\n type: EnumNamedValue\n default: LOCAL\n", "friendlyName": "File Finder", "name": "FileFinder" }, @@ -4241,23 +4285,25 @@ "items": [ { "payload": { - "@type": "type.googleapis.com/grr.StatEntry", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - }, - "stAtime": "1336469177", - "stCtime": "1336129892", - "stDev": "64512", - "stGid": 5000, - "stIno": "1063090", - "stMode": "33184", - "stMtime": "1336129892", - "stNlink": "1", - "stSize": "12", - "stUid": 139592 + "@type": "type.googleapis.com/grr.FileFinderResult", + "statEntry": { + "pathspec": { + "path": "/tmp/evil.txt", + "pathtype": "OS" + }, + "stAtime": "1336469177", + "stCtime": "1336129892", + "stDev": "64512", + "stGid": 5000, + "stIno": "1063090", + "stMode": "33184", + "stMtime": "1336129892", + "stNlink": "1", + "stSize": "12", + "stUid": 139592 + } }, - "payloadType": "StatEntry", + "payloadType": "FileFinderResult", "timestamp": "42000000" } ] @@ -4544,13 +4590,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -4677,7 +4729,7 @@ { "clientId": "aff4:/C.1000000000000000", "clientInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -4704,7 +4756,7 @@ { "clientId": "aff4:/C.1000000000000000", "clientInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ @@ -5485,13 +5537,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type":
"type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -5562,13 +5620,19 @@ "duration": "1209600", "failedClientsCount": "0", "flowArgs": { - "@type": "type.googleapis.com/grr.GetFileArgs", - "pathspec": { - "path": "/tmp/evil.txt", - "pathtype": "OS" - } + "@type": "type.googleapis.com/grr.FileFinderArgs", + "action": { + "actionType": "DOWNLOAD", + "download": { + "collectExtAttrs": false + } + }, + "paths": [ + "/tmp/evil.txt" + ], + "pathtype": "OS" }, - "flowName": "GetFile", + "flowName": "ClientFileFinder", "huntId": "H:123456", "huntRunnerArgs": { "avgCpuSecondsPerClientLimit": "60", @@ -5618,7 +5682,7 @@ { "age": "42000000", "agentInfo": { - "buildTime": "1980-01-01", + "buildTime": "1980-01-01T12:00:00.000000+00:00", "clientName": "GRR Monitor", "clientVersion": 1234, "labels": [ diff --git a/grr/server/grr_response_server/gui/ui/components/approval_card/approval_card.ts b/grr/server/grr_response_server/gui/ui/components/approval_card/approval_card.ts index c42f2b0aca..c66d233c24 100644 --- a/grr/server/grr_response_server/gui/ui/components/approval_card/approval_card.ts +++ b/grr/server/grr_response_server/gui/ui/components/approval_card/approval_card.ts @@ -6,8 +6,8 @@ import {ActivatedRoute, Router} from '@angular/router'; import {BehaviorSubject, Subject} from 'rxjs'; import {map, takeUntil, withLatestFrom} from 'rxjs/operators'; -import {RequestStatus, RequestStatusType} from '../../lib/api/track_request'; -import {Approval} from '../../lib/models/user'; +import {type RequestStatus, RequestStatusType} from '../../lib/api/track_request'; +import {type Approval} from '../../lib/models/user'; import {observeOnDestroy} from '../../lib/reactive'; import {ApprovalCardLocalStore} from '../../store/approval_card_local_store'; import {ConfigGlobalStore} from '../../store/config_global_store'; diff --git a/grr/server/grr_response_server/gui/ui/components/approval_chip/approval_chip.ts b/grr/server/grr_response_server/gui/ui/components/approval_chip/approval_chip.ts index 7fc7e3ee91..a302c01166 100644 --- a/grr/server/grr_response_server/gui/ui/components/approval_chip/approval_chip.ts +++ b/grr/server/grr_response_server/gui/ui/components/approval_chip/approval_chip.ts @@ -3,7 +3,7 @@ import {BehaviorSubject} from 'rxjs'; import {map} from 'rxjs/operators'; import {DateTime} from '../../lib/date_time'; -import {Approval, ApprovalStatus} from '../../lib/models/user'; +import {type Approval, ApprovalStatus} from '../../lib/models/user'; const TITLES: {readonly[key in ApprovalStatus['type']]: string} = { 'expired': 'No access', diff --git a/grr/server/grr_response_server/gui/ui/components/client_details/entry_history_button/entry_history_button.ts b/grr/server/grr_response_server/gui/ui/components/client_details/entry_history_button/entry_history_button.ts index 9304418c8a..5bcdde66eb 100644 --- a/grr/server/grr_response_server/gui/ui/components/client_details/entry_history_button/entry_history_button.ts +++ b/grr/server/grr_response_server/gui/ui/components/client_details/entry_history_button/entry_history_button.ts @@ -3,7 +3,7 @@ import {MatDialog} from '@angular/material/dialog'; import {Client} 
from '../../../lib/models/client'; import {ClientDetailsGlobalStore} from '../../../store/client_details_global_store'; -import {EntryHistoryDialog, EntryHistoryDialogParams, EntryType} from '../entry_history_dialog/entry_history_dialog'; +import {EntryHistoryDialog, EntryHistoryDialogParams, type EntryType} from '../entry_history_dialog/entry_history_dialog'; /** * Component displaying a button with the associated entry changes, diff --git a/grr/server/grr_response_server/gui/ui/components/data_renderers/process/process_view.scss b/grr/server/grr_response_server/gui/ui/components/data_renderers/process/process_view.scss index 69adf8602a..bbcd1cf9e6 100644 --- a/grr/server/grr_response_server/gui/ui/components/data_renderers/process/process_view.scss +++ b/grr/server/grr_response_server/gui/ui/components/data_renderers/process/process_view.scss @@ -22,6 +22,7 @@ width: 30px; line-height: 30px; color: #868484; + padding: 2px 0 0; } } diff --git a/grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table.ts b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table.ts new file mode 100644 index 0000000000..23853b372b --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table.ts @@ -0,0 +1,54 @@ +import {CommonModule} from '@angular/common'; +import {ChangeDetectionStrategy, Component} from '@angular/core'; +import {MatButtonModule} from '@angular/material/button'; +import {MatIconModule} from '@angular/material/icon'; +import {MatTableModule} from '@angular/material/table'; + +import {FLOW_PAYLOAD_TYPE_TRANSLATION} from '../../../lib/api_translation/result'; +import {ExpandableHashModule} from '../../expandable_hash/module'; +import {CopyButtonModule} from '../../helpers/copy_button/copy_button_module'; +import {DrawerLinkModule} from '../../helpers/drawer_link/drawer_link_module'; +import {FilterPaginate} from '../../helpers/filter_paginate/filter_paginate'; +import {HumanReadableSizeModule} from '../../human_readable_size/module'; +import {TimestampModule} from '../../timestamp/module'; +import {UserImageModule} from '../../user_image/module'; +import {FileModeModule} from '../file_mode/file_mode_module'; + +import {DataTableView} from './table'; + +/** Component to show a data table with flow results. 
*/ +@Component({ + selector: 'app-flow-data-table', + templateUrl: './table.ng.html', + styleUrls: ['./table.scss'], + standalone: true, + changeDetection: ChangeDetectionStrategy.OnPush, + imports: [ + CommonModule, + + MatButtonModule, + MatIconModule, + MatTableModule, + + FilterPaginate, + TimestampModule, + CopyButtonModule, + DrawerLinkModule, + ExpandableHashModule, + FileModeModule, + HumanReadableSizeModule, + UserImageModule, + ], +}) +export class FlowDataTableView extends DataTableView { + protected override getPayloadTypeTranslation(type: string) { + const translation = + FLOW_PAYLOAD_TYPE_TRANSLATION[type as keyof typeof FLOW_PAYLOAD_TYPE_TRANSLATION]; + + // If there is no "Flow" definition for the Payload Type translation, + // we fall back to the "Hunt" translation: + if (!translation) return super.getPayloadTypeTranslation(type); + + return translation; + } +} \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table_test.ts b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table_test.ts new file mode 100644 index 0000000000..ac40d40506 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/flow_table_test.ts @@ -0,0 +1,152 @@ +import {TestbedHarnessEnvironment} from '@angular/cdk/testing/testbed'; +import {TestBed, waitForAsync} from '@angular/core/testing'; +import {MatPaginatorHarness} from '@angular/material/paginator/testing'; +import {NoopAnimationsModule} from '@angular/platform-browser/animations'; +import {RouterTestingModule} from '@angular/router/testing'; +import {ReplaySubject} from 'rxjs'; + +import {StatEntry} from '../../../lib/api/api_interfaces'; +import {createStatEntry} from '../../../lib/api/api_test_util'; +import {FlowResult, ResultSource, ResultTypeQuery} from '../../../lib/models/flow'; +import {HuntPageGlobalStore} from '../../../store/hunt_page_global_store'; +import {HuntPageGlobalStoreMock, mockHuntPageGlobalStore} from '../../../store/hunt_page_global_store_test_util'; +import {STORE_PROVIDERS} from '../../../store/store_test_providers'; +import {initTestEnvironment} from '../../../testing'; + +import {FlowDataTableView} from './flow_table'; + +initTestEnvironment(); + +function mockResultSource() { + return { + results$: new ReplaySubject(1), + totalCount$: new ReplaySubject(1), + query$: new ReplaySubject(1), + loadResults: jasmine.createSpy('loadResult'), + }; +} + +describe('FlowDataTableView component', () => { + let huntPageGlobalStore: HuntPageGlobalStoreMock; + let resultSource = mockResultSource(); + + beforeEach(waitForAsync(() => { + huntPageGlobalStore = mockHuntPageGlobalStore(); + resultSource = mockResultSource(); + TestBed + .configureTestingModule({ + imports: [ + FlowDataTableView, + NoopAnimationsModule, + RouterTestingModule.withRoutes([ + // Dummy route to stop error when navigating to details. 
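
An aside on this test pattern (not part of the patch itself): selecting a result row navigates into the auxiliary "drawer" router outlet, and Angular's Router rejects any navigation that no registered route matches, which would fail the spec. A minimal sketch of the kind of call the placeholder route absorbs; openResultDetails and resultId are illustrative names, not taken from this change:

import {Router} from '@angular/router';

// Hypothetical helper: navigate the named "drawer" outlet to a result's
// detail view. Without a matching route in the test module,
// Router.navigate() rejects and the spec errors out.
function openResultDetails(router: Router, resultId: string): Promise<boolean> {
  return router.navigate([{outlets: {drawer: ['result-details', resultId]}}]);
}

The placeholder registered next gives that navigation a target:
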
+ { + outlet: 'drawer', + path: 'result-details/:id', + component: FlowDataTableView + }, + ]), + ], + providers: [ + ...STORE_PROVIDERS, + ], + teardown: {destroyAfterEach: false} + }) + .overrideProvider( + HuntPageGlobalStore, {useFactory: () => huntPageGlobalStore}) + .overrideProvider( + ResultSource, + {useFactory: () => resultSource as ResultSource}) + .compileComponents(); + })); + + it('displays translated flow results', () => { + const fixture = TestBed.createComponent(FlowDataTableView); + resultSource.totalCount$.next(2); + resultSource.query$.next({type: 'StatEntry'}); + fixture.detectChanges(); + + resultSource.results$.next([ + { + timestamp: new Date(1234), + payloadType: 'StatEntry', + tag: '', + payload: createStatEntry(3), + }, + { + timestamp: new Date(5678), + payloadType: 'StatEntry', + tag: '', + payload: {pathspec: {path: '/bar'}} as StatEntry, + } + ]); + fixture.detectChanges(); + + const rows = fixture.nativeElement.querySelectorAll('mat-row'); + expect(rows.length).toBe(2); + + enum CellIndexOf { + PATH = 0, + FILE_MODE, + HASH, + HUMAN_READABLE_SIZE, + TIMESTAMP + } + + let cells = rows[0].querySelectorAll('mat-cell'); + expect(cells[CellIndexOf.PATH].innerText).toContain('/foo'); + expect(cells[CellIndexOf.FILE_MODE].innerText).toContain('-rw-r--r--'); + expect(cells[CellIndexOf.HUMAN_READABLE_SIZE].innerText).toContain('442 B'); + expect(cells[CellIndexOf.TIMESTAMP].innerText) + .toContain('2023-03-30 01:33:20 UTC'); + + cells = rows[1].querySelectorAll('mat-cell'); + expect(cells[CellIndexOf.PATH].innerText).toContain('/bar'); + }); + + it('shows total result count', async () => { + const fixture = TestBed.createComponent(FlowDataTableView); + resultSource.totalCount$.next(123); + fixture.detectChanges(); + + // For unknown, mystical, and elusive reasons, Angular's MatPaginator keeps + // showing "0 of 0" unless the MatPaginatorHarness is loaded. + // MatPaginator should show the count of 123 in tests without it, but + // weirdly doesn't. Also, loading the harness should be a no-op, but also + // isn't. 
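
A likely explanation for the behavior described in the comment above (an assumption, not verified in this patch): harness environments stabilize the fixture, running change detection and awaiting zone stability around every harness operation, so merely loading the harness flushes the pending updates that let the paginator render its real count. A manual equivalent would be roughly the following; stabilize is an illustrative helper, not part of this change:

import {ComponentFixture} from '@angular/core/testing';

// Sketch: flush pending async work and re-render, which is approximately
// what TestbedHarnessEnvironment does internally before each harness call.
async function stabilize(fixture: ComponentFixture<unknown>): Promise<void> {
  await fixture.whenStable();
  fixture.detectChanges();
}

The harness-based route the test takes instead:
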
+ const harnessLoader = TestbedHarnessEnvironment.loader(fixture); + await harnessLoader.getHarness(MatPaginatorHarness); + + expect(fixture.debugElement.nativeElement.textContent).toContain('123'); + }); + + it('queries initial result page', async () => { + const fixture = TestBed.createComponent(FlowDataTableView); + resultSource.totalCount$.next(50); + fixture.detectChanges(); + + const harnessLoader = TestbedHarnessEnvironment.loader(fixture); + const paginationHarness = + await harnessLoader.getHarness(MatPaginatorHarness); + const pageSize = await paginationHarness.getPageSize(); + + expect(resultSource.loadResults) + .toHaveBeenCalledOnceWith({offset: 0, count: pageSize}); + }); + + it('queries next page upon pagination', async () => { + const fixture = TestBed.createComponent(FlowDataTableView); + resultSource.totalCount$.next(50); + fixture.detectChanges(); + + const harnessLoader = TestbedHarnessEnvironment.loader(fixture); + const paginationHarness = + await harnessLoader.getHarness(MatPaginatorHarness); + await paginationHarness.goToNextPage(); + + const pageSize = await paginationHarness.getPageSize(); + + expect(resultSource.loadResults) + .toHaveBeenCalledWith({offset: pageSize, count: pageSize}); + }); +}); \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.scss b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.scss index e69de29bb2..1156597b74 100644 --- a/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.scss +++ b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.scss @@ -0,0 +1,39 @@ +@use '@angular/material' as mat; +@use '../../../material-theme' as c; + +.mat-mdc-table .mat-mdc-header-row { + --mat-table-header-headline-weight: 500; +} + +.mat-mdc-cell, +.mat-mdc-header-cell { + border-color: mat.get-color-from-palette(c.$foreground, divider-light); + color: mat.get-color-from-palette(c.$foreground, text-light); + padding: 0.5em; + + &:last-of-type { + padding-right: 12px; + } + + &:first-of-type { + padding-left: 12px; + } +} + +.table-controls { + display: flex; + padding-left: 24px; +} + +.filter-input { + flex: 1; // width fills out the space +} + +.top-paginator { + margin-left: auto; // aligns to the right +} + +button.load-more { + width: 100%; + margin-bottom: 0.5em; +} \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.ts b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.ts index 51f76807f2..30e383c48d 100644 --- a/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.ts +++ b/grr/server/grr_response_server/gui/ui/components/data_renderers/table/table.ts @@ -4,7 +4,7 @@ import {MatButtonModule} from '@angular/material/button'; import {MatIconModule} from '@angular/material/icon'; import {MatTableDataSource, MatTableModule} from '@angular/material/table'; import {combineLatest, Observable} from 'rxjs'; -import {filter, map, takeUntil} from 'rxjs/operators'; +import {distinctUntilChanged, filter, map, shareReplay, takeUntil} from 'rxjs/operators'; import {PAYLOAD_TYPE_TRANSLATION} from '../../../lib/api_translation/result'; import {PaginatedResultView} from '../../../lib/models/flow'; @@ -67,12 +67,9 @@ export class DataTableView extends PaginatedResultView implements private readonly translation$ = this.resultSource.query$.pipe( map(({type}) => type), filter(isNonNull), - map(type => { - const 
translation = - PAYLOAD_TYPE_TRANSLATION[type as keyof typeof PAYLOAD_TYPE_TRANSLATION]; - assertNonNull(translation, `PAYLOAD_TYPE_TRANSLATION for "${type}"`); - return translation; - }), + distinctUntilChanged(), + map(payloadType => this.getPayloadTypeTranslation(payloadType)), + shareReplay(1), ); protected rows$ = @@ -115,6 +112,14 @@ export class DataTableView extends PaginatedResultView implements }); } + protected getPayloadTypeTranslation(type: string) { + const translation = + PAYLOAD_TYPE_TRANSLATION[type as keyof typeof PAYLOAD_TYPE_TRANSLATION]; + + assertNonNull(translation, `PAYLOAD_TYPE translation for "${type}"`); + return translation; + } + protected readonly trackByIndex: TrackByFunction = (index) => index; protected readonly trackByKey: TrackByFunction> = (index, pair) => diff --git a/grr/server/grr_response_server/gui/ui/components/expandable_hash/expandable_hash.ts b/grr/server/grr_response_server/gui/ui/components/expandable_hash/expandable_hash.ts index a8c26d7700..fcc8357c81 100644 --- a/grr/server/grr_response_server/gui/ui/components/expandable_hash/expandable_hash.ts +++ b/grr/server/grr_response_server/gui/ui/components/expandable_hash/expandable_hash.ts @@ -1,6 +1,6 @@ import {Component, Input, ViewEncapsulation} from '@angular/core'; -import {hashName, HexHash} from '../../lib/models/flow'; +import {hashName, type HexHash} from '../../lib/models/flow'; interface HashEntry { readonly name: string; diff --git a/grr/server/grr_response_server/gui/ui/components/file_details/file_details.ts b/grr/server/grr_response_server/gui/ui/components/file_details/file_details.ts index 90800bcd2b..4791484c29 100644 --- a/grr/server/grr_response_server/gui/ui/components/file_details/file_details.ts +++ b/grr/server/grr_response_server/gui/ui/components/file_details/file_details.ts @@ -3,7 +3,7 @@ import {Router} from '@angular/router'; import {map} from 'rxjs/operators'; import {getFileBlobUrl} from '../../lib/api/http_api_service'; -import {FileIdentifier} from '../../lib/models/vfs'; +import {type FileIdentifier} from '../../lib/models/vfs'; import {isNonNull} from '../../lib/preconditions'; import {observeOnDestroy} from '../../lib/reactive'; import {FileDetailsLocalStore} from '../../store/file_details_local_store'; diff --git a/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form.ts b/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form.ts index cdc211bc0b..c7a1872c28 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form.ts @@ -3,7 +3,7 @@ import {Observable, ReplaySubject} from 'rxjs'; import {takeUntil} from 'rxjs/operators'; import {DEFAULT_FORM, FORMS} from '../../components/flow_args_form/sub_forms'; -import {FlowDescriptor, FlowType} from '../../lib/models/flow'; +import {type FlowDescriptor, FlowType} from '../../lib/models/flow'; import {observeOnDestroy} from '../../lib/reactive'; import {FlowArgumentForm} from './form_interface'; diff --git a/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form_test.ts b/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form_test.ts index 5054ed5f8e..e947b43c4d 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form_test.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_args_form/flow_args_form_test.ts @@ -12,7 +12,7 @@ import {firstValueFrom, 
ReplaySubject, Subject} from 'rxjs'; import {FlowArgsFormModule} from '../../components/flow_args_form/module'; import {ArtifactCollectorFlowArgs, Browser, CollectBrowserHistoryArgs, CollectFilesByKnownPathArgsCollectionLevel, ExecutePythonHackArgs, GlobComponentExplanation, LaunchBinaryArgs, ListNamedPipesFlowArgsPipeEndFilter, ListNamedPipesFlowArgsPipeTypeFilter, TimelineArgs} from '../../lib/api/api_interfaces'; import {ApiModule} from '../../lib/api/module'; -import {BinaryType, FlowDescriptor, FlowType, OperatingSystem, SourceType} from '../../lib/models/flow'; +import {BinaryType, type FlowDescriptor, FlowType, OperatingSystem, SourceType} from '../../lib/models/flow'; import {newArtifactDescriptorMap, newClient} from '../../lib/models/model_test_util'; import {ExplainGlobExpressionService} from '../../lib/service/explain_glob_expression_service/explain_glob_expression_service'; import {deepFreeze} from '../../lib/type_utils'; diff --git a/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/table_info_item.ts b/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/table_info_item.ts index 828466c2ac..4fd2f49c70 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/table_info_item.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/table_info_item.ts @@ -2,7 +2,7 @@ import {Component, Input} from '@angular/core'; import {Match, stringWithHighlightsFromMatch, StringWithHighlightsPart} from '../../../lib/fuzzy_matcher'; -import {OsqueryTableSpec} from './osquery_table_specs'; +import {type OsqueryTableSpec} from './osquery_table_specs'; /** An item containing table info to display in the query helper menu */ diff --git a/grr/server/grr_response_server/gui/ui/components/flow_args_form/sub_forms.ts b/grr/server/grr_response_server/gui/ui/components/flow_args_form/sub_forms.ts index c72019368e..89586bc189 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_args_form/sub_forms.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_args_form/sub_forms.ts @@ -28,6 +28,8 @@ export const FORMS: {[key in FlowType]?: Type>} = { [FlowType.COLLECT_BROWSER_HISTORY]: CollectBrowserHistoryForm, [FlowType.COLLECT_FILES_BY_KNOWN_PATH]: CollectFilesByKnownPathForm, [FlowType.COLLECT_MULTIPLE_FILES]: CollectMultipleFilesForm, + [FlowType.STAT_MULTIPLE_FILES]: CollectMultipleFilesForm, + [FlowType.HASH_MULTIPLE_FILES]: CollectMultipleFilesForm, [FlowType.DUMP_PROCESS_MEMORY]: DumpProcessMemoryForm, [FlowType.EXECUTE_PYTHON_HACK]: ExecutePythonHackForm, [FlowType.LAUNCH_BINARY]: LaunchBinaryForm, @@ -43,9 +45,7 @@ export const FORMS: {[key in FlowType]?: Type>} = { // Show empty form as fallback for flows that typically do not require // configuration. 
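
For context, a sketch of how this map is presumably consumed (the actual lookup lives in flow_args_form.ts, which imports DEFAULT_FORM and FORMS from this file; resolveForm is an illustrative name and the generic parameters are approximated, since the declared types are not fully visible here):

import {Type} from '@angular/core';

// Resolve the argument-form component for a flow type, falling back to
// the empty DEFAULT_FORM for flows without a dedicated form.
function resolveForm(flowType: FlowType): Type<FlowArgumentForm<{}>> {
  return FORMS[flowType] ?? DEFAULT_FORM;
}

The fallback registrations themselves follow:
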
- [FlowType.COLLECT_EFI_HASHES]: FallbackFlowArgsForm, [FlowType.COLLECT_RUNKEY_BINARIES]: FallbackFlowArgsForm, - [FlowType.DUMP_EFI_IMAGE]: FallbackFlowArgsForm, [FlowType.DUMP_FLASH_IMAGE]: FallbackFlowArgsForm, [FlowType.GET_CLIENT_STATS]: FallbackFlowArgsForm, [FlowType.GET_MBR]: FallbackFlowArgsForm, diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/flow_details.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/flow_details.ts index c7048f507a..5c9b9710f3 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_details/flow_details.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/flow_details.ts @@ -3,7 +3,7 @@ import {BehaviorSubject, combineLatest, Observable} from 'rxjs'; import {map, startWith} from 'rxjs/operators'; import {ExportMenuItem, Plugin as FlowDetailsPlugin} from '../../components/flow_details/plugins/plugin'; -import {Flow, FlowDescriptor, FlowState, FlowType, getFlowTitleFromFlow} from '../../lib/models/flow'; +import {type Flow, type FlowDescriptor, FlowState, FlowType, getFlowTitleFromFlow} from '../../lib/models/flow'; import {isNonNull} from '../../lib/preconditions'; import {FlowResultsLocalStore} from '../../store/flow_results_local_store'; import {FlowArgsViewData} from '../flow_args_view/flow_args_view'; diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/helpers/osquery_results_table.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/helpers/osquery_results_table.ts index 8f119e73eb..139fd3d857 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_details/helpers/osquery_results_table.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/helpers/osquery_results_table.ts @@ -3,7 +3,7 @@ import {MatSort} from '@angular/material/sort'; import {MatTableDataSource} from '@angular/material/table'; import {BehaviorSubject} from 'rxjs'; -import {OsqueryTable} from '../../../lib/api/api_interfaces'; +import {type OsqueryTable} from '../../../lib/api/api_interfaces'; import {isNonNull} from '../../../lib/preconditions'; interface Row { diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugin_registry.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugin_registry.ts index 36d4f652af..a0dc015057 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_details/plugin_registry.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugin_registry.ts @@ -3,6 +3,8 @@ import {Type} from '@angular/core'; import {CollectBrowserHistoryDetails} from '../../components/flow_details/plugins/collect_browser_history_details'; import {CollectFilesByKnownPathDetails} from '../../components/flow_details/plugins/collect_files_by_known_path_details'; import {CollectMultipleFilesDetails} from '../../components/flow_details/plugins/collect_multiple_files_details'; +import {HashMultipleFilesDetails} from '../../components/flow_details/plugins/hash_multiple_files_details'; +import {StatMultipleFilesDetails} from '../../components/flow_details/plugins/stat_multiple_files_details'; import {FlowType} from '../../lib/models/flow'; import {ArtifactCollectorFlowDetails} from './plugins/artifact_collector_flow_details'; @@ -36,6 +38,8 @@ export const FLOW_DETAILS_PLUGIN_REGISTRY: [FlowType.COLLECT_BROWSER_HISTORY]: CollectBrowserHistoryDetails, [FlowType.COLLECT_FILES_BY_KNOWN_PATH]: CollectFilesByKnownPathDetails, [FlowType.COLLECT_MULTIPLE_FILES]: CollectMultipleFilesDetails, + 
[FlowType.STAT_MULTIPLE_FILES]: StatMultipleFilesDetails, + [FlowType.HASH_MULTIPLE_FILES]: HashMultipleFilesDetails, [FlowType.DUMP_PROCESS_MEMORY]: DumpProcessMemoryDetails, [FlowType.EXECUTE_PYTHON_HACK]: ExecutePythonHackDetails, [FlowType.FILE_FINDER]: FileFinderDetails, diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ng.html b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ng.html new file mode 100644 index 0000000000..d9a6a69b2e --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ng.html @@ -0,0 +1,8 @@ + + + + \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ts new file mode 100644 index 0000000000..847c2373b7 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details.ts @@ -0,0 +1,71 @@ +import {ChangeDetectionStrategy, Component} from '@angular/core'; +import {Observable} from 'rxjs'; +import {map} from 'rxjs/operators'; + +import {FlowFileResult, flowFileResultFromStatEntry} from '../../../components/flow_details/helpers/file_results_table'; +import {CollectMultipleFilesArgs, CollectMultipleFilesResult, HashMultipleFilesProgress} from '../../../lib/api/api_interfaces'; +import {translateHashToHex, translateStatEntry} from '../../../lib/api_translation/flow'; +import {Flow} from '../../../lib/models/flow'; +import {FlowResultMapFunction, FlowResultsQueryWithAdapter} from '../helpers/load_flow_results_directive'; + +import {ExportMenuItem, Plugin} from './plugin'; + + +const ADAPTER: FlowResultMapFunction = + (results) => + results?.map(item => item.payload as CollectMultipleFilesResult) + .map( + res => flowFileResultFromStatEntry( + translateStatEntry(res.stat!), + translateHashToHex(res.hash ?? {}))); + + +/** + * Component that displays results of CollectMultipleFiles flow. + */ +@Component({ + selector: 'hash-multiple-files-details', + templateUrl: './hash_multiple_files_details.ng.html', + styleUrls: ['./_base.scss'], + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class HashMultipleFilesDetails extends Plugin { + readonly QUERY_MORE_COUNT = 100; + + readonly args$: Observable = this.flow$.pipe( + map((flow) => flow.args as CollectMultipleFilesArgs), + ); + + readonly flowProgress$: Observable = + this.flow$.pipe( + map((flow) => flow.progress as HashMultipleFilesProgress), + ); + + readonly totalFiles$: Observable = this.flowProgress$.pipe( + map((progress) => Number(progress?.numHashed ?? 0))); + + readonly query$: Observable< + FlowResultsQueryWithAdapter> = + this.flow$.pipe(map(flow => ({flow, resultMapper: ADAPTER}))); + + readonly description$ = this.args$.pipe(map(args => { + const length = args.pathExpressions?.length ?? 0; + if (length <= 1) { + return args.pathExpressions?.[0] ?? ''; + } else { + return `${args.pathExpressions?.[0]} + ${length - 1} more`; + } + })); + + override getExportMenuItems(flow: Flow): readonly ExportMenuItem[] { + const downloadItem = this.getDownloadFilesExportMenuItem(flow); + const items = super.getExportMenuItems(flow); + + if (items.find(item => item.url === downloadItem.url)) { + return items; + } + + // If the menu does not yet contain "Download files", display it. 
+ return [downloadItem, ...items]; + } +} \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details_test.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details_test.ts new file mode 100644 index 0000000000..6a48fbd929 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/hash_multiple_files_details_test.ts @@ -0,0 +1,53 @@ +import {TestBed, waitForAsync} from '@angular/core/testing'; +import {NoopAnimationsModule} from '@angular/platform-browser/animations'; + +import {CollectMultipleFilesArgs, HashMultipleFilesProgress} from '../../../lib/api/api_interfaces'; +import {FlowState} from '../../../lib/models/flow'; +import {newFlow} from '../../../lib/models/model_test_util'; +import {initTestEnvironment} from '../../../testing'; + +import {CollectMultipleFilesDetails} from './collect_multiple_files_details'; +import {PluginsModule} from './module'; + + +initTestEnvironment(); + +describe('collect-multiple-files-details component', () => { + beforeEach(waitForAsync(() => { + TestBed + .configureTestingModule({ + imports: [ + NoopAnimationsModule, + PluginsModule, + ], + providers: [], + teardown: {destroyAfterEach: false} + }) + .compileComponents(); + })); + + + it('shows file download button', () => { + const fixture = TestBed.createComponent(CollectMultipleFilesDetails); + const args: CollectMultipleFilesArgs = {pathExpressions: ['/foo/**']}; + const progress: HashMultipleFilesProgress = { + numHashed: '42', + }; + + fixture.componentInstance.flow = newFlow({ + name: 'HashMultipleFiles', + args, + progress, + state: FlowState.FINISHED, + }); + fixture.detectChanges(); + + const menuItems = fixture.componentInstance.getExportMenuItems( + fixture.componentInstance.flow); + expect(menuItems[0]) + .toEqual(fixture.componentInstance.getDownloadFilesExportMenuItem( + fixture.componentInstance.flow)); + expect(menuItems[0].url) + .toMatch('/api/v2/clients/.+/flows/.+/results/files-archive'); + }); +}); diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts index 57da3fccfa..eb18b9a330 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts @@ -36,6 +36,7 @@ import {DefaultDetails} from './default_details'; import {DumpProcessMemoryDetails} from './dump_process_memory_details'; import {ExecutePythonHackDetails} from './execute_python_hack_details'; import {FileFinderDetails} from './file_finder_details'; +import {HashMultipleFilesDetails} from './hash_multiple_files_details'; import {InterrogateDetails} from './interrogate_details'; import {LaunchBinaryDetails} from './launch_binary_details'; import {ListDirectoryDetails} from './list_directory_details'; @@ -44,6 +45,7 @@ import {NetstatDetails} from './netstat_details'; import {OnlineNotificationDetails} from './online_notification_details'; import {OsqueryDetails} from './osquery_details'; import {ReadLowLevelDetails} from './read_low_level_details'; +import {StatMultipleFilesDetails} from './stat_multiple_files_details'; import {YaraProcessScanDetails} from './yara_process_scan_details'; @@ -59,6 +61,7 @@ const COMPONENTS = [ DumpProcessMemoryDetails, ExecutePythonHackDetails, FileFinderDetails, + HashMultipleFilesDetails, InterrogateDetails, 
LaunchBinaryDetails, ListDirectoryDetails, @@ -67,6 +70,7 @@ const COMPONENTS = [ OnlineNotificationDetails, OsqueryDetails, ReadLowLevelDetails, + StatMultipleFilesDetails, YaraProcessScanDetails, // keep-sorted end // clang-format on diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/plugin.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/plugin.ts index 9f437b21eb..08ed287e3f 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/plugin.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/plugin.ts @@ -3,7 +3,7 @@ import {ReplaySubject} from 'rxjs'; import {map} from 'rxjs/operators'; import {getExportedResultsCsvUrl, getExportedResultsSqliteUrl, getExportedResultsYamlUrl, getFlowFilesArchiveUrl} from '../../../lib/api/http_api_service'; -import {Flow, FlowState} from '../../../lib/models/flow'; +import {type Flow, FlowState} from '../../../lib/models/flow'; import {isNonNull} from '../../../lib/preconditions'; import {observeOnDestroy} from '../../../lib/reactive'; import {makeLegacyLink} from '../../../lib/routing'; diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ng.html b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ng.html new file mode 100644 index 0000000000..c8010bf7bc --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ng.html @@ -0,0 +1,8 @@ + + + + \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ts new file mode 100644 index 0000000000..1804e4daa7 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details.ts @@ -0,0 +1,73 @@ +import {ChangeDetectionStrategy, Component} from '@angular/core'; +import {Observable} from 'rxjs'; +import {map} from 'rxjs/operators'; + +import {FlowFileResult, flowFileResultFromStatEntry} from '../../../components/flow_details/helpers/file_results_table'; +import {CollectMultipleFilesArgs, StatEntry} from '../../../lib/api/api_interfaces'; +import {translateStatEntry} from '../../../lib/api_translation/flow'; +import {Flow} from '../../../lib/models/flow'; +import {PayloadType} from '../../../lib/models/result'; +import {FlowResultMapFunction, FlowResultsQueryWithAdapter} from '../helpers/load_flow_results_directive'; + +import {ExportMenuItem, Plugin} from './plugin'; + + +const ADAPTER: FlowResultMapFunction = + (results) => results?.map( + (result) => flowFileResultFromStatEntry( + translateStatEntry(result.payload as StatEntry))); + + +/** + * Component that displays results of StatMultipleFiles flow. + */ +@Component({ + selector: 'stat-multiple-files-details', + templateUrl: './stat_multiple_files_details.ng.html', + styleUrls: ['./_base.scss'], + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class StatMultipleFilesDetails extends Plugin { + readonly QUERY_MORE_COUNT = 100; + + readonly args$: Observable = this.flow$.pipe( + map((flow) => flow.args as CollectMultipleFilesArgs), + ); + + readonly flowResultsCount$ = this.flow$.pipe(map((flow) => { + const resultsByType = flow?.resultCounts ?? 
[]; + + const statEntryResultCount = + resultsByType.find(count => count.type === PayloadType.STAT_ENTRY); + + return statEntryResultCount?.count ?? 0; + })); + + readonly isResultsSectionExpandable$ = + this.flowResultsCount$.pipe(map((resultsCount) => resultsCount > 0)); + + readonly query$: Observable< + FlowResultsQueryWithAdapter> = + this.flow$.pipe(map(flow => ({flow, resultMapper: ADAPTER}))); + + readonly description$ = this.args$.pipe(map(args => { + const length = args.pathExpressions?.length ?? 0; + if (length <= 1) { + return args.pathExpressions?.[0] ?? ''; + } else { + return `${args.pathExpressions?.[0]} + ${length - 1} more`; + } + })); + + override getExportMenuItems(flow: Flow): readonly ExportMenuItem[] { + const downloadItem = this.getDownloadFilesExportMenuItem(flow); + const items = super.getExportMenuItems(flow); + + if (items.find(item => item.url === downloadItem.url)) { + return items; + } + + // If the menu does not yet contain "Download files", display it. + return [downloadItem, ...items]; + } +} diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details_test.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details_test.ts new file mode 100644 index 0000000000..6f504d1d09 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/stat_multiple_files_details_test.ts @@ -0,0 +1,51 @@ +import {TestBed, waitForAsync} from '@angular/core/testing'; +import {NoopAnimationsModule} from '@angular/platform-browser/animations'; + +import {CollectMultipleFilesArgs, DefaultFlowProgress} from '../../../lib/api/api_interfaces'; +import {FlowState} from '../../../lib/models/flow'; +import {newFlow} from '../../../lib/models/model_test_util'; +import {initTestEnvironment} from '../../../testing'; + +import {PluginsModule} from './module'; +import {StatMultipleFilesDetails} from './stat_multiple_files_details'; + + +initTestEnvironment(); + +describe('stat-multiple-files-details component', () => { + beforeEach(waitForAsync(() => { + TestBed + .configureTestingModule({ + imports: [ + NoopAnimationsModule, + PluginsModule, + ], + providers: [], + teardown: {destroyAfterEach: false} + }) + .compileComponents(); + })); + + + it('shows file download button', () => { + const fixture = TestBed.createComponent(StatMultipleFilesDetails); + const args: CollectMultipleFilesArgs = {pathExpressions: ['/foo/**']}; + const progress: DefaultFlowProgress = {}; + + fixture.componentInstance.flow = newFlow({ + name: 'StatMultipleFiles', + args, + progress, + state: FlowState.FINISHED, + }); + fixture.detectChanges(); + + const menuItems = fixture.componentInstance.getExportMenuItems( + fixture.componentInstance.flow); + expect(menuItems[0]) + .toEqual(fixture.componentInstance.getDownloadFilesExportMenuItem( + fixture.componentInstance.flow)); + expect(menuItems[0].url) + .toMatch('/api/v2/clients/.+/flows/.+/results/files-archive'); + }); +}); diff --git a/grr/server/grr_response_server/gui/ui/components/flow_list/flow_list_test.ts b/grr/server/grr_response_server/gui/ui/components/flow_list/flow_list_test.ts index 02f8934785..7a44d01313 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_list/flow_list_test.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_list/flow_list_test.ts @@ -37,15 +37,15 @@ function initFlowList( friendlyName: 'Client Side File Finder', }, { - name: 'KeepAlive', - friendlyName: 'KeepAlive', + name: 'Kill', + 
friendlyName: 'Kill GRR agent process', })); clientPageGlobalStore.mockedObservables.flowListEntries$.next({ isLoading: false, hasMore: false, flows: [ newFlow({ - name: 'KeepAlive', + name: 'Kill', creator: 'morty', isRobot: true, }), @@ -104,7 +104,7 @@ describe('FlowList Component', () => { fixture.detectChanges(); const text = fixture.debugElement.nativeElement.textContent; - expect(text).toContain('KeepAlive'); + expect(text).toContain('Kill'); expect(text).toContain('morty'); expect(text).toContain('Client Side File Finder'); expect(text).toContain('rick'); @@ -117,7 +117,7 @@ describe('FlowList Component', () => { fixture.detectChanges(); const text = fixture.debugElement.nativeElement.textContent; - expect(text).not.toContain('KeepAlive'); + expect(text).not.toContain('Kill'); expect(text).not.toContain('morty'); expect(text).toContain('Client Side File Finder'); expect(text).toContain('rick'); @@ -134,7 +134,7 @@ describe('FlowList Component', () => { fixture.detectChanges(); const text = fixture.debugElement.nativeElement.textContent; - expect(text).toContain('KeepAlive'); + expect(text).toContain('Kill'); expect(text).toContain('morty'); expect(text).not.toContain('Client Side File Finder'); expect(text).not.toContain('rick'); @@ -153,7 +153,7 @@ describe('FlowList Component', () => { hasMore: false, flows: [ newFlow({ - name: 'KeepAlive', + name: 'Kill', creator: 'morty', }), newFlow({ @@ -166,7 +166,7 @@ describe('FlowList Component', () => { const text = fixture.debugElement.nativeElement.textContent; expect(text).toContain('ClientFileFinder'); - expect(text).toContain('KeepAlive'); + expect(text).toContain('Kill'); }); it('updates flow list on a change in observable', () => { @@ -182,7 +182,7 @@ describe('FlowList Component', () => { hasMore: false, flows: [ newFlow({ - name: 'KeepAlive', + name: 'Kill', creator: 'morty', }), ], @@ -190,7 +190,7 @@ describe('FlowList Component', () => { fixture.detectChanges(); let text = fixture.debugElement.nativeElement.textContent; - expect(text).toContain('KeepAlive'); + expect(text).toContain('Kill'); clientPageGlobalStore.mockedObservables.flowListEntries$.next({ isLoading: false, @@ -205,7 +205,7 @@ describe('FlowList Component', () => { fixture.detectChanges(); text = fixture.debugElement.nativeElement.textContent; - expect(text).not.toContain('KeepAlive'); + expect(text).not.toContain('Kill'); expect(text).toContain('ClientFileFinder'); }); diff --git a/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_list_item.ts b/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_list_item.ts index 6958a5cbca..cc473e27ca 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_list_item.ts +++ b/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_list_item.ts @@ -14,67 +14,89 @@ export type FlowsByCategory = ReadonlyMap; /** * Flows, split by category, to be displayed by the flow picker. */ -const FLOWS_BY_CATEGORY: FlowsByCategory = new Map(Object.entries({ - // TODO: Commented out flows do not have a proper flow form yet. - // Hide them, to not show users an option that they cannot use. - 'Collectors': [ - FLOW_LIST_ITEMS_BY_TYPE[FlowType.ARTIFACT_COLLECTOR_FLOW]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.OS_QUERY_FLOW]!, +// Note: The Map instantiation is less readable without Object.entries(), but +// the reason to not use it is the possible property name obfuscation issues. 
+const FLOWS_BY_CATEGORY: FlowsByCategory = new Map([ + // TODO: Commented out flows do not have a proper flow form + // yet. Hide them, to not show users an option that they cannot use. + [ + 'Collectors', + [ + FLOW_LIST_ITEMS_BY_TYPE[FlowType.ARTIFACT_COLLECTOR_FLOW]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.OS_QUERY_FLOW]!, + ] ], - 'Browser': [ - FLOW_LIST_ITEMS_BY_TYPE[FlowType.COLLECT_BROWSER_HISTORY]!, + [ + 'Browser', + [ + FLOW_LIST_ITEMS_BY_TYPE[FlowType.COLLECT_BROWSER_HISTORY]!, + ] ], - 'Hardware': [ - FLOW_LIST_ITEMS_BY_TYPE[FlowType.COLLECT_EFI_HASHES]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.DUMP_ACPI_TABLE]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.DUMP_EFI_IMAGE]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.DUMP_FLASH_IMAGE]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.GET_MBR]!, + [ + 'Hardware', + [ + FLOW_LIST_ITEMS_BY_TYPE[FlowType.DUMP_ACPI_TABLE]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.DUMP_FLASH_IMAGE]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.GET_MBR]!, + ] ], - 'Filesystem': [ - FLOW_LIST_ITEMS_BY_TYPE[FlowType.COLLECT_FILES_BY_KNOWN_PATH]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.COLLECT_MULTIPLE_FILES]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.LIST_DIRECTORY]!, - // TODO: - // fli('ListVolumeShadowCopies', 'List volume shadow copies'), - // fli('RecursiveListDirectory', 'List directory recursively', - // 'Lists and stats all files in directory and its subdirectories'), - // fli('SendFile', 'Send file over network'), - FLOW_LIST_ITEMS_BY_TYPE[FlowType.TIMELINE_FLOW]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.READ_LOW_LEVEL]!, + [ + 'Filesystem', + [ + FLOW_LIST_ITEMS_BY_TYPE[FlowType.COLLECT_FILES_BY_KNOWN_PATH]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.COLLECT_MULTIPLE_FILES]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.STAT_MULTIPLE_FILES]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.HASH_MULTIPLE_FILES]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.LIST_DIRECTORY]!, + // TODO: + // fli('ListVolumeShadowCopies', 'List volume shadow copies'), + // fli('RecursiveListDirectory', 'List directory recursively', + // 'Lists and stats all files in directory and its subdirectories'), + FLOW_LIST_ITEMS_BY_TYPE[FlowType.TIMELINE_FLOW]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.READ_LOW_LEVEL]!, + ] ], - 'Administrative': [ - FLOW_LIST_ITEMS_BY_TYPE[FlowType.ONLINE_NOTIFICATION]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.EXECUTE_PYTHON_HACK]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.INTERROGATE]!, - // TODO: - // fli('GetClientStats', 'Collect GRR statistics', - // 'Collect agent statistics including processor, memory, and network - // usage'), - FLOW_LIST_ITEMS_BY_TYPE[FlowType.KILL]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.LAUNCH_BINARY]!, - // TODO: - // fli('OnlineNotification', 'Notify when online', - // 'Send an email notification when the GRR agent comes online'), - // fli('Uninstall', 'Uninstall GRR', - // 'Permanently uninstall GRR from the host'), - // fli('UpdateClient', 'Update GRR client', - // 'Update GRR on the host to the latest version'), + [ + 'Administrative', + [ + FLOW_LIST_ITEMS_BY_TYPE[FlowType.ONLINE_NOTIFICATION]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.EXECUTE_PYTHON_HACK]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.INTERROGATE]!, + // TODO: + // fli('GetClientStats', 'Collect GRR statistics', + // 'Collect agent statistics including processor, memory, and + // network usage'), + FLOW_LIST_ITEMS_BY_TYPE[FlowType.KILL]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.LAUNCH_BINARY]!, + // TODO: + // fli('OnlineNotification', 'Notify when online', + // 'Send an email notification when the GRR agent comes online'), + // fli('Uninstall', 'Uninstall GRR', + // 'Permanently 
uninstall GRR from the host'), + // fli('UpdateClient', 'Update GRR client', + // 'Update GRR on the host to the latest version'), + ] ], - 'Processes': [ - FLOW_LIST_ITEMS_BY_TYPE[FlowType.LIST_PROCESSES]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.LIST_NAMED_PIPES_FLOW]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.DUMP_PROCESS_MEMORY]!, - FLOW_LIST_ITEMS_BY_TYPE[FlowType.YARA_PROCESS_SCAN]!, + [ + 'Processes', + [ + FLOW_LIST_ITEMS_BY_TYPE[FlowType.LIST_PROCESSES]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.LIST_NAMED_PIPES_FLOW]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.DUMP_PROCESS_MEMORY]!, + FLOW_LIST_ITEMS_BY_TYPE[FlowType.YARA_PROCESS_SCAN]!, + ] ], - 'Network': [ - FLOW_LIST_ITEMS_BY_TYPE[FlowType.NETSTAT]!, + [ + 'Network', + [ + FLOW_LIST_ITEMS_BY_TYPE[FlowType.NETSTAT]!, + ] ], // TODO: - // 'Registry': [ + // ['Registry', [ // fli('RegistryFinder', 'Find registry keys/values'), - // ], -})); + // ]], +]); /** * List of commonly used flow names. diff --git a/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_picker.ng.html b/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_picker.ng.html index c5d870c8c2..a1690eb439 100644 --- a/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_picker.ng.html +++ b/grr/server/grr_response_server/gui/ui/components/flow_picker/flow_picker.ng.html @@ -60,6 +60,7 @@ [autoActiveFirstOption]="true" [displayWith]="displayWith" (optionSelected)="selectFlow($event.option.value)" + name="flow-picker" >
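Editor's aside on the FLOWS_BY_CATEGORY restructuring above: the entries-array form survives property-name obfuscation because its keys are plain string literals, whereas Object.entries() over an object literal exposes the keys to renaming passes. A minimal standalone sketch of the difference, using a made-up category entry that is not part of this patch:

// Keys of an object literal are properties; an aggressive optimizer may
// rename 'Collectors' to something short, silently changing the Map keys:
const unsafe = new Map(Object.entries({'Collectors': ['ArtifactCollectorFlow']}));

// Keys given as [key, value] tuples are ordinary string literals, which
// renaming passes leave untouched:
const safe = new Map<string, string[]>([
  ['Collectors', ['ArtifactCollectorFlow']],
]);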
[A diff hunk, apparently for flow_picker.ts, was lost in extraction; surviving fragment: "void; type OnTouchedFn = () => void;"] diff --git a/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows.ts b/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows.ts index 99cf8aa229..f936409240 100644 --- a/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows.ts +++ b/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows.ts @@ -2,7 +2,7 @@ import {ChangeDetectionStrategy, Component, Input} from '@angular/core'; import {BehaviorSubject, combineLatest, Observable} from 'rxjs'; import {map} from 'rxjs/operators'; -import {ClientApproval} from '../../../lib/models/client'; +import {type ClientApproval} from '../../../lib/models/client'; import {FlowWithDescriptor, withDescriptor} from '../../../lib/models/flow'; import {isNull} from '../../../lib/preconditions'; import {ConfigGlobalStore} from '../../../store/config_global_store'; diff --git a/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows_test.ts b/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows_test.ts index 37b1c55185..e93976d952 100644 --- a/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows_test.ts +++ b/grr/server/grr_response_server/gui/ui/components/home/recent_client_flows/recent_client_flows_test.ts @@ -110,14 +110,14 @@ describe('RecentClientFlows Component', () => { friendlyName: 'Client Side File Finder', }, { - name: 'KeepAlive', - friendlyName: 'KeepAlive', + name: 'Kill', + friendlyName: 'Kill GRR agent process', })); recentClientFlowsLocalStore.mockedObservables.flowListEntries$.next({ flows: [ newFlow({ - name: 'KeepAlive', + name: 'Kill', creator: 'ricky', clientId: 'C.1111', }), @@ -142,4 +142,4 @@ expect(flowDetailsCard[1].nativeElement.textContent).toContain('rick'); expect(flowDetailsCard[2].nativeElement.textContent).toContain('bob'); }); -}); \ No newline at end of file +}); diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_arguments/hunt_arguments.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_arguments/hunt_arguments.ts index cb203a99af..7798c2d1c7 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_arguments/hunt_arguments.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_arguments/hunt_arguments.ts @@ -7,7 +7,7 @@ import {MatTooltipModule} from '@angular/material/tooltip'; import {RouterModule} from '@angular/router'; import {ForemanClientRuleSetMatchMode, ForemanClientRuleType, ForemanIntegerClientRuleForemanIntegerField, ForemanIntegerClientRuleOperator, ForemanLabelClientRuleMatchMode, ForemanRegexClientRuleForemanStringField} from '../../../lib/api/api_interfaces'; -import {Hunt} from '../../../lib/models/hunt'; +import {type Hunt} from '../../../lib/models/hunt'; import {HuntOverviewPageLocalStore} from '../../../store/hunt_overview_page_local_store'; import {toDurationString} from '../../form/duration_input/duration_conversion'; import {CopyButtonModule} from '../../helpers/copy_button/copy_button_module'; diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_flow_arguments/hunt_flow_arguments.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_flow_arguments/hunt_flow_arguments.ts index c3bd2511d4..4e51ab76e3 100644 ---
a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_flow_arguments/hunt_flow_arguments.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_flow_arguments/hunt_flow_arguments.ts @@ -5,7 +5,7 @@ import {BehaviorSubject, combineLatest, Observable} from 'rxjs'; import {map, startWith} from 'rxjs/operators'; import {getFlowTitleFromFlowName} from '../../../lib/models/flow'; -import {Hunt} from '../../../lib/models/hunt'; +import {type Hunt} from '../../../lib/models/hunt'; import {ConfigGlobalStore} from '../../../store/config_global_store'; import {FlowArgsViewData} from '../../flow_args_view/flow_args_view'; import {FlowArgsViewModule} from '../../flow_args_view/module'; diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_original_reference/hunt_original_reference.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_original_reference/hunt_original_reference.ts index 5b4e59723a..b3a4d9ad56 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_original_reference/hunt_original_reference.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_original_reference/hunt_original_reference.ts @@ -2,7 +2,7 @@ import {CommonModule} from '@angular/common'; import {ChangeDetectionStrategy, Component, Input} from '@angular/core'; import {RouterModule} from '@angular/router'; -import {ApiFlowReference, ApiHuntReference} from '../../../lib/api/api_interfaces'; +import {type ApiFlowReference, type ApiHuntReference} from '../../../lib/api/api_interfaces'; import {CopyButtonModule} from '../../helpers/copy_button/copy_button_module'; /** Displays original hunt or flow reference. */ diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ng.html b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ng.html index a9c89db699..28260a60da 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ng.html +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ng.html @@ -20,11 +20,41 @@
[Angular template markup stripped during extraction. Recoverable content of this hunt_progress.ng.html hunk, inferred from the component and tests below: it wraps the progress views in a "progress-tabs" mat-tab-group with a "Chart" tab (app-hunt-progress-chart) and a "Table" tab (app-hunt-progress-table), shows a "progress-spinner" mat-spinner while hunt progress data is loading, and falls back to the message "There is no fleet collection progress data to show" when no data is available.]
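The hunt_progress.ts hunk below adds prepareHuntProgressChartTimeSeriesData, which drops datapoints that share an X value, keeping the one with the highest Y. A standalone sketch of that dedup logic, assuming the {x, y} numeric datapoint shape from lib/dataviz/line_chart:

type Datapoint = {x: number, y: number};

function dedupeByXKeepMaxY(series: readonly Datapoint[]): Datapoint[] {
  // Sort descending by y, then stably descending by x; sort() stability
  // guarantees that, within each x, the highest-y datapoint comes first.
  const backwards =
      [...series].sort((a, b) => b.y - a.y).sort((a, b) => b.x - a.x);
  const seen = new Set<number>();
  const deduped = backwards.filter(dp => {
    if (seen.has(dp.x)) return false;
    seen.add(dp.x);
    return true;
  });
  return deduped.reverse();  // Back to ascending-x order for the chart.
}

// Example: dedupeByXKeepMaxY([{x: 1, y: 5}, {x: 1, y: 10}, {x: 2, y: 7}])
// returns [{x: 1, y: 10}, {x: 2, y: 7}].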
diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.scss b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.scss index d1e739cff8..cf3237b11d 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.scss +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.scss @@ -59,12 +59,25 @@ line-height: initial; } - &.error, &.error .title { + &.error, + &.error .title { color: mat.get-color-from-palette(c.$foreground, danger); } } } +.no-data { + margin: auto; + padding: 2rem; + text-align: center; +} + +.progress-spinner { + margin: auto; + margin-top: 20px; + margin-bottom: 20px; +} + .divider-top { border-top: 1px solid mat.get-color-from-palette(c.$foreground, divider-light); } diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ts index b3d0fa314a..9a5cd1a1fd 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress.ts @@ -1,12 +1,15 @@ import {Component} from '@angular/core'; import {combineLatest, Observable} from 'rxjs'; -import {filter, map} from 'rxjs/operators'; +import {filter, map, startWith, take} from 'rxjs/operators'; import {ApiGetHuntClientCompletionStatsResult, SampleFloat} from '../../../../lib/api/api_interfaces'; +import {LineChartDatapoint} from '../../../../lib/dataviz/line_chart'; import {HuntCompletionProgressTableRow} from '../../../../lib/models/hunt'; -import {isNonNull} from '../../../../lib/preconditions'; +import {isNonNull, isNull} from '../../../../lib/preconditions'; import {HuntPageGlobalStore} from '../../../../store/hunt_page_global_store'; import {ColorScheme} from '../../../flow_details/helpers/result_accordion'; +import {HuntProgressLineChartDataset} from '../hunt_progress_chart/hunt_progress_chart'; + /** Summary describes information in a summary card. */ interface Summary { @@ -19,6 +22,11 @@ interface Summary { const FIVE_MINUTES_IN_SECONDS = 5 * 60; const BIG_ZERO = BigInt(0); +enum HuntProgressTabIndex { + CHART_TAB = 0, + TABLE_TAB = 1, +} + function getPercentage(part: bigint, all: bigint): bigint { if (part === BIG_ZERO || all === BIG_ZERO) return BIG_ZERO; @@ -97,6 +105,62 @@ function addClientSetToBuckets( }); } +/** + * Removes entries/datapoints with a duplicated X axis value, keeping the one + * with the highest Y-Axis value (the datapoint with the most information about + * client completion progress). 
+ */ +function prepareHuntProgressChartTimeSeriesData( + series: readonly LineChartDatapoint[], + ): LineChartDatapoint[] { + // We first sort the dataset backwards, based on the X Axis value: + const backwardsSortedSeries = + [...series].sort((a, b) => b.y - a.y).sort((a, b) => b.x - a.x); + + const existingValues = new Set(); + + const backwardsSortedFilteredSeries = backwardsSortedSeries.filter(dp => { + if (existingValues.has(dp.x)) return false; + + existingValues.add(dp.x); + + return true; + }); + + return backwardsSortedFilteredSeries.reverse(); +} + +function toHuntCompletionChartData( + progressData: ApiGetHuntClientCompletionStatsResult, + ): HuntProgressLineChartDataset { + const completedClients = prepareHuntProgressChartTimeSeriesData( + toSafeLineChartData(progressData?.completePoints)); + const inProgressClients = prepareHuntProgressChartTimeSeriesData( + toSafeLineChartData(progressData?.startPoints)); + + const huntProgressLineChartDataset: HuntProgressLineChartDataset = { + completedClients, + inProgressClients, + }; + + return huntProgressLineChartDataset; +} + +function toSafeLineChartData(dataset?: readonly SampleFloat[]): + LineChartDatapoint[] { + if (isNull(dataset)) return []; + + return dataset + .filter( + dataPoint => // We discard incomplete dataPoints: + isNonNull(dataPoint.xValue) && isNonNull(dataPoint.yValue)) + .map(dataPoint => ({ + // Convert floating-point seconds to milliseconds: + x: dataPoint.xValue! * 1_000, + y: dataPoint.yValue!, + })); +} + /** Provides progress information for the current hunt. */ @Component({ selector: 'app-hunt-progress', @@ -108,6 +172,20 @@ export class HuntProgress { constructor(private readonly huntPageGlobalStore: HuntPageGlobalStore) {} protected readonly hunt$ = this.huntPageGlobalStore.selectedHunt$; + protected readonly huntProgress$ = this.huntPageGlobalStore.huntProgress$; + protected readonly showHuntProgress$ = this.huntProgress$.pipe( + map(progress => { + const startPoints = progress?.startPoints?.length ?? 0; + const completePoints = progress?.completePoints?.length ?? 0; + + return startPoints > 0 || completePoints > 0; + }), + ); + + protected readonly huntProgressLoading$ = this.huntProgress$.pipe( + map(huntProgress => isNull(huntProgress)), + startWith(true), + ); protected overviewSummaries$: Observable = this.hunt$.pipe(map(hunt => { @@ -167,7 +245,7 @@ export class HuntProgress { readonly huntProgressTableData$: Observable = combineLatest([ - this.huntPageGlobalStore.huntProgress$, + this.huntProgress$, this.hunt$, ]) .pipe( @@ -176,6 +254,23 @@ export class HuntProgress { map(([tableData, hunt]) => this.toHuntCompletionTableData( tableData, hunt?.allClientsCount)), ); + readonly huntProgressChartData$: Observable = + this.huntProgress$.pipe( + filter((progressData) => isNonNull(progressData)), + map((progressData) => toHuntCompletionChartData(progressData)), + ); + readonly huntProgressInitiallySelectedTab$ = this.huntProgressChartData$.pipe( + map(chartData => { + // We need at least 2 datapoints in a series in order to render a line: + const hasEnoughChartData = chartData.completedClients.length >= 2 || + chartData.inProgressClients.length >= 2; + + return hasEnoughChartData ? 
HuntProgressTabIndex.CHART_TAB : + HuntProgressTabIndex.TABLE_TAB; + }), + // We are only interested in the first emission: + take(1), + ); private toHuntCompletionTableData( huntCompletionStatusdata: ApiGetHuntClientCompletionStatsResult, diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress_test.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress_test.ts index 9d9ae65c54..def818890a 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress_test.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/hunt_progress_test.ts @@ -1,5 +1,5 @@ import {TestbedHarnessEnvironment} from '@angular/cdk/testing/testbed'; -import {TestBed, waitForAsync} from '@angular/core/testing'; +import {discardPeriodicTasks, fakeAsync, TestBed, tick, waitForAsync} from '@angular/core/testing'; import {By} from '@angular/platform-browser'; import {NoopAnimationsModule} from '@angular/platform-browser/animations'; import {RouterTestingModule} from '@angular/router/testing'; @@ -140,13 +140,55 @@ describe('HuntProgress Component', () => { expect(summaries[4].children[2].innerText).toContain('0 clients'); }); - describe('Hunt progress table', () => { - it('does not show the hunt progress table data when there is no hunt', + describe('Hunt progress loading spinner', () => { + it('shows the loading spinner by default', () => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + expect(fixture.nativeElement.querySelector('.progress-spinner')) + .not.toBeNull(); + expect(fixture.nativeElement.querySelector('.no-data')).toBeNull(); + expect(fixture.nativeElement.querySelector('app-hunt-progress-table')) + .toBeNull(); + expect(fixture.nativeElement.querySelector('app-hunt-progress-chart')) + .toBeNull(); + }); + + it('shows the loading spinner initially, then shows the no data message', () => { const fixture = TestBed.createComponent(HuntProgress); fixture.detectChanges(); - huntPageGlobalStore.mockedObservables.selectedHunt$.next(null); + expect(fixture.nativeElement.querySelector('.progress-spinner')) + .not.toBeNull(); + expect(fixture.nativeElement.querySelector('.progress-tabs')) + .toBeNull(); + expect(fixture.nativeElement.querySelector('.no-data')).toBeNull(); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [], + completePoints: [], + }); + + fixture.detectChanges(); + + expect(fixture.nativeElement.querySelector('.progress-spinner')) + .toBeNull(); + expect(fixture.nativeElement.querySelector('.progress-tabs')) + .toBeNull(); + expect(fixture.nativeElement.querySelector('.no-data')).not.toBeNull(); + }); + + it('shows the loading spinner initially, then shows the chart instead', + () => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + expect(fixture.nativeElement.querySelector('.progress-spinner')) + .not.toBeNull(); + expect(fixture.nativeElement.querySelector('.no-data')).toBeNull(); + expect(fixture.nativeElement.querySelector('.progress-tabs')) + .toBeNull(); huntPageGlobalStore.mockedObservables.huntProgress$.next({ startPoints: [ @@ -154,32 +196,72 @@ describe('HuntProgress Component', () => { xValue: 1678379900, yValue: 10, }, + { + xValue: 1678379900, + yValue: 10, + }, ], completePoints: [ { xValue: 1678379900, yValue: 5, }, + { + xValue: 1678379900, + yValue: 5, + }, ], }); fixture.detectChanges(); - const table = 
fixture.nativeElement.querySelector( - 'app-hunt-progress-table mat-table'); - - expect(table).toBeNull(); + expect(fixture.nativeElement.querySelector('.progress-spinner')) + .toBeNull(); + expect(fixture.nativeElement.querySelector('.no-data')).not.toBeNull(); + expect(fixture.nativeElement.querySelector('.progress-tabs')) + .not.toBeNull(); }); + }); - it('does not show the hunt progress table data when there is no hunt progress data', + describe('Progress Tabs', () => { + it('shows the progress Tabs when there is hunt progress data', () => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ + allClientsCount: BigInt(0), + })); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678379900, + yValue: 10, + }, + ], + completePoints: [ + { + xValue: 1678379900, + yValue: 5, + }, + ], + }); + + fixture.detectChanges(); + + // MatTabGroupHarness does not detect the tabs for some reason: + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[0].textContent).toContain('Chart'); + expect(tabs[1].textContent).toContain('Table'); + }); + + it('does not show the hunt progress tabs when there is no progress data', () => { const fixture = TestBed.createComponent(HuntProgress); fixture.detectChanges(); - huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ - allClientsCount: BigInt(0), - })); - huntPageGlobalStore.mockedObservables.huntProgress$.next({ startPoints: [], completePoints: [], @@ -187,20 +269,15 @@ describe('HuntProgress Component', () => { fixture.detectChanges(); - const table = fixture.nativeElement.querySelector( - 'app-hunt-progress-table mat-table'); - - expect(table).toBeNull(); + // MatTabGroupHarness does not detect the tabs for some reason: + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(0); }); - it('shows the hunt progress table data without percentages', () => { + it('Sets the Table tab as default', () => { const fixture = TestBed.createComponent(HuntProgress); fixture.detectChanges(); - huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ - allClientsCount: BigInt(0), - })); - huntPageGlobalStore.mockedObservables.huntProgress$.next({ startPoints: [ { @@ -218,70 +295,316 @@ describe('HuntProgress Component', () => { fixture.detectChanges(); - const table = fixture.nativeElement.querySelector( - 'app-hunt-progress-table mat-table'); + // MatTabGroupHarness does not detect the tabs for some reason: + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); - expect(table).not.toBeNull(); + expect(tabs[0].textContent).toContain('Chart'); + expect(tabs[1].textContent).toContain('Table'); - const rows = - table.querySelectorAll('app-hunt-progress-table mat-table mat-row'); + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); - expect(rows.length).toEqual(1); - - const cells = rows[0].querySelectorAll('mat-cell'); - - expect(cells[0].textContent).toContain('2023-03-09 16:43:20 UTC'); - expect(cells[1].innerText).toEqual('5'); - expect(cells[2].innerText).toEqual('10'); + expect(fixture.nativeElement.querySelector('app-hunt-progress-table')) + .not.toBeNull(); }); - it('shows the hunt progress table data with percentages', () => { + it('Sets the Chart tab as default', () => { const fixture = TestBed.createComponent(HuntProgress); 
fixture.detectChanges(); - huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ - allClientsCount: BigInt(10), - })); - huntPageGlobalStore.mockedObservables.huntProgress$.next({ startPoints: [ { xValue: 1678379900, yValue: 10, }, + { + xValue: 1678379910, + yValue: 25, + }, + { + xValue: 1678379920, + yValue: 30, + }, ], completePoints: [ { xValue: 1678379900, yValue: 5, }, + { + xValue: 1678379910, + yValue: 20, + }, + { + xValue: 1678379920, + yValue: 30, + }, ], }); fixture.detectChanges(); - const table = fixture.nativeElement.querySelector( - 'app-hunt-progress-table mat-table'); + // MatTabGroupHarness does not detect the tabs for some reason: + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); - expect(table).not.toBeNull(); + expect(tabs[0].textContent).toContain('Chart'); + expect(tabs[1].textContent).toContain('Table'); - const rows = - table.querySelectorAll('app-hunt-progress-table mat-table mat-row'); + expect(tabs[0].getAttribute('aria-selected')).toEqual('true'); - expect(rows.length).toEqual(1); + expect(fixture.nativeElement.querySelector('app-hunt-progress-chart')) + .not.toBeNull(); + }); - const cells = rows[0].querySelectorAll('mat-cell'); + it('Selected tab does not change after the hunt progress is updated with "plotable" data', + () => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); - expect(cells[0].textContent).toContain('2023-03-09 16:43:20 UTC'); - expect(cells[1].textContent.trim()).toEqual('5 (50%)'); - expect(cells[2].textContent.trim()).toEqual('10 (100%)'); - }); + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678379900, + yValue: 10, + }, + ], + completePoints: [ + { + xValue: 1678379900, + yValue: 5, + }, + ], + }); - it('Groups multiple data-points into one, as they are within 5 minutes', + fixture.detectChanges(); + + // MatTabGroupHarness does not detect the tabs for some reason: + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[0].textContent).toContain('Chart'); + expect(tabs[1].textContent).toContain('Table'); + + expect(tabs[0].getAttribute('aria-selected')).toEqual('false'); + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); + + expect(fixture.nativeElement.querySelector('app-hunt-progress-table')) + .not.toBeNull(); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678379900, + yValue: 10, + }, + { + xValue: 1678379910, + yValue: 25, + }, + { + xValue: 1678379920, + yValue: 30, + }, + ], + completePoints: [ + { + xValue: 1678379900, + yValue: 5, + }, + { + xValue: 1678379910, + yValue: 20, + }, + { + xValue: 1678379920, + yValue: 30, + }, + ], + }); + + fixture.detectChanges(); + + expect(tabs[0].getAttribute('aria-selected')).toEqual('false'); + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); + }); + }); + + describe('Hunt progress table', () => { + it('does not show the hunt progress table data when there is no hunt', + () => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + huntPageGlobalStore.mockedObservables.selectedHunt$.next(null); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678379900, + yValue: 10, + }, + ], + completePoints: [ + { + xValue: 1678379900, + yValue: 5, + }, + ], + }); + + fixture.detectChanges(); + + expect(fixture.nativeElement.querySelector( + 
'app-hunt-progress-table mat-table')) + .toBeNull(); + }); + + it('does not show the hunt progress table data when there is no hunt progress data', () => { const fixture = TestBed.createComponent(HuntProgress); fixture.detectChanges(); + huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ + allClientsCount: BigInt(0), + })); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [], + completePoints: [], + }); + + fixture.detectChanges(); + + expect(fixture.nativeElement.querySelector( + 'app-hunt-progress-table mat-table')) + .toBeNull(); + }); + + it('shows the hunt progress table data without percentages', + fakeAsync(() => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ + allClientsCount: BigInt(0), + })); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678379900, + yValue: 10, + }, + ], + completePoints: [ + { + xValue: 1678379900, + yValue: 5, + }, + ], + }); + + fixture.detectChanges(); + + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[1].textContent).toContain('Table'); + tabs[1].click(); + + fixture.detectChanges(); + + // We let the Angular Material tab change happen + tick(); + + fixture.detectChanges(); + + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); + + const table = fixture.nativeElement.querySelector( + 'app-hunt-progress-table mat-table'); + + expect(table).not.toBeNull(); + + const rows = table.querySelectorAll( + 'app-hunt-progress-table mat-table mat-row'); + + expect(rows.length).toEqual(1); + + const cells = rows[0].querySelectorAll('mat-cell'); + + expect(cells[0].textContent).toContain('2023-03-09 16:43:20 UTC'); + expect(cells[1].innerText).toEqual('5'); + expect(cells[2].innerText).toEqual('10'); + + discardPeriodicTasks(); + })); + + it('shows the hunt progress table data with percentages', fakeAsync(() => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ + allClientsCount: BigInt(10), + })); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678379900, + yValue: 10, + }, + ], + completePoints: [ + { + xValue: 1678379900, + yValue: 5, + }, + ], + }); + + fixture.detectChanges(); + + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[1].textContent).toContain('Table'); + tabs[1].click(); + + fixture.detectChanges(); + + // We let the Angular Material tab change happen + tick(); + + fixture.detectChanges(); + + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); + const table = fixture.nativeElement.querySelector( + 'app-hunt-progress-table mat-table'); + + expect(table).not.toBeNull(); + + const rows = table.querySelectorAll( + 'app-hunt-progress-table mat-table mat-row'); + + expect(rows.length).toEqual(1); + + const cells = rows[0].querySelectorAll('mat-cell'); + + expect(cells[0].textContent).toContain('2023-03-09 16:43:20 UTC'); + expect(cells[1].textContent.trim()).toEqual('5 (50%)'); + expect(cells[2].textContent.trim()).toEqual('10 (100%)'); + + discardPeriodicTasks(); + })); + + it('Groups multiple data-points into one, as they are within 5 minutes', + fakeAsync(() => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + 
huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ allClientsCount: BigInt(30), })); @@ -319,6 +642,21 @@ describe('HuntProgress Component', () => { fixture.detectChanges(); + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[1].textContent).toContain('Table'); + tabs[1].click(); + + fixture.detectChanges(); + + // We let the Angular Material tab change happen + tick(); + + fixture.detectChanges(); + + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); + const table = fixture.nativeElement.querySelector( 'app-hunt-progress-table mat-table'); @@ -334,10 +672,12 @@ describe('HuntProgress Component', () => { expect(cells[0].textContent).toContain('2023-03-09 16:43:20 UTC'); expect(cells[1].textContent.trim()).toEqual('30 (100%)'); expect(cells[2].textContent.trim()).toEqual('30 (100%)'); - }); + + discardPeriodicTasks(); + })); it('Groups multiple data-points into 2 groups, as they are not within 5 minutes', - () => { + fakeAsync(() => { const fixture = TestBed.createComponent(HuntProgress); fixture.detectChanges(); @@ -378,6 +718,21 @@ describe('HuntProgress Component', () => { fixture.detectChanges(); + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[1].textContent).toContain('Table'); + tabs[1].click(); + + fixture.detectChanges(); + + // We let the Angular Material tab change happen + tick(); + + fixture.detectChanges(); + + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); + const table = fixture.nativeElement.querySelector( 'app-hunt-progress-table mat-table'); @@ -399,80 +754,100 @@ describe('HuntProgress Component', () => { expect(rowCells[0].textContent).toContain('2023-03-09 19:28:20 UTC'); expect(rowCells[1].textContent.trim()).toEqual('30 (100%)'); expect(rowCells[2].textContent.trim()).toEqual('30 (100%)'); - }); - it('Does not group data-points, as none are within 5 minutes', () => { - const fixture = TestBed.createComponent(HuntProgress); - fixture.detectChanges(); + discardPeriodicTasks(); + })); - huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ - allClientsCount: BigInt(30), - })); + it('Does not group data-points, as none are within 5 minutes', + fakeAsync(() => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); - huntPageGlobalStore.mockedObservables.huntProgress$.next({ - startPoints: [ - { - xValue: 1678369900, - yValue: 10, - }, - { - xValue: 1678379910, - yValue: 25, - }, - { - xValue: 1678389920, - yValue: 30, - }, - ], - completePoints: [ - { - xValue: 1678369900, - yValue: 5, - }, - { - xValue: 1678379910, - yValue: 20, - }, - { - xValue: 1678389920, - yValue: 30, - }, - ], - }); + huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ + allClientsCount: BigInt(30), + })); - fixture.detectChanges(); + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678369900, + yValue: 10, + }, + { + xValue: 1678379910, + yValue: 25, + }, + { + xValue: 1678389920, + yValue: 30, + }, + ], + completePoints: [ + { + xValue: 1678369900, + yValue: 5, + }, + { + xValue: 1678379910, + yValue: 20, + }, + { + xValue: 1678389920, + yValue: 30, + }, + ], + }); + + fixture.detectChanges(); - const table = fixture.nativeElement.querySelector( - 'app-hunt-progress-table mat-table'); + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); - expect(table).not.toBeNull(); + 
expect(tabs[1].textContent).toContain('Table'); + tabs[1].click(); - const rows = - table.querySelectorAll('app-hunt-progress-table mat-table mat-row'); + fixture.detectChanges(); - expect(rows.length).toEqual(3); + // We let the Angular Material tab change happen + tick(); - let rowCells = rows[0].querySelectorAll('mat-cell'); + fixture.detectChanges(); - expect(rowCells[0].textContent).toContain('2023-03-09 13:56:40 UTC'); - expect(rowCells[1].textContent.trim()).toEqual('5 (16%)'); - expect(rowCells[2].textContent.trim()).toEqual('10 (33%)'); + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); - rowCells = rows[1].querySelectorAll('mat-cell'); + const table = fixture.nativeElement.querySelector( + 'app-hunt-progress-table mat-table'); - expect(rowCells[0].textContent).toContain('2023-03-09 16:41:40 UTC'); - expect(rowCells[1].textContent.trim()).toEqual('20 (66%)'); - expect(rowCells[2].textContent.trim()).toEqual('25 (83%)'); + expect(table).not.toBeNull(); - rowCells = rows[2].querySelectorAll('mat-cell'); + const rows = table.querySelectorAll( + 'app-hunt-progress-table mat-table mat-row'); - expect(rowCells[0].textContent).toContain('2023-03-09 19:26:40 UTC'); - expect(rowCells[1].textContent.trim()).toEqual('30 (100%)'); - expect(rowCells[2].textContent.trim()).toEqual('30 (100%)'); - }); + expect(rows.length).toEqual(3); + + let rowCells = rows[0].querySelectorAll('mat-cell'); + + expect(rowCells[0].textContent).toContain('2023-03-09 13:56:40 UTC'); + expect(rowCells[1].textContent.trim()).toEqual('5 (16%)'); + expect(rowCells[2].textContent.trim()).toEqual('10 (33%)'); + + rowCells = rows[1].querySelectorAll('mat-cell'); + + expect(rowCells[0].textContent).toContain('2023-03-09 16:41:40 UTC'); + expect(rowCells[1].textContent.trim()).toEqual('20 (66%)'); + expect(rowCells[2].textContent.trim()).toEqual('25 (83%)'); + + rowCells = rows[2].querySelectorAll('mat-cell'); + + expect(rowCells[0].textContent).toContain('2023-03-09 19:26:40 UTC'); + expect(rowCells[1].textContent.trim()).toEqual('30 (100%)'); + expect(rowCells[2].textContent.trim()).toEqual('30 (100%)'); + + discardPeriodicTasks(); + })); it('Displays the available information in case of uneven completed and started progress information', - () => { + fakeAsync(() => { const fixture = TestBed.createComponent(HuntProgress); fixture.detectChanges(); @@ -509,6 +884,21 @@ describe('HuntProgress Component', () => { fixture.detectChanges(); + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[1].textContent).toContain('Table'); + tabs[1].click(); + + fixture.detectChanges(); + + // We let the Angular Material tab change happen + tick(); + + fixture.detectChanges(); + + expect(tabs[1].getAttribute('aria-selected')).toEqual('true'); + const table = fixture.nativeElement.querySelector( 'app-hunt-progress-table mat-table'); @@ -530,6 +920,85 @@ describe('HuntProgress Component', () => { expect(rowCells[0].textContent).toContain('2023-03-09 19:28:20 UTC'); expect(rowCells[1].textContent.trim()).toEqual(''); expect(rowCells[2].textContent.trim()).toEqual('30 (100%)'); + + discardPeriodicTasks(); + })); + }); + + describe('Hunt progress chart', () => { + it('shows the progress chart when there is enough hunt progress data', + () => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + huntPageGlobalStore.mockedObservables.selectedHunt$.next(newHunt({ + allClientsCount: BigInt(0), + })); + + 
huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [ + { + xValue: 1678379900, + yValue: 10, + }, + { + xValue: 1679379900, + yValue: 15, + }, + { + xValue: 1680379900, + yValue: 30, + }, + ], + completePoints: [ + { + xValue: 1678379900, + yValue: 5, + }, + { + xValue: 1679379900, + yValue: 10, + }, + { + xValue: 1680379900, + yValue: 20, + }, + ], + }); + + fixture.detectChanges(); + + // MatTabGroupHarness does not detect the tabs for some reason: + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(2); + + expect(tabs[0].textContent).toContain('Chart'); + + // MatTabGroupHarness does not detect the tabs for some reason: + expect(fixture.nativeElement.querySelector('app-hunt-progress-chart')) + .not.toBeNull(); + expect( + fixture.nativeElement.querySelector('app-hunt-progress-chart svg')) + .not.toBeNull(); + }); + + it('does not show the hunt progress chart when there is no progress data', + () => { + const fixture = TestBed.createComponent(HuntProgress); + fixture.detectChanges(); + + huntPageGlobalStore.mockedObservables.selectedHunt$.next(null); + + huntPageGlobalStore.mockedObservables.huntProgress$.next({ + startPoints: [], + completePoints: [], + }); + + fixture.detectChanges(); + + // MatTabGroupHarness does not detect the tabs for some reason: + const tabs = fixture.nativeElement.querySelectorAll('.mdc-tab'); + expect(tabs.length).toEqual(0); }); }); }); diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/module.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/module.ts index fef338f691..cae3b8c784 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/module.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress/module.ts @@ -1,12 +1,15 @@ import {CommonModule} from '@angular/common'; import {NgModule} from '@angular/core'; +import {MatProgressSpinnerModule} from '@angular/material/progress-spinner'; +import {MatTabsModule} from '@angular/material/tabs'; import {MatTooltipModule} from '@angular/material/tooltip'; import {HelpersModule} from '../../../flow_details/helpers/module'; import {HuntArguments} from '../../hunt_arguments/hunt_arguments'; import {HuntFlowArguments} from '../../hunt_flow_arguments/hunt_flow_arguments'; import {HuntOriginalReference} from '../../hunt_original_reference/hunt_original_reference'; +import {HuntProgressChart} from '../hunt_progress_chart/hunt_progress_chart'; import {HuntProgressTable} from '../hunt_progress_table/hunt_progress_table'; import {HuntProgress} from './hunt_progress'; @@ -22,7 +25,10 @@ import {HuntProgress} from './hunt_progress'; HuntArguments, HuntFlowArguments, HuntOriginalReference, + HuntProgressChart, HuntProgressTable, + MatProgressSpinnerModule, + MatTabsModule, MatTooltipModule, // keep-sorted end // clang-format on diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ng.html b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ng.html new file mode 100644 index 0000000000..fe4608696b --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ng.html @@ -0,0 +1,7 @@ +
[Angular template markup stripped during extraction. Recoverable content of the new hunt_progress_chart.ng.html (7 lines), inferred from the component and tests below: a "hunt-progress-chart-container" div holding the chart's render target (the #progressChartContainer element referenced via @ViewChild) and a "no-data" fallback block with the message "There is no progress data to show."]
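The component in hunt_progress_chart.ts below renders into this template lazily: its @Input setter buffers incoming data until the view is initialized, then either creates the D3 chart once or updates it in place. A reduced sketch of that pattern; everything except the Angular APIs is illustrative and not part of the patch:

import {AfterViewInit, Component, Input} from '@angular/core';

@Component({selector: 'lazy-chart', template: '<div></div>'})
export class LazyChart implements AfterViewInit {
  private viewReady = false;
  private data: number[]|null = null;
  // Stand-in for the D3 wrapper class (HuntProgressChartD3 in the real code):
  private chart: {update(data: number[]): void}|null = null;

  @Input()
  set chartData(data: number[]|null) {
    this.data = data;
    if (!this.viewReady || !this.data?.length) return;
    if (this.chart === null) {
      this.render();                  // First data after the view exists.
    } else {
      this.chart.update(this.data);   // Later emissions: in-place update.
    }
  }

  ngAfterViewInit() {
    this.viewReady = true;
    if (this.data?.length) this.render();  // Data arrived before the view.
  }

  private render() {
    this.chart = {update: () => {}};  // Placeholder for the real chart setup.
  }
}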
diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.scss b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.scss new file mode 100644 index 0000000000..423f765f9c --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.scss @@ -0,0 +1,9 @@ +.hunt-progress-chart-container { + padding-bottom: 2rem; + + .no-data { + margin: auto; + padding-top: 2rem; + text-align: center; + } +} diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ts new file mode 100644 index 0000000000..03a1c79e98 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart.ts @@ -0,0 +1,192 @@ +import {CommonModule} from '@angular/common'; +import {AfterViewInit, ChangeDetectorRef, Component, ElementRef, Input, OnDestroy, ViewChild} from '@angular/core'; +import {MatTooltipModule} from '@angular/material/tooltip'; +import * as d3 from 'd3'; + +import {ChartLegend, ChartLegendConfiguration} from '../../../../lib/dataviz/chart_legend'; +import {BaseLineChartDataset, LineChart, LineChartConfiguration, LineChartDatapoint} from '../../../../lib/dataviz/line_chart'; +import {isNonNull, isNull} from '../../../../lib/preconditions'; +import {TimestampModule} from '../../../timestamp/module'; + +const COMPLETED_CLIENTS_CHART_COLOR = '#6DD58C'; +const IN_PROGRESS_CLIENTS_CHART_COLOR = '#C4EED0'; + +/** Provides client completion progress data for a Hunt in chart format. */ +@Component({ + selector: 'app-hunt-progress-chart', + templateUrl: './hunt_progress_chart.ng.html', + styleUrls: ['./hunt_progress_chart.scss'], + standalone: true, + imports: [ + CommonModule, + MatTooltipModule, + TimestampModule, + ], +}) +export class HuntProgressChart implements AfterViewInit, OnDestroy { + @Input() + set chartProgressData(chartData: HuntProgressLineChartDataset|null| + undefined) { + this.chartData = chartData; + + if (!this.templateIsReady || !this.hasDataForChart) return; + + if (isNull(this.huntProgressChart)) { + this.renderLineChart(); + } else { + this.huntProgressChart.updateChartDataset(this.chartData!); + } + } + + @ViewChild('progressChartContainer') + private readonly progressChartContainerRef!: ElementRef; + + private templateIsReady = false; + private chartData: HuntProgressLineChartDataset|null|undefined; + + huntProgressChart: HuntProgressChartD3|undefined; + + get hasDataForChart(): boolean { + if (isNull(this.chartData)) return false; + + // We need at least 2 datapoints in a series in order to render a line: + return this.chartData.completedClients.length >= 2 || + this.chartData.inProgressClients.length >= 2; + } + + constructor(private readonly cdr: ChangeDetectorRef) {} + + ngAfterViewInit() { + this.templateIsReady = true; + + if (!this.hasDataForChart) return; + + this.renderLineChart(); + + /** + * Angular doesn't like when values used in the template (in this + * case `*ngIf="!huntProgressChart"`) change inside ngAfterViewInit() + * lifecycle-hook. If this happens, Angular will throw an + * ExpressionChangedAfterItHasBeenCheckedError runtime error (only in Dev. + * mode). 
+ * + * In this case, it is OK for us to wait to the template to be ready before + * rendering the line chart (reason for using ngAfterViewInit). The drawback + * of this is that the template condition `*ngIf="!huntProgressChart"` will + * trigger the ExpressionChangedAfterItHasBeenCheckedError runtime error in + * Dev. mode. + * + * As a solution, we manually run change detection after initializing + * `this.huntProgressChart`. Another possible solution would be to call + * `this.huntProgressChart = new HuntProgressChartD3(...)` asynchronously + * inside a setTimeout(). + * + * For more info: + * https://hackernoon.com/everything-you-need-to-know-about-the-expressionchangedafterithasbeencheckederror-error-e3fd9ce7dbb4 + */ + this.cdr.detectChanges(); + } + + ngOnDestroy() { + this.huntProgressChart?.removeEventListeners(); + } + + private renderLineChart(): void { + this.huntProgressChart = new HuntProgressChartD3( + this.progressChartContainerRef.nativeElement, + this.chartData!, + ); + } +} + +/** Data-structure to be consumed by the Hunt Progress Line Chart */ +export declare interface HuntProgressLineChartDataset extends + BaseLineChartDataset { + completedClients: LineChartDatapoint[]; + inProgressClients: LineChartDatapoint[]; +} + +class HuntProgressChartD3 { + private readonly lineChart: LineChart|undefined; + private readonly chartLegend: ChartLegend; + private readonly xScale: d3.ScaleLinear; + + constructor( + private readonly container: d3.BaseType, + private readonly lineChartDataset: HuntProgressLineChartDataset, + ) { + const chartContainerSelection = d3.select(this.container); + + const containerNode = chartContainerSelection.append('div').node()!; + + const chartLegendConfig: ChartLegendConfiguration = { + padding: { + topPx: 30, + rightPx: 20, + bottomPx: 20, + leftPx: 60, + }, + items: [ + { + label: 'Completed', + color: COMPLETED_CLIENTS_CHART_COLOR, + }, + { + label: 'In progress', + color: IN_PROGRESS_CLIENTS_CHART_COLOR, + }, + ], + }; + + this.chartLegend = new ChartLegend(containerNode, chartLegendConfig); + this.chartLegend.renderLegend(); + + // We want to share the xScale between charts, so it will live here and be + // consumed by the future bar-chart: + this.xScale = d3.scaleLinear(); + + const lineChartConfig: + LineChartConfiguration = { + scale: {x: this.xScale}, + sizing: { + padding: { + topPx: 20, + rightPx: 50, + bottomPx: 50, + leftPx: 60, + }, + rerenderOnResize: true, + }, + series: { + completedClients: { + color: COMPLETED_CLIENTS_CHART_COLOR, + isArea: true, + order: 2, + }, + inProgressClients: { + color: IN_PROGRESS_CLIENTS_CHART_COLOR, + isArea: true, + order: 1, + }, + } + }; + + this.lineChart = new LineChart( + containerNode, + this.lineChartDataset, + lineChartConfig, + ); + + this.lineChart.initialChartRender(); + } + + updateChartDataset(chartData: HuntProgressLineChartDataset): void { + if (isNonNull(this.lineChart)) { + this.lineChart.updateDataset(chartData); + } + } + + removeEventListeners(): void { + this.lineChart?.removeEventListeners(); + } +} \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart_test.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart_test.ts new file mode 100644 index 0000000000..5bc3ee8f3c --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_progress_chart/hunt_progress_chart_test.ts @@ -0,0 +1,163 @@ +import {Component} from 
'@angular/core'; +import {TestBed, waitForAsync} from '@angular/core/testing'; + +import {initTestEnvironment} from '../../../../testing'; + +import {HuntProgressChart, HuntProgressLineChartDataset} from './hunt_progress_chart'; + +initTestEnvironment(); + +@Component({ + template: ` + `, +}) +class TestHostComponent { + chartProgressData: HuntProgressLineChartDataset|null|undefined = null; + totalClients: bigint|null|undefined = null; +} + +describe('HuntProgressChart Component', () => { + beforeEach(waitForAsync(() => { + TestBed + .configureTestingModule({ + imports: [ + HuntProgressChart, + ], + declarations: [TestHostComponent], + teardown: {destroyAfterEach: false} + }) + .compileComponents(); + })); + + it('shows a message if hunt completion progress data is null', () => { + const fixture = TestBed.createComponent(TestHostComponent); + const hostComponentInstance = fixture.componentInstance; + + hostComponentInstance.chartProgressData = undefined; + + fixture.detectChanges(); + + const noDataBlock = fixture.nativeElement.querySelector( + '.hunt-progress-chart-container .no-data'); + + expect(noDataBlock).not.toBeNull(); + expect(noDataBlock.textContent) + .toEqual('There is no progress data to show.'); + }); + + it('shows a message if hunt completion progress data is empty', () => { + const fixture = TestBed.createComponent(TestHostComponent); + const hostComponentInstance = fixture.componentInstance; + + hostComponentInstance.chartProgressData = { + completedClients: [], + inProgressClients: [], + }; + + fixture.detectChanges(); + + const noDataBlock = fixture.nativeElement.querySelector( + '.hunt-progress-chart-container .no-data'); + + expect(noDataBlock).not.toBeNull(); + expect(noDataBlock.textContent) + .toEqual('There is no progress data to show.'); + }); + + it('shows a message if hunt completion progress data has only 1 data-point', + () => { + const fixture = TestBed.createComponent(TestHostComponent); + const hostComponentInstance = fixture.componentInstance; + + hostComponentInstance.chartProgressData = { + inProgressClients: [ + {x: 1669026900000, y: 0}, + ], + completedClients: [ + {x: 1669026900000, y: 0}, + ] + }; + + fixture.detectChanges(); + + const noDataBlock = fixture.nativeElement.querySelector( + '.hunt-progress-chart-container .no-data'); + + expect(noDataBlock).not.toBeNull(); + expect(noDataBlock.textContent) + .toEqual('There is no progress data to show.'); + }); + + it('shows a chart if hunt completion progress data is valid', () => { + const fixture = TestBed.createComponent(TestHostComponent); + const hostComponentInstance = fixture.componentInstance; + + const chartProgressDataMock: HuntProgressLineChartDataset = { + inProgressClients: [ + {x: 1669026900000, y: 0}, + {x: 1669026900000, y: 7}, + {x: 1669026900000, y: 29}, + ], + completedClients: [ + {x: 1669026900000, y: 0}, + {x: 1669026900000, y: 0}, + {x: 1669026900000, y: 0}, + ] + }; + + hostComponentInstance.chartProgressData = chartProgressDataMock; + + fixture.detectChanges(); + + const chartSvg = fixture.nativeElement.querySelectorAll( + '.hunt-progress-chart-container svg'); + + expect(chartSvg).not.toBeNull(); + }); + + it('shows a chart after the completion data gets updated', () => { + const fixture = TestBed.createComponent(TestHostComponent); + const hostComponentInstance = fixture.componentInstance; + + hostComponentInstance.chartProgressData = { + completedClients: [], + inProgressClients: [], + }; + + fixture.detectChanges(); + + let noDataBlock = fixture.nativeElement.querySelector( 
+ '.hunt-progress-chart-container .no-data'); + + expect(noDataBlock).not.toBeNull(); + expect(noDataBlock.textContent) + .toEqual('There is no progress data to show.'); + + const chartProgressDataMock: HuntProgressLineChartDataset = { + inProgressClients: [ + {x: 1669026900000, y: 0}, + {x: 1669026900000, y: 7}, + {x: 1669026900000, y: 29}, + ], + completedClients: [ + {x: 1669026900000, y: 0}, + {x: 1669026900000, y: 0}, + {x: 1669026900000, y: 0}, + ] + }; + + hostComponentInstance.chartProgressData = chartProgressDataMock; + + fixture.detectChanges(); + + const chartSvg = fixture.nativeElement.querySelectorAll( + '.hunt-progress-chart-container svg'); + + expect(chartSvg).not.toBeNull(); + noDataBlock = fixture.nativeElement.querySelector( + '.hunt-progress-chart-container .no-data'); + + expect(noDataBlock).toBeNull(); + }); +}); diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_status_chip/hunt_status_chip.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_status_chip/hunt_status_chip.ts index 249b1a31c2..ef7c8bbc00 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_status_chip/hunt_status_chip.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_status_chip/hunt_status_chip.ts @@ -5,7 +5,7 @@ import {tap} from 'rxjs/operators'; import {toDurationUnit} from '../../../components/form/duration_input/duration_conversion'; import {ApiHuntStateReason} from '../../../lib/api/api_interfaces'; import {DateTime} from '../../../lib/date_time'; -import {Hunt, HuntState} from '../../../lib/models/hunt'; +import {type Hunt, HuntState} from '../../../lib/models/hunt'; import {GrrUser} from '../../../lib/models/user'; import {observeOnDestroy} from '../../../lib/reactive'; import {UserGlobalStore} from '../../../store/user_global_store'; diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/new_hunt/new_hunt_test.ts b/grr/server/grr_response_server/gui/ui/components/hunt/new_hunt/new_hunt_test.ts index f5d2b7dae9..d932892e18 100644 --- a/grr/server/grr_response_server/gui/ui/components/hunt/new_hunt/new_hunt_test.ts +++ b/grr/server/grr_response_server/gui/ui/components/hunt/new_hunt/new_hunt_test.ts @@ -119,12 +119,12 @@ describe('new hunt test', () => { newHuntLocalStore.mockedObservables.originalHunt$.next(null); newHuntLocalStore.mockedObservables.flowWithDescriptor$.next({ flow: newFlow({ - name: 'KeepAlive', + name: 'Kill', creator: 'morty', }), descriptor: { - name: 'KeepAlive', - friendlyName: 'KeepAlive', + name: 'Kill', + friendlyName: 'Kill GRR agent process', category: 'a', defaultArgs: {}, }, @@ -157,12 +157,12 @@ describe('new hunt test', () => { injectMockStore(NewHuntLocalStore, fixture.debugElement); newHuntLocalStore.mockedObservables.flowWithDescriptor$.next({ flow: newFlow({ - name: 'KeepAlive', + name: 'Kill', creator: 'morty', }), descriptor: { - name: 'KeepAlive', - friendlyName: 'KeepAlive', + name: 'Kill', + friendlyName: 'Kill GRR agent process', category: 'a', defaultArgs: {}, }, @@ -175,7 +175,7 @@ describe('new hunt test', () => { .query(By.css('.config')); const text = flowSection.nativeElement.textContent; expect(text).toContain('morty'); - expect(text).toContain('KeepAlive'); + expect(text).toContain('Kill'); }); it('displays flow from original hunt', async () => { @@ -187,7 +187,7 @@ describe('new hunt test', () => { injectMockStore(NewHuntLocalStore, fixture.debugElement); newHuntLocalStore.mockedObservables.originalHunt$.next(newHunt({ huntId: 'H1234', - flowName: 'KeepAlive', + 
flowName: 'Kill', })); fixture.detectChanges(); @@ -196,7 +196,7 @@ describe('new hunt test', () => { .query(By.css('.config')); const text = flowSection.nativeElement.textContent; expect(text).toContain('Flow arguments'); - expect(text).toContain('KeepAlive'); + expect(text).toContain('Kill'); }); it('loads and displays hunt params', async () => { @@ -335,12 +335,12 @@ describe('new hunt test', () => { injectMockStore(NewHuntLocalStore, fixture.debugElement); newHuntLocalStore.mockedObservables.flowWithDescriptor$.next({ flow: newFlow({ - name: 'KeepAlive', + name: 'Kill', creator: 'morty', }), descriptor: { - name: 'KeepAlive', - friendlyName: 'KeepAlive', + name: 'Kill', + friendlyName: 'Kill GRR agent process', category: 'a', defaultArgs: {}, }, @@ -367,7 +367,7 @@ describe('new hunt test', () => { injectMockStore(NewHuntLocalStore, fixture.debugElement); newHuntLocalStore.mockedObservables.originalHunt$.next(newHunt({ huntId: 'H1234', - flowName: 'KeepAlive', + flowName: 'Kill', })); newHuntLocalStore.mockedObservables.flowWithDescriptor$.next(null); fixture.detectChanges(); diff --git a/grr/server/grr_response_server/gui/ui/lib/api/api_interfaces.ts b/grr/server/grr_response_server/gui/ui/lib/api/api_interfaces.ts index 474373bb58..52828ce533 100644 --- a/grr/server/grr_response_server/gui/ui/lib/api/api_interfaces.ts +++ b/grr/server/grr_response_server/gui/ui/lib/api/api_interfaces.ts @@ -188,15 +188,6 @@ export declare interface ApiClient { readonly rrgArgs?: readonly string[]; } -/** ApiClientActionRequest proto mapping. */ -export declare interface ApiClientActionRequest { - readonly taskId?: ProtoUint64; - readonly leasedUntil?: RDFDatetime; - readonly sessionId?: RDFURN; - readonly clientAction?: string; - readonly responses?: readonly GrrMessage[]; -} - /** ApiClientApproval proto mapping. */ export declare interface ApiClientApproval { readonly subject?: ApiClient; @@ -241,7 +232,6 @@ export declare interface ApiCountHuntResultsByTypeResult { export declare interface ApiCreateClientApprovalArgs { readonly clientId?: string; readonly approval?: ApiClientApproval; - readonly keepClientAlive?: boolean; } /** ApiCreateCronJobApprovalArgs proto mapping. */ @@ -1313,17 +1303,6 @@ export declare interface ApiListArtifactsResult { readonly totalCount?: ProtoInt64; } -/** ApiListClientActionRequestsArgs proto mapping. */ -export declare interface ApiListClientActionRequestsArgs { - readonly clientId?: string; - readonly fetchResponses?: boolean; -} - -/** ApiListClientActionRequestsResult proto mapping. */ -export declare interface ApiListClientActionRequestsResult { - readonly items?: readonly ApiClientActionRequest[]; -} - /** ApiListClientApprovalsArgs proto mapping. */ export declare interface ApiListClientApprovalsArgs { readonly offset?: ProtoInt64; @@ -2685,19 +2664,22 @@ export declare interface DiskVolumeInfoArgs { readonly pathtype?: PathSpecPathType; } +/** DummyArgs proto mapping. */ +export declare interface DummyArgs { + readonly flowInput?: string; +} + +/** DummyFlowResult proto mapping. */ +export declare interface DummyFlowResult { + readonly flowOutput?: string; +} + /** DumpACPITableArgs proto mapping. */ export declare interface DumpACPITableArgs { readonly logging?: boolean; readonly tableSignatureList?: readonly string[]; } -/** DumpEfiImageResponse proto mapping. 
*/ -export declare interface DumpEfiImageResponse { - readonly eficheckVersion?: string; - readonly path?: PathSpec; - readonly response?: ExecuteBinaryResponse; -} - /** DumpFlashImageArgs proto mapping. */ export declare interface DumpFlashImageArgs { readonly logLevel?: ProtoUint32; @@ -2712,29 +2694,6 @@ export declare interface EdrAgent { readonly backendId?: string; } -/** EfiCollection proto mapping. */ -export declare interface EfiCollection { - readonly eficheckVersion?: string; - readonly bootRomVersion?: string; - readonly entries?: readonly EfiEntry[]; -} - -/** EfiEntry proto mapping. */ -export declare interface EfiEntry { - readonly volumeType?: ProtoUint32; - readonly address?: ProtoUint64; - readonly size?: ProtoUint32; - readonly guid?: string; - readonly hash?: string; - readonly flags?: ProtoUint32; - readonly index?: ProtoUint32; -} - -/** EficheckFlowArgs proto mapping. */ -export declare interface EficheckFlowArgs { - readonly cmdPath?: string; -} - /** EmbeddedRDFValue proto mapping. */ export declare interface EmbeddedRDFValue { readonly embeddedAge?: RDFDatetime; @@ -2968,38 +2927,6 @@ export declare interface FileFinderStatActionOptions { readonly collectExtAttrs?: boolean; } -/** FindFilesArgs proto mapping. */ -export declare interface FindFilesArgs { - readonly findspec?: FindSpec; -} - -/** FindSpec proto mapping. */ -export declare interface FindSpec { - readonly iterator?: Iterator; - readonly pathspec?: PathSpec; - readonly pathGlob?: GlobExpression; - readonly pathRegex?: string; - readonly dataRegex?: RDFBytes; - readonly startTime?: RDFDatetime; - readonly endTime?: RDFDatetime; - readonly crossDevs?: boolean; - readonly maxDepth?: ProtoInt32; - readonly hit?: StatEntry; - readonly maxData?: ProtoUint64; - readonly minFileSize?: ProtoUint64; - readonly maxFileSize?: ProtoUint64; - readonly permMask?: ProtoUint64; - readonly permMode?: ProtoUint64; - readonly uid?: ProtoUint64; - readonly gid?: ProtoUint64; - readonly collectExtAttrs?: boolean; -} - -/** FingerprintFileArgs proto mapping. */ -export declare interface FingerprintFileArgs { - readonly pathspec?: PathSpec; -} - /** FirefoxHistoryArgs proto mapping. */ export declare interface FirefoxHistoryArgs { readonly pathtype?: PathSpecPathType; @@ -3202,6 +3129,11 @@ export enum GUISettingsUIMode { DEBUG = 'DEBUG', } +/** GetCrowdstrikeAgentIdResult proto mapping. */ +export declare interface GetCrowdstrikeAgentIdResult { + readonly agentId?: string; +} + /** GetFileArgs proto mapping. */ export declare interface GetFileArgs { readonly pathspec?: PathSpec; @@ -3431,25 +3363,6 @@ export declare interface InterrogateArgs { readonly lightweight?: boolean; } -/** Iterator proto mapping. */ -export declare interface Iterator { - readonly clientState?: Dict; - readonly skip?: ProtoUint32; - readonly number?: ProtoUint32; - readonly state?: IteratorState; -} - -/** Iterator.State proto mapping. */ -export enum IteratorState { - RUNNING = 'RUNNING', - FINISHED = 'FINISHED', -} - -/** KeepAliveArgs proto mapping. */ -export declare interface KeepAliveArgs { - readonly duration?: DurationSeconds; -} - /** KeyValue proto mapping. */ export declare interface KeyValue { readonly k?: DataBlob; @@ -4004,16 +3917,6 @@ export enum SearchExpressionExpressionType { CONDITION = 'CONDITION', } -/** SendFileRequest proto mapping. 
*/ -export declare interface SendFileRequest { - readonly pathspec?: PathSpec; - readonly addressFamily?: NetworkAddressFamily; - readonly host?: string; - readonly port?: ProtoUint64; - readonly key?: ProtoBytes; - readonly iv?: ProtoBytes; -} - /** SortOrder proto mapping. */ export declare interface SortOrder { readonly orderBy?: SortOrderOrderBy; @@ -4132,22 +4035,12 @@ export declare interface Uname { readonly pep425tag?: string; } -/** UninstallArgs proto mapping. */ -export declare interface UninstallArgs { - readonly kill?: boolean; -} - /** UnixVolume proto mapping. */ export declare interface UnixVolume { readonly mountPoint?: string; readonly options?: string; } -/** UpdateClientArgs proto mapping. */ -export declare interface UpdateClientArgs { - readonly binaryPath?: string; -} - /** UpdateConfigurationArgs proto mapping. */ export declare interface UpdateConfigurationArgs { readonly config?: Dict; @@ -4311,6 +4204,7 @@ export declare interface YaraProcessScanMatch { readonly process?: Process; readonly match?: readonly YaraMatch[]; readonly scanTimeUs?: ProtoUint64; + readonly pmiHash?: string; } /** YaraProcessScanMiss proto mapping. */ diff --git a/grr/server/grr_response_server/gui/ui/lib/api_translation/client.ts b/grr/server/grr_response_server/gui/ui/lib/api_translation/client.ts index 1a27add23d..a1bca140a6 100644 --- a/grr/server/grr_response_server/gui/ui/lib/api_translation/client.ts +++ b/grr/server/grr_response_server/gui/ui/lib/api_translation/client.ts @@ -34,11 +34,9 @@ function createAgentInfo(apiAgentInfo: apiInterfaces.ClientInformation): revision = BigInt(apiAgentInfo.revision); } - // TODO: Remove this workarond once build_time is a proper Date. let buildTime: number = NaN; if (apiAgentInfo.buildTime) { - buildTime = Date.parse(`${apiAgentInfo.buildTime} UTC`) || - Date.parse(apiAgentInfo.buildTime); + buildTime = Date.parse(apiAgentInfo.buildTime); } return { diff --git a/grr/server/grr_response_server/gui/ui/lib/api_translation/flow_test.ts b/grr/server/grr_response_server/gui/ui/lib/api_translation/flow_test.ts index 74a63837dd..29e96a5df5 100644 --- a/grr/server/grr_response_server/gui/ui/lib/api_translation/flow_test.ts +++ b/grr/server/grr_response_server/gui/ui/lib/api_translation/flow_test.ts @@ -16,7 +16,7 @@ describe('Flow API Translation', () => { const apiFlow: ApiFlow = { flowId: '1234', clientId: 'C.4567', - name: 'KeepAlive', + name: 'Kill', creator: 'morty', lastActiveAt: '1571789996681000', // 2019-10-23T00:19:56.681Z startedAt: '1571789996679000', // 2019-10-23T00:19:56.679Z @@ -27,7 +27,7 @@ describe('Flow API Translation', () => { const flow: Flow = { flowId: '1234', clientId: 'C.4567', - name: 'KeepAlive', + name: 'Kill', creator: 'morty', lastActiveAt: new Date(1571789996681), startedAt: new Date(1571789996679), @@ -46,7 +46,7 @@ describe('Flow API Translation', () => { const apiFlow: ApiFlow = { flowId: '1234', clientId: 'C.4567', - name: 'KeepAlive', + name: 'Kill', creator: 'morty', lastActiveAt: '1571789996681000', // 2019-10-23T00:19:56.681Z startedAt: '1571789996679000', // 2019-10-23T00:19:56.679Z diff --git a/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts b/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts index 4e537461b1..4746354f74 100644 --- a/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts +++ b/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts @@ -276,3 +276,9 @@ export const PAYLOAD_TYPE_TRANSLATION: { columns: EXECUTE_PYTHON_HACK_COLUMNS, } as 
PayloadTranslation, } as const; + +/** Maps PayloadType to corresponding translation information for Flows. */ +export const FLOW_PAYLOAD_TYPE_TRANSLATION: { + // Note: Not every PayloadType has a translation definition: + [key in PayloadType]?: PayloadTranslation<{[key: string]: ColumnDescriptor}> +} = {} as const; diff --git a/grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend.ts b/grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend.ts new file mode 100644 index 0000000000..9f7a08ebea --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend.ts @@ -0,0 +1,103 @@ +import * as d3 from 'd3'; + +import {isNonNull} from '../preconditions'; + +import {DEFAULT_PADDING_PX, PaddingConfiguration, toCSSPaddingValue} from './padding'; + +declare interface LegendItemConfiguration { + label: string; + color: string; +} + +/** Defines the orientation in which the legend items are laid out. */ +export enum LegendOrientation { + HORIZONTAL = 'HORIZONTAL', + VERTICAL = 'VERTICAL', +} + +/** Configuration object for the legend to be rendered. */ +export declare interface ChartLegendConfiguration { + orientation?: LegendOrientation; + padding?: PaddingConfiguration|number; + items: LegendItemConfiguration[]; +} + +const DEFAULT_ORIENTATION = LegendOrientation.HORIZONTAL; +const GAP_BETWEEN_ITEMS = '20px'; +const GAP_BETWEEN_SQUARE_AND_LABEL = '10px'; +const ITEM_SQUARE_SIZE = '20px'; + +/** Renders a basic Legend to provide more context for charts */ +export class ChartLegend { + private legendContainer?: + d3.Selection; + private readonly legendOrientation: LegendOrientation = DEFAULT_ORIENTATION; + private readonly legendPaddingPx: PaddingConfiguration = { + topPx: DEFAULT_PADDING_PX, + rightPx: DEFAULT_PADDING_PX, + bottomPx: DEFAULT_PADDING_PX, + leftPx: DEFAULT_PADDING_PX, + }; + + constructor( + private readonly container: Element, + private readonly configuration: ChartLegendConfiguration, + ) { + this.legendOrientation = + this.configuration.orientation ??
DEFAULT_ORIENTATION; + + if (isNonNull(this.configuration.padding)) { + if (typeof this.configuration.padding === 'number') { + this.legendPaddingPx = { + topPx: this.configuration.padding, + bottomPx: this.configuration.padding, + leftPx: this.configuration.padding, + rightPx: this.configuration.padding, + }; + } else { + this.legendPaddingPx = this.configuration.padding; + } + } + } + + renderLegend(): void { + this.legendContainer = d3.select(this.container) + .append('div') + .style('display', 'flex') + .style('gap', GAP_BETWEEN_ITEMS) + .attr('class', 'legend-container'); + + if (this.legendOrientation === LegendOrientation.HORIZONTAL) { + // 'row' is the default value for 'flex-direction', but we want to be + // explicit: + this.legendContainer.style('flex-direction', 'row'); + } + + if (this.legendOrientation === LegendOrientation.VERTICAL) { + this.legendContainer.style('flex-direction', 'column'); + } + + this.legendContainer.style( + 'padding', toCSSPaddingValue(this.legendPaddingPx)); + + this.configuration.items.forEach((item) => { + const itemContainer = this.legendContainer!.append('div') + .attr('class', 'legend-item') + .style('display', 'flex') + .style('align-items', 'center') + .style('gap', GAP_BETWEEN_SQUARE_AND_LABEL); + + // We append the coloured square + itemContainer.append('div') + .attr('class', 'legend-item-square') + .style('width', ITEM_SQUARE_SIZE) + .style('height', ITEM_SQUARE_SIZE) + .style('background-color', item.color); + + // We append the label + itemContainer.append('div') + .attr('class', 'legend-item-label') + .text(item.label); + }); + } +} diff --git a/grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend_test.ts b/grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend_test.ts new file mode 100644 index 0000000000..5886236c67 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/lib/dataviz/chart_legend_test.ts @@ -0,0 +1,133 @@ +import {ChartLegend, ChartLegendConfiguration, LegendOrientation} from './chart_legend'; + +describe('ChartLegend', () => { + let testParentContainer: HTMLDivElement; + let chartLegend: ChartLegend; + + beforeEach(() => { + testParentContainer = document.createElement('div'); + document.body.appendChild(testParentContainer); + }); + + it('renders one legend item', () => { + const config: ChartLegendConfiguration = { + items: [ + { + label: 'Test', + color: 'red', + }, + ], + }; + + chartLegend = new ChartLegend(testParentContainer, config); + + chartLegend.renderLegend(); + + const legendContainer = + testParentContainer.querySelector('.legend-container'); + expect(legendContainer).not.toBeNull(); + + const legendItems = legendContainer!.querySelectorAll('.legend-item'); + expect(legendItems.length).toBe(1); + + const legendItemLabel = legendItems[0].querySelector('.legend-item-label'); + const legendItemSquare = + legendItems[0].querySelector('.legend-item-square'); + + expect(legendItemLabel!.textContent).toContain('Test'); + expect(legendItemSquare!.style.backgroundColor).toEqual('red'); + }); + + it('renders multiple legend items', () => { + const config: ChartLegendConfiguration = { + items: [ + { + label: 'Test', + color: 'red', + }, + { + label: 'Patata', + color: 'blue', + }, + { + label: 'Lorem Ipsum', + color: 'green', + }, + ], + }; + + chartLegend = new ChartLegend(testParentContainer, config); + + chartLegend.renderLegend(); + + const legendContainer = + testParentContainer.querySelector('.legend-container'); + expect(legendContainer).not.toBeNull(); + + const legendItems = 
legendContainer!.querySelectorAll('.legend-item'); + expect(legendItems.length).toBe(3); + + const legendItemLabels = + legendContainer!.querySelectorAll('.legend-item-label'); + const legendItemSquares = legendContainer!.querySelectorAll( + '.legend-item-square'); + + expect(legendItemLabels[0].textContent).toContain('Test'); + expect(legendItemSquares[0].style.backgroundColor).toEqual('red'); + + expect(legendItemLabels[1].textContent).toContain('Patata'); + expect(legendItemSquares[1].style.backgroundColor).toEqual('blue'); + + expect(legendItemLabels[2].textContent).toContain('Lorem Ipsum'); + expect(legendItemSquares[2].style.backgroundColor).toEqual('green'); + }); + + it('does not render any legend items', () => { + const config: ChartLegendConfiguration = { + items: [], + }; + + chartLegend = new ChartLegend(testParentContainer, config); + + chartLegend.renderLegend(); + + const legendContainer = + testParentContainer.querySelector('.legend-container'); + expect(legendContainer).not.toBeNull(); + + const legendItems = legendContainer!.querySelectorAll('.legend-item'); + expect(legendItems.length).toBe(0); + }); + + it('renders legend items vertically', () => { + const config: ChartLegendConfiguration = { + orientation: LegendOrientation.VERTICAL, + items: [], + }; + + chartLegend = new ChartLegend(testParentContainer, config); + + chartLegend.renderLegend(); + + const legendContainer = + testParentContainer.querySelector('.legend-container'); + expect(legendContainer).not.toBeNull(); + expect(legendContainer!.style.flexDirection).toEqual('column'); + }); + + it('renders legend items horizontally', () => { + const config: ChartLegendConfiguration = { + items: [], + orientation: LegendOrientation.HORIZONTAL, + }; + + chartLegend = new ChartLegend(testParentContainer, config); + + chartLegend.renderLegend(); + + const legendContainer = + testParentContainer.querySelector('.legend-container'); + expect(legendContainer).not.toBeNull(); + expect(legendContainer!.style.flexDirection).toEqual('row'); + }); +}); diff --git a/grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart.ts b/grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart.ts new file mode 100644 index 0000000000..4a0324348d --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart.ts @@ -0,0 +1,568 @@ +import * as d3 from 'd3'; + +import {isNonNull, isNull} from '../preconditions'; + +import {DEFAULT_PADDING_PX, PaddingConfiguration} from './padding'; + +/** Data Structure defining a single data-point in the Line Chart */ +export declare interface LineChartDatapoint { + y: number; + x: number; +} + +/** + * Data structure definition that contains the different series to render in the + * chart. + * + * Example: + * interface NumbersLineChartDataset extends + * BaseLineChartDataset { + * one: LineChartDatapoint[]; + * two: LineChartDatapoint[]; + * } + */ +export declare type BaseLineChartDataset = Record; + +declare interface LineChartSizing { + /** If not specified, the chart will take its parent's width */ + widthPx?: number; + /** By default the height will be half of the width: */ + heightToWidthRatio?: number; + /** + * Spacing between the chart container element and the chart plotting space. + * This is useful to prevent Axis labels from being cropped involuntarily.
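For reference, a minimal sketch of the two padding forms that `LineChartSizing` accepts (the values here are invented for illustration): a single number applies the same padding to all four sides, while a full `PaddingConfiguration` sets each side individually.

```ts
// Uniform padding: 40px on every side (shorthand form).
const uniformSizing: LineChartSizing = {
  widthPx: 600,
  padding: 40,
};

// Per-side padding: extra room at the bottom and left so that the
// axis tick labels and date sub-labels are not cropped.
const perSideSizing: LineChartSizing = {
  heightToWidthRatio: 1 / 2,
  padding: {topPx: 10, rightPx: 10, bottomPx: 60, leftPx: 40},
};
```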
+ */ + padding?: PaddingConfiguration|number; + /** + * If true, the chart will listen to size changes on the parent HTML node + * and will resize itself to fit the entire width, always respecting the + * heightToWidth ratio. + */ + rerenderOnResize?: boolean; +} + +/** Configuration options for the Line Chart */ +export declare interface LineChartConfiguration< + LineChartDataset extends BaseLineChartDataset> { + scale?: + {x?: d3.ScaleLinear; y?: d3.ScaleLinear;}; + sizing?: LineChartSizing; + /** + * Each line/area can be configured through this property. `SeriesKey` stands + * for the key used in the dataset dictionary object. For example in the + * following dataset, `SeriesKey` would effectively be 'lineOne' | 'lineTwo': + * + * const dataset = { + * lineOne: [{ x:1, y:3}, {x:2, y: 4}], + * lineTwo: [{ x:1, y:1}, {x:2, y: 3}], + * }; + * + * This allows us to know which line the configuration should apply to. + */ + series?: + {[SeriesKey in keyof LineChartDataset]?: LineChartSeriesConfiguration;}; +} + +/** Configuration object for each line/area in the chart. */ +declare interface LineChartSeriesConfiguration { + /** If not specified, "SeriesKey" will be used as the id. */ + id?: string; + /** Colors the area between the line and X-Axis. */ + color?: string; + /** If true it will render an area, otherwise only a line. */ + isArea?: boolean; + /** + * Determines the order (z-axis position relative to the user) of the line. + * The higher the order, the more in front the line/area will be. + * E.g. `1` is drawn first (so will be in the back), and + * `2` is drawn second (in front/on top of 1) + */ + order?: number; +} + +/** Defines the default transition time for line-chart animations */ +export const DEFAULT_TRANSITION_TIME_MS = 500; +// body-1 Angular Material typography level: +const DEFAULT_AXIS_LABEL_FONT_SIZE = '14px'; +/** Defines the default height-to-width relationship of the chart */ +export const DEFAULT_HEIGHT_TO_WIDTH_RATIO = 1 / 2; +const AXIS_COLOR = '#757575'; // Mat. $grey-palette 600 +const AXIS_TICKS_PER_WIDTHPX_RATIO = 1 / 80; + +// https://github.com/d3/d3-time-format +const X_AXIS_LABEL_DATE_FORMAT = '%H:%M:%S'; +const X_AXIS_SUBLABEL_DATE_FORMAT = '%b %d'; +const X_AXIS_SUBLABEL_DATE_FORMAT_WITH_YEAR = '%b %d %Y'; +const X_AXIS_SUBLABEL_MARGIN_PX = 20; +const CURRENT_YEAR = new Date().getFullYear(); + +/** + * Note: We want to provide information about the date(s) of the Hunt in the + * chart without being redundant in every X Axis tick label. For this reason, + * the decided approach is to show the time (hh:mm:ss) in every tick label, + * and show the date in a subtitle label. Each date will only be added once, + * meaning that if a Hunt happens entirely in the same calendar day, that day + * will only be shown once below the first tick starting from the left. If a + * hunt spans multiple days, then multiple date subtitle labels will be + * shown, without being repeated. + */ +const toXAxisDateLabel = d3.timeFormat(X_AXIS_LABEL_DATE_FORMAT); +const toXAxisDateSubLabel = d3.timeFormat(X_AXIS_SUBLABEL_DATE_FORMAT); +const toXAxisDateSubLabelWithYear = d3.timeFormat( + X_AXIS_SUBLABEL_DATE_FORMAT_WITH_YEAR, +); + +const HEX_CHAR_RANGE = '0123456789ABCDEF'; +function generateRandomHexColor(): string { + let color = '#'; + + for (let i = 0; i < 6; i++) { + color += HEX_CHAR_RANGE[Math.floor(Math.random() * 16)]; + } + + return color; +} + +/** + * LineChart renders a line-chart in the given parent HTML node element, with + * the given configuration.
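To make the `series`/`SeriesKey` mechanics above concrete, here is a minimal, hypothetical usage sketch; the element selector, dataset values and colors are invented, while `LineChart`, `BaseLineChartDataset` and `LineChartDatapoint` are the ones defined in this file:

```ts
interface DemoLineChartDataset extends BaseLineChartDataset {
  lineOne: LineChartDatapoint[];
  lineTwo: LineChartDatapoint[];
}

const dataset: DemoLineChartDataset = {
  lineOne: [{x: 1, y: 3}, {x: 2, y: 4}],
  lineTwo: [{x: 1, y: 1}, {x: 2, y: 3}],
};

// `series` is keyed by 'lineOne' | 'lineTwo', so each line can be
// configured individually. `order: 2` draws lineTwo on top of lineOne.
const chart = new LineChart(
    document.querySelector('.chart-parent')!,  // hypothetical host element
    dataset,
    {
      sizing: {widthPx: 600, rerenderOnResize: true},
      series: {
        lineOne: {id: 'line-one', color: '#FFCA28', isArea: true, order: 1},
        lineTwo: {id: 'line-two', color: '#42A5F5', order: 2},
      },
    },
);

chart.initialChartRender();

// Later, e.g. when new progress data arrives, the chart transitions
// to the updated dataset:
chart.updateDataset({
  lineOne: [{x: 1, y: 3}, {x: 2, y: 4}, {x: 3, y: 6}],
  lineTwo: [{x: 1, y: 1}, {x: 2, y: 3}, {x: 3, y: 5}],
});
```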
+ */ +export class LineChart { + private readonly parentNodeSelection: + d3.Selection; + private chartSvgContainer?: + d3.Selection; + + private readonly transitionDurationMs = DEFAULT_TRANSITION_TIME_MS; + + private containerWidthPx = 0; + private chartHeightPx = 0; + private chartWidthPx = 0; + private chartPaddingPx: PaddingConfiguration = { + topPx: DEFAULT_PADDING_PX, + rightPx: DEFAULT_PADDING_PX, + bottomPx: DEFAULT_PADDING_PX, + leftPx: DEFAULT_PADDING_PX, + }; + + private xScale?: d3.ScaleLinear; + private yScale?: d3.ScaleLinear; + private xAxis?: d3.Axis; + private yAxis?: d3.Axis; + + private resizeObserver?: ResizeObserver; + + get xAxisTopMarginPx(): number { + return this.chartHeightPx - this.chartPaddingPx.bottomPx; + } + + get yAxisLeftMarginPx(): number { + return this.chartPaddingPx.leftPx; + } + + /** + * If the viewport is small, we want fewer ticks as there might be overlap + * between the labels. For this reason, we make the tick number dynamic for + * the X and Y axes. + */ + get xAxisTicks(): number { + return this.chartPlotWidthPx * AXIS_TICKS_PER_WIDTHPX_RATIO; + } + + get yAxisTicks(): number { + const heightToWidthRatio = this.configuration?.sizing?.heightToWidthRatio ?? + DEFAULT_HEIGHT_TO_WIDTH_RATIO; + + const yAxisTicksPerHeightPxRatio = + AXIS_TICKS_PER_WIDTHPX_RATIO / heightToWidthRatio; + + return this.chartPlotHeightPx * yAxisTicksPerHeightPxRatio; + } + + get chartPlotWidthPx(): number { + return this.chartWidthPx - this.chartPaddingPx.leftPx - + this.chartPaddingPx.rightPx; + } + + get chartPlotHeightPx(): number { + return this.chartHeightPx - this.chartPaddingPx.topPx - + this.chartPaddingPx.bottomPx; + } + + constructor( + private readonly parentNode: Element, + private dataset: LineChartDataset, + private readonly configuration?: LineChartConfiguration, + ) { + this.setChartSize(parentNode, configuration?.sizing); + this.setChartPadding(configuration?.sizing?.padding); + + this.parentNodeSelection = + d3.select(this.parentNode); + + this.initializeScales(); + this.setAxisScales(); + this.setAxisTicks(); + } + + updateDataset(dataset: LineChartDataset): void { + this.dataset = dataset; + + this.recalculateScaleDomains(this.dataset); + this.updateBothAxis(); + + this.redrawLines(); + } + + // There is currently no way of detecting the destruction of a "pure" class + // in JavaScript/TypeScript. Therefore, we need to expose the following method: + removeEventListeners(): void { + if (isNonNull(this.resizeObserver)) { + this.resizeObserver.disconnect(); + } + } + + initialChartRender(): void { + this.renderChartElements(); + + this.setupEventListeners(); + } + + private renderChartElements(): void { + this.chartSvgContainer = this.parentNodeSelection.append('svg') + .attr('class', 'chart-container') + .attr('width', `${this.chartWidthPx}px`) + .attr('height', `${this.chartHeightPx}px`); + + this.recalculateScaleDomains(this.dataset); + this.renderPaths(); + this.renderBothAxis(); + } + + private renderPaths(): void { + const datasetKeys = this.getCurrentDatasetKeys(); + + const pathContainer = + this.chartSvgContainer!.append('g').attr('class', 'path-container'); + + // The rendering order of the "path" elements is relevant, as the latest + // sibling will always be shown in front of the previous sibling.
+ // For this reason, we render paths on a low-to-high order value: + const datasetKeysSortedByOrder = datasetKeys.sort((aKey, bKey) => { + return this.getLineOrder(aKey) - this.getLineOrder(bKey); + }); + + datasetKeysSortedByOrder.forEach(key => { + const color = this.getLineColor(key); + const lineId = this.getLineId(key); + + const path = pathContainer.append('path') + .datum(this.dataset[key]) + .attr('class', `series-path`) + .attr('id', lineId) + .style('stroke', color); + + if (this.getLineIsArea(key)) { // We want an area: + path.attr('d', this.getAreaGenerator()).style('fill', color); + } else { // We only want a line: + path.attr('d', this.getLineGenerator()).style('fill', 'none'); + } + }); + } + + private recalculateScaleDomains(dataset: LineChartDataset): void { + const allChartDatapoints = this.getDatasetDatapoints(dataset); + + const allXValues = allChartDatapoints.map(dp => dp.x); + const allYValues = allChartDatapoints.map(dp => dp.y); + + this.xScale!.domain([d3.min(allXValues)!, d3.max(allXValues)!]); + this.yScale!.domain([d3.min(allYValues)!, d3.max(allYValues)!]); + } + + private renderBothAxis(): void { + const axisContainer = this.chartSvgContainer!.append('g') + .attr('class', 'axis-container') + .style('color', AXIS_COLOR); + + axisContainer.append('g') + .attr('class', 'axis x-axis') + .style('font-size', DEFAULT_AXIS_LABEL_FONT_SIZE) + .attr('transform', `translate(0, ${this.xAxisTopMarginPx})`) + .call(this.xAxis!) + .call(() => { + this.updateXAxisTickSubLabels(); + }); + + axisContainer.append('g') + .attr('class', 'axis y-axis') + .style('font-size', DEFAULT_AXIS_LABEL_FONT_SIZE) + .attr('transform', `translate(${this.yAxisLeftMarginPx}, 0)`) + .call(this.yAxis!); + } + + private updateBothAxis(): void { + this.xAxis!.ticks(this.xAxisTicks); + this.yAxis!.ticks(this.yAxisTicks); + + this.selectAxis('x') + .transition(d3.transition().duration(this.transitionDurationMs)) + .attr('transform', `translate(0, ${this.xAxisTopMarginPx})`) + .call(this.xAxis!) + .on('end', () => { + // Note: We need to asynchronously trigger the function to add the + // dates to the X Axis labels, as old ticks are still present for some + // reason. Old ticks being present will cause the wrong dates + // to be added (or no dates at all): + setTimeout(() => { + this.updateXAxisTickSubLabels(); + }); + }); + + this.selectAxis('y') + .transition(d3.transition().duration(this.transitionDurationMs)) + .call(this.yAxis!); + } + + /** + * Returns an Array containing all the Datapoints of the different series of + * the dataset, mainly for domain calculation purposes. + */ + private getDatasetDatapoints(dataset: LineChartDataset): + LineChartDatapoint[] { + const datasetKeys = d3.keys(dataset) as Array; + + return datasetKeys.map(key => dataset[key]).flat(); + } + + private initializeScales(): void { + this.xScale = this.configuration?.scale?.x ?? d3.scaleLinear(); + this.yScale = this.configuration?.scale?.y ?? d3.scaleLinear(); + + this.resetScalesRange(); + } + + /** + * We set the coordinate system for the Scale. This allows us to map + * line-series datapoints to plot-coordinates. + */ + private resetScalesRange(): void { + this.xScale!.range([ + this.chartPaddingPx.leftPx, + this.chartWidthPx - this.chartPaddingPx.rightPx + ]); + this.yScale!.range([ + this.chartHeightPx - this.chartPaddingPx.bottomPx, + this.chartPaddingPx.topPx + ]); + } + + private setAxisScales(): void { + this.xAxis = d3.axisBottom(this.xScale!) as d3.Axis; + this.yAxis = d3.axisLeft(this.yScale!)
as d3.Axis; + } + + private setAxisTicks(): void { + this.xAxis!.ticks(this.xAxisTicks) + .tickSizeInner(-this.chartPlotHeightPx) + .tickFormat((timestamp => toXAxisDateLabel(new Date(timestamp)))); + + this.yAxis!.ticks(this.yAxisTicks); + } + + /** + * We add dates to X-Axis tick time labels. Each date will only be added once, + * meaning that if a Hunt happens entirely in the same calendar day, that day + * will only be shown once below the first tick starting from the left. If a + * hunt spans multiple days, then multiple date subtitle labels will be + * shown, without being repeated. + */ + private updateXAxisTickSubLabels(): void { + const alreadyAddedDates = new Set(); + + this.selectAxis('x').selectAll('.tick').each( + (tickTimestamp, i, nodes) => { + const tickSubtitleLabelFn = + new Date(tickTimestamp).getFullYear() < CURRENT_YEAR ? + toXAxisDateSubLabelWithYear : + toXAxisDateSubLabel; + + const tickSubtitleText = tickSubtitleLabelFn(new Date(tickTimestamp)); + + if (alreadyAddedDates.has(tickSubtitleText)) return; + + alreadyAddedDates.add(tickSubtitleText); + + const currentTick = d3.select(nodes[i]); + const currentTickLabel = currentTick.select('text'); + const currentTickSubLabel = currentTick.select('.tick-subtitle'); + + if (currentTickSubLabel.empty()) { + const tickSubLabelYPosition = + +currentTickLabel.attr('y') + X_AXIS_SUBLABEL_MARGIN_PX; + + currentTick.append('text') + .attr('class', 'tick-subtitle') + .text(tickSubtitleText) + .style('fill', 'currentColor') + .attr('y', tickSubLabelYPosition) + .attr('dy', currentTickLabel.attr('dy')); + } else { + currentTickSubLabel.text(tickSubtitleText); + } + }); + } + + private getCurrentDatasetKeys(): Array { + return d3.keys(this.dataset) as Array; + } + + private getLineGenerator(): d3.Line { + return d3 + .line() + // We filter out incomplete datapoints: + .defined(dp => isNonNull(dp.x) && isNonNull(dp.y)) + .x((dp) => this.xScale!(dp.x)) + .y((dp) => this.yScale!(dp.y)); + } + + private getAreaGenerator(): d3.Area { + const lowestYAxisValue = this.yScale!.domain()[0]; + const areaBottomBoundary = this.yScale!(lowestYAxisValue); + + return d3 + .area() + // We filter out incomplete datapoints: + .defined(dp => isNonNull(dp.x) && isNonNull(dp.y)) + .x((dp) => this.xScale!(dp.x)) + .y0(areaBottomBoundary) + .y1((dp) => this.yScale!(dp.y)); + } + + private selectLinePath(key: keyof LineChartDataset) { + return d3.select( + `.series-path#${this.getLineId(key)}`, + ); + } + + private selectAxis(axis: 'x'|'y') { + return d3.select(`.${axis}-axis`); + } + + private getElementWidthPx(element: Element): number { + return element.getBoundingClientRect().width; + } + + private setChartSize( + containerElement: Element, + config?: LineChartSizing, + ): void { + if (isNonNull(config?.widthPx)) { + this.chartWidthPx = config!.widthPx; + } else { + // If we don't specify a width explicitly, it will take the available one, + // that is, the width of its parent node: + this.containerWidthPx = this.getElementWidthPx(containerElement); + this.chartWidthPx = this.containerWidthPx; + } + + const heightWidthRatio = + config?.heightToWidthRatio ?? 
DEFAULT_HEIGHT_TO_WIDTH_RATIO; + + this.chartHeightPx = this.chartWidthPx * heightWidthRatio; + } + + private setChartPadding(padding?: PaddingConfiguration|number): void { + if (isNull(padding)) return; + + if (typeof padding === 'number') { + this.chartPaddingPx = { + topPx: padding, + bottomPx: padding, + leftPx: padding, + rightPx: padding, + }; + } else { + this.chartPaddingPx = padding; + } + } + + private setupEventListeners(): void { + if (this.configuration?.sizing?.rerenderOnResize) { + this.resizeObserver = new ResizeObserver((e) => { + const currentSizeConfig = this.configuration?.sizing || {}; + const containerWidth = this.getElementWidthPx(this.parentNode); + + // If the width of the container element didn't change, we do nothing: + if (containerWidth === this.containerWidthPx) return; + + this.containerWidthPx = containerWidth; + + const newChartSizeConfiguration: LineChartSizing = { + ...currentSizeConfig, + widthPx: this.containerWidthPx, + }; + + this.setChartSize(this.parentNode, newChartSizeConfiguration); + this.resetScalesRange(); + this.setAxisScales(); + this.setAxisTicks(); + this.redrawChart(); + }); + + // We listen to size changes of the chart's container element: + this.resizeObserver.observe(this.parentNode); + } + } + + /** + * This method assumes there already is a line chart rendered. It will + * redraw and transition the different elements of the chart (axes & + * lines/areas) based on the current dataset. + */ + private redrawChart(): void { + this.chartSvgContainer!.attr('width', `${this.chartWidthPx}px`) + .attr('height', `${this.chartHeightPx}px`); + + this.recalculateScaleDomains(this.dataset); + this.updateBothAxis(); + this.redrawLines(); + } + + private redrawLines(): void { + this.getCurrentDatasetKeys().forEach(key => { + this.selectLinePath(key) + // We set the new data for the line: + .datum(this.dataset[key]) + .transition(d3.transition().duration(this.transitionDurationMs)) + .attr( + 'd', + this.getLineIsArea(key) ? this.getAreaGenerator() : + this.getLineGenerator()); + }); + } + + private getLineId(key: keyof LineChartDataset): string { + const lineConfiguration = this.configuration?.series?.[key]; + + return lineConfiguration?.id ?? String(key); + } + + private getLineOrder(key: keyof LineChartDataset): number { + const lineConfiguration = this.configuration?.series?.[key]; + + return lineConfiguration?.order ?? 0; + } + + private getLineIsArea(key: keyof LineChartDataset): boolean { + const lineConfiguration = this.configuration?.series?.[key]; + + return lineConfiguration?.isArea ?? false; + } + + private getLineColor(key: keyof LineChartDataset): string { + const lineConfiguration = this.configuration?.series?.[key]; + + return lineConfiguration?.color ??
generateRandomHexColor(); + } +} \ No newline at end of file diff --git a/grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart_test.ts b/grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart_test.ts new file mode 100644 index 0000000000..26584706f6 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/lib/dataviz/line_chart_test.ts @@ -0,0 +1,580 @@ +import {BaseLineChartDataset, DEFAULT_HEIGHT_TO_WIDTH_RATIO, DEFAULT_TRANSITION_TIME_MS, LineChart, LineChartDatapoint} from './line_chart'; + +interface TestLineChartDataset extends BaseLineChartDataset { + lineOne: LineChartDatapoint[]; +} + +const emptyTestData: TestLineChartDataset = { + lineOne: [], +}; + +const mockLine: LineChartDatapoint[] = [ + { + x: 10, + y: 20, + }, + { + x: 20, + y: 10, + }, +]; + +describe('LineChart', () => { + let testParentContainer: HTMLDivElement; + let testChart: LineChart; + + beforeEach(() => { + testParentContainer = document.createElement('div'); + document.body.appendChild(testParentContainer); + }); + + afterEach(() => { + testParentContainer.remove(); + }); + + describe('chart dimensions', () => { + it('renders a container SVG element with a width and height of 500px', + () => { + testChart = new LineChart( + testParentContainer, + emptyTestData, + { + sizing: { + widthPx: 500, + heightToWidthRatio: 1, + } + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = testParentContainer.querySelector('svg'); + expect(chartContainerElement).not.toBeNull(); + + expect(chartContainerElement!.getAttribute('width')).toEqual(`500px`); + + expect(chartContainerElement!.getAttribute('height')) + .toEqual( + `500px`, // 500 * 1 + ); + }); + + it('renders a container SVG element with a width of 500px and height of 250px', + () => { + testChart = new LineChart( + testParentContainer, + emptyTestData, + { + sizing: { + widthPx: 500, + heightToWidthRatio: 0.5, + } + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = testParentContainer.querySelector('svg'); + expect(chartContainerElement).not.toBeNull(); + + expect(chartContainerElement!.getAttribute('width')).toEqual(`500px`); + + expect(chartContainerElement!.getAttribute('height')) + .toEqual( + `250px`, // 500 * 0.5 + ); + }); + + it('renders a container SVG element the same width as its container', + () => { + const testWidth = 400; + + testParentContainer.style.width = `${testWidth}px`; + + testChart = new LineChart( + testParentContainer, + emptyTestData, + { + sizing: { + widthPx: undefined, + } + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = testParentContainer.querySelector('svg'); + expect(chartContainerElement).not.toBeNull(); + + expect(chartContainerElement!.getAttribute('width')) + .toEqual(`${testWidth}px`); + }); + + describe('padding', () => { + it('renders an svg without paddings and a plot area of the same width', + () => { + const testWidth = 400; + + testParentContainer.style.width = `${testWidth}px`; + + testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + }, + { + sizing: { + widthPx: undefined, + padding: 0, + } + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = + testParentContainer.querySelector('svg.chart-container'); + expect(chartContainerElement).not.toBeNull(); + + expect(chartContainerElement!.getAttribute('width')) + .toEqual(`${testWidth}px`); + + const pathContainerElement = + testParentContainer.querySelector('g.path-container'); + expect(pathContainerElement).not.toBeNull(); + + 
expect(pathContainerElement!.getBoundingClientRect().width) + .toEqual(testWidth); + }); + + it('applies the indicated padding to the plot area container (width)', + () => { + const testWidth = 400; + const testHeight = testWidth * DEFAULT_HEIGHT_TO_WIDTH_RATIO; + const padding = 50; + + testParentContainer.style.width = `${testWidth}px`; + + testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + }, + { + sizing: { + widthPx: undefined, + padding, + } + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = + testParentContainer.querySelector('svg.chart-container'); + expect(chartContainerElement).not.toBeNull(); + + expect(chartContainerElement!.getAttribute('width')) + .toEqual(`${testWidth}px`); + + const pathContainerElement = + testParentContainer.querySelector('g.path-container'); + expect(pathContainerElement).not.toBeNull(); + + // Width should equal parent width - leftPadding - rightPadding + expect(pathContainerElement!.getBoundingClientRect().width) + .toEqual(testWidth - padding * 2); + + // Height should equal: + // parent width * heightToWidthRatio - topPadding - bottomPadding + expect(pathContainerElement!.getBoundingClientRect().height) + .toEqual(testHeight - padding * 2); + }); + + it('applies the indicated padding to the plot area container (width & height)', + () => { + const testWidth = 400; + const heightToWidthRatio = 1; + const testHeight = 400 * heightToWidthRatio; + const padding = { + topPx: 20, + rightPx: 50, + bottomPx: 10, + leftPx: 30, + }; + + testParentContainer.style.width = `${testWidth}px`; + + testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + }, + { + sizing: { + widthPx: undefined, + heightToWidthRatio: 1, + padding, + } + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = + testParentContainer.querySelector('svg.chart-container'); + expect(chartContainerElement).not.toBeNull(); + + expect(chartContainerElement!.getAttribute('width')) + .toEqual(`${testWidth}px`); + + const pathContainerElement = + testParentContainer.querySelector('g.path-container'); + expect(pathContainerElement).not.toBeNull(); + + // Width should equal parent width - leftPadding - rightPadding + expect(pathContainerElement!.getBoundingClientRect().width) + .toEqual(testWidth - padding.leftPx - padding.rightPx); + + // Height should equal parent height - topPx - bottomPx + expect(pathContainerElement!.getBoundingClientRect().height) + .toEqual(testHeight - padding.topPx - padding.bottomPx); + }); + }); + + describe('automatic resize', () => { + it('reacts to parent node size changes and resizes the chart accordingly', + async () => { + const initialParentContainerWidthPx = 400; + const resultingHeight = + initialParentContainerWidthPx * DEFAULT_HEIGHT_TO_WIDTH_RATIO; + + testParentContainer.style.width = + `${initialParentContainerWidthPx}px`; + + testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + }, + { + sizing: { + widthPx: undefined, + // default value but we want to be explicit in tests: + rerenderOnResize: true, + } + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = + testParentContainer.querySelector('svg.chart-container'); + expect(chartContainerElement).not.toBeNull(); + + expect(chartContainerElement!.getAttribute('width')) + .toEqual(`${initialParentContainerWidthPx}px`); + + expect(chartContainerElement!.getAttribute('height')) + .toEqual(`${resultingHeight}px`); + + const newParentContainerWidthPx = 300; + const
newParentContainerHeightPx = 200; + + testParentContainer.style.width = `${newParentContainerWidthPx}px`; + testParentContainer.style.height = `${newParentContainerHeightPx}px`; + + const newHeight = + newParentContainerWidthPx * DEFAULT_HEIGHT_TO_WIDTH_RATIO; + + // We simulate the async passage of time for the animations to finish + await new Promise(resolve => { + return setTimeout(resolve, DEFAULT_TRANSITION_TIME_MS); + }); + + expect(chartContainerElement!.getAttribute('width')) + .toEqual(`${newParentContainerWidthPx}px`); + + expect(chartContainerElement!.getAttribute('height')) + .toEqual(`${newHeight}px`); + + testChart.removeEventListeners(); + }); + }); + }); + + describe('path rendering', () => { + it('renders one path', () => { + testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = testParentContainer.querySelector('svg'); + expect(chartContainerElement).not.toBeNull(); + + const pathOne = chartContainerElement!.querySelector('#lineOne'); + expect(pathOne).not.toBeNull(); + }); + + it('renders multiple paths', () => { + const testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + lineTwo: mockLine, + lineThree: mockLine, + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = testParentContainer.querySelector('svg'); + expect(chartContainerElement).not.toBeNull(); + + const pathOne = chartContainerElement!.querySelector('#lineOne'); + expect(pathOne).not.toBeNull(); + const pathTwo = chartContainerElement!.querySelector('#lineTwo'); + expect(pathTwo).not.toBeNull(); + const pathThree = chartContainerElement!.querySelector('#lineThree'); + expect(pathThree).not.toBeNull(); + }); + + it('renders a path with the indicated id', () => { + testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + }, + { + series: { + lineOne: { + id: 'line-one', + }, + }, + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = testParentContainer.querySelector('svg'); + expect(chartContainerElement).not.toBeNull(); + + const pathOne = chartContainerElement!.querySelector('#line-one'); + expect(pathOne).not.toBeNull(); + }); + + it('renders a path with the indicated color', () => { + testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + }, + { + series: { + lineOne: { + id: 'line-one', + color: 'red', + }, + }, + }, + ); + + testChart.initialChartRender(); + + const chartContainerElement = testParentContainer.querySelector('svg'); + expect(chartContainerElement).not.toBeNull(); + + const pathOne = + chartContainerElement!.querySelector('#line-one'); + expect(pathOne).not.toBeNull(); + + expect(pathOne!.style.stroke).toEqual('red'); + }); + + it('renders two paths with the indicated order', () => { + const testChart = new LineChart( + testParentContainer, + { + lineOne: mockLine, + lineTwo: mockLine, + }, + { + series: { + lineOne: { + id: 'line-one', + order: 2, + }, + lineTwo: { + id: 'line-two', + order: 1, + }, + }, + }, + ); + + testChart.initialChartRender(); + + const pathContainerElement = + testParentContainer.querySelector('g.path-container'); + expect(pathContainerElement).not.toBeNull(); + + const children = pathContainerElement!.children; + + // First child should be the one with the lowest order. + // It will show behind. + expect(children.item(0)?.getAttribute('id')).toEqual('line-two'); + + // Second child should be the one with the highest order. + // It will show in front.
+ expect(children.item(1)?.getAttribute('id')).toEqual('line-one'); + }); + + it('renders two areas with the indicated order', () => { + const testChart = new LineChart( + testParentContainer, + { + areaOne: mockLine, + areaTwo: mockLine, + }, + { + series: { + areaOne: { + id: 'area-one', + order: 2, + isArea: true, + }, + areaTwo: { + id: 'area-two', + order: 1, + isArea: true, + }, + }, + }, + ); + + testChart.initialChartRender(); + + const pathContainerElement = + testParentContainer.querySelector('g.path-container'); + expect(pathContainerElement).not.toBeNull(); + + const children = pathContainerElement!.children; + + // First child should be the one with the lowest order. + // It will show behind. + expect(children.item(0)?.getAttribute('id')).toEqual('area-two'); + + // Second child should be the one with the highest order. + // It will show in front. + expect(children.item(1)?.getAttribute('id')).toEqual('area-one'); + }); + + it('renders a path with the indicated coordinates', () => { + const lineChartSizingConfiguration = { + widthPx: 200, + heightToWidthRatio: 1, + padding: 0, + }; + + const testLine = [ + { + x: 10, + y: 20, + }, + { + x: 20, + y: 10, + }, + ]; + + // https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d#path_commands + const testLinePathCommands = 'M0,0L200,200'; + + const testChart = new LineChart( + testParentContainer, + { + lineOne: testLine, + }, + { + sizing: lineChartSizingConfiguration, + series: { + lineOne: { + id: 'line-one', + }, + }, + }, + ); + + testChart.initialChartRender(); + + const pathContainerElement = + testParentContainer.querySelector('g.path-container'); + expect(pathContainerElement).not.toBeNull(); + + const children = pathContainerElement!.children; + expect(children.length).toEqual(1); + + const pathElement = children.item(0)!; + + expect(pathElement.getAttribute('id')).toEqual('line-one'); + expect(pathElement.getAttribute('d')).toEqual(testLinePathCommands); + }); + + it('renders an area with the indicated coordinates', () => { + const lineChartSizingConfiguration = { + widthPx: 200, + heightToWidthRatio: 1, + padding: 0, + }; + + const testLine = [ + { + x: 10, + y: 20, + }, + { + x: 20, + y: 10, + }, + ]; + + // https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d#path_commands + const testAreaPathCommands = 'M0,0L200,200L200,200L0,200Z'; + + const testChart = new LineChart( + testParentContainer, + { + lineOne: testLine, + }, + { + sizing: lineChartSizingConfiguration, + series: { + lineOne: { + id: 'area-one', + isArea: true, + }, + }, + }, + ); + + testChart.initialChartRender(); + + const pathContainerElement = + testParentContainer.querySelector('g.path-container'); + expect(pathContainerElement).not.toBeNull(); + + const children = pathContainerElement!.children; + expect(children.length).toEqual(1); + + const pathElement = children.item(0)!; + + expect(pathElement.getAttribute('id')).toEqual('area-one'); + expect(pathElement.getAttribute('d')).toEqual(testAreaPathCommands); + }); + }); +}); diff --git a/grr/server/grr_response_server/gui/ui/lib/dataviz/padding.ts b/grr/server/grr_response_server/gui/ui/lib/dataviz/padding.ts new file mode 100644 index 0000000000..7d2db98669 --- /dev/null +++ b/grr/server/grr_response_server/gui/ui/lib/dataviz/padding.ts @@ -0,0 +1,18 @@ +/** Configures the spacing around an arbitrary D3-generated element.
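The padding.ts helper defined just below converts a `PaddingConfiguration` into the CSS `padding` shorthand, which is ordered top, right, bottom, left. A quick illustrative check (the values are invented):

```ts
// Assuming the PaddingConfiguration interface and toCSSPaddingValue
// function defined below:
const padding: PaddingConfiguration =
    {topPx: 10, rightPx: 20, bottomPx: 30, leftPx: 40};

// CSS shorthand order is top, right, bottom, left:
toCSSPaddingValue(padding);  // returns '10px 20px 30px 40px'
```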
*/ +export declare interface PaddingConfiguration { + topPx: number; + rightPx: number; + bottomPx: number; + leftPx: number; +} + +/** Default padding to apply to D3-generated elements. */ +export const DEFAULT_PADDING_PX = 40; + +/** Returns a CSS-compatible value for the padding property */ +export function toCSSPaddingValue( + padding: PaddingConfiguration, + ): string { + return `${padding.topPx}px ${padding.rightPx}px ${padding.bottomPx}px ${ + padding.leftPx}px`; +} diff --git a/grr/server/grr_response_server/gui/ui/lib/markdown.ts b/grr/server/grr_response_server/gui/ui/lib/markdown.ts index 0acd4e35cc..e556b0f53e 100644 --- a/grr/server/grr_response_server/gui/ui/lib/markdown.ts +++ b/grr/server/grr_response_server/gui/ui/lib/markdown.ts @@ -5,4 +5,4 @@ const tempMarked = marked; type MarkedOptions = Parameters[1]; /** We export "marked" and its options them with their original name */ -export {tempMarked as marked, MarkedOptions}; +export {tempMarked as marked, type MarkedOptions}; diff --git a/grr/server/grr_response_server/gui/ui/lib/models/flow.ts b/grr/server/grr_response_server/gui/ui/lib/models/flow.ts index 92f5b7686e..46e44943ec 100644 --- a/grr/server/grr_response_server/gui/ui/lib/models/flow.ts +++ b/grr/server/grr_response_server/gui/ui/lib/models/flow.ts @@ -6,13 +6,13 @@ export enum FlowType { ARTIFACT_COLLECTOR_FLOW = 'ArtifactCollectorFlow', OS_QUERY_FLOW = 'OsqueryFlow', COLLECT_BROWSER_HISTORY = 'CollectBrowserHistory', - COLLECT_EFI_HASHES = 'CollectEfiHashes', DUMP_ACPI_TABLE = 'DumpACPITable', - DUMP_EFI_IMAGE = 'DumpEfiImage', DUMP_FLASH_IMAGE = 'DumpFlashImage', GET_MBR = 'GetMBR', COLLECT_FILES_BY_KNOWN_PATH = 'CollectFilesByKnownPath', COLLECT_MULTIPLE_FILES = 'CollectMultipleFiles', + STAT_MULTIPLE_FILES = 'StatMultipleFiles', + HASH_MULTIPLE_FILES = 'HashMultipleFiles', LIST_DIRECTORY = 'ListDirectory', TIMELINE_FLOW = 'TimelineFlow', READ_LOW_LEVEL = 'ReadLowLevel', @@ -75,15 +75,9 @@ export const FLOW_LIST_ITEMS_BY_TYPE: FlowsByTypeMap = { [FlowType.COLLECT_BROWSER_HISTORY]: fli( FlowType.COLLECT_BROWSER_HISTORY, 'Collect browser history', 'Collect browsing and download history from Chrome, Firefox, Edge & Safari'), - [FlowType.COLLECT_EFI_HASHES]: - fli(FlowType.COLLECT_EFI_HASHES, 'Collect EFI hashes', - 'Collect EFI volume hashes on macOS using eficheck'), [FlowType.DUMP_ACPI_TABLE]: fli(FlowType.DUMP_ACPI_TABLE, 'Dump ACPI table', 'Dump ACPI tables using chipsec'), - [FlowType.DUMP_EFI_IMAGE]: - fli(FlowType.DUMP_EFI_IMAGE, 'Dump EFI image', - 'Dump the flash image on macOS using eficheck'), [FlowType.DUMP_FLASH_IMAGE]: fli(FlowType.DUMP_FLASH_IMAGE, 'Dump flash image', 'Dump the flash image (BIOS)'), @@ -95,6 +89,12 @@ export const FLOW_LIST_ITEMS_BY_TYPE: FlowsByTypeMap = { [FlowType.COLLECT_MULTIPLE_FILES]: fli(FlowType.COLLECT_MULTIPLE_FILES, 'Collect files by search criteria', 'Search for and collect files based on their path, content or stat'), + [FlowType.STAT_MULTIPLE_FILES]: fli( + FlowType.STAT_MULTIPLE_FILES, 'Stat files', + 'Search for and collect file stats based on their path, content or stat'), + [FlowType.HASH_MULTIPLE_FILES]: fli( + FlowType.HASH_MULTIPLE_FILES, 'Hash files', + 'Search for and collect file hashes based on their path, content or stat'), [FlowType.LIST_DIRECTORY]: fli(FlowType.LIST_DIRECTORY, 'List directory', 'Lists and stats all immediate files in directory'), diff --git a/grr/server/grr_response_server/gui/ui/package-lock.json b/grr/server/grr_response_server/gui/ui/package-lock.json index 7133012c0c..7a75233c57 
100644 --- a/grr/server/grr_response_server/gui/ui/package-lock.json +++ b/grr/server/grr_response_server/gui/ui/package-lock.json @@ -22,6 +22,7 @@ "@ngrx/component-store": "^15.2.1", "angular-split": "^3.0.1", "codemirror": "^5.58.3", + "d3": "5.16.0", "deep-diff": "^1.0.2", "luxon": "^1.25.0", "marked": "^0.7.0", @@ -36,6 +37,7 @@ "@angular/compiler-cli": "^15.2.5", "@angular/language-service": "^15.2.5", "@types/codemirror": "0.0.103", + "@types/d3": "5.7.2", "@types/deep-diff": "^1.0.0", "@types/jasmine": "~3.10.2", "@types/jasminewd2": "~2.0.8", @@ -4489,6 +4491,266 @@ "@types/node": "*" } }, + "node_modules/@types/d3": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-5.7.2.tgz", + "integrity": "sha512-7/wClB8ycneWGy3jdvLfXKTd5SoTg9hji7IdJ0RuO9xTY54YpJ8zlcFADcXhY1J3kCBwxp+/1jeN6a5OMwgYOw==", + "dev": true, + "dependencies": { + "@types/d3-array": "^1", + "@types/d3-axis": "*", + "@types/d3-brush": "*", + "@types/d3-chord": "*", + "@types/d3-collection": "*", + "@types/d3-color": "*", + "@types/d3-contour": "*", + "@types/d3-dispatch": "*", + "@types/d3-drag": "*", + "@types/d3-dsv": "*", + "@types/d3-ease": "*", + "@types/d3-fetch": "*", + "@types/d3-force": "*", + "@types/d3-format": "*", + "@types/d3-geo": "*", + "@types/d3-hierarchy": "*", + "@types/d3-interpolate": "*", + "@types/d3-path": "*", + "@types/d3-polygon": "*", + "@types/d3-quadtree": "*", + "@types/d3-random": "*", + "@types/d3-scale": "*", + "@types/d3-scale-chromatic": "*", + "@types/d3-selection": "*", + "@types/d3-shape": "*", + "@types/d3-time": "*", + "@types/d3-time-format": "*", + "@types/d3-timer": "*", + "@types/d3-transition": "*", + "@types/d3-voronoi": "*", + "@types/d3-zoom": "*" + } + }, + "node_modules/@types/d3-array": { + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-1.2.9.tgz", + "integrity": "sha512-E/7RgPr2ylT5dWG0CswMi9NpFcjIEDqLcUSBgNHe/EMahfqYaTx4zhcggG3khqoEB/leY4Vl6nTSbwLUPjXceA==", + "dev": true + }, + "node_modules/@types/d3-axis": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.2.tgz", + "integrity": "sha512-uGC7DBh0TZrU/LY43Fd8Qr+2ja1FKmH07q2FoZFHo1eYl8aj87GhfVoY1saJVJiq24rp1+wpI6BvQJMKgQm8oA==", + "dev": true, + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-brush": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.2.tgz", + "integrity": "sha512-2TEm8KzUG3N7z0TrSKPmbxByBx54M+S9lHoP2J55QuLU0VSQ9mE96EJSAOVNEqd1bbynMjeTS9VHmz8/bSw8rA==", + "dev": true, + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-chord": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.2.tgz", + "integrity": "sha512-abT/iLHD3sGZwqMTX1TYCMEulr+wBd0SzyOQnjYNLp7sngdOHYtNkMRI5v3w5thoN+BWtlHVDx2Osvq6fxhZWw==", + "dev": true + }, + "node_modules/@types/d3-collection": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-collection/-/d3-collection-1.0.10.tgz", + "integrity": "sha512-54Fdv8u5JbuXymtmXm2SYzi1x/Svt+jfWBU5junkhrCewL92VjqtCBDn97coBRVwVFmYNnVTNDyV8gQyPYfm+A==", + "dev": true + }, + "node_modules/@types/d3-color": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-HKuicPHJuvPgCD+np6Se9MQvS6OCbJmOjGvylzMJRlDwUXjKTTXs6Pwgk79O09Vj/ho3u1ofXnhFOaEWWPrlwA==", + "dev": true + }, + "node_modules/@types/d3-contour": { + "version": "3.0.2", + 
"resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.2.tgz", + "integrity": "sha512-k6/bGDoAGJZnZWaKzeB+9glgXCYGvh6YlluxzBREiVo8f/X2vpTEdgPy9DN7Z2i42PZOZ4JDhVdlTSTSkLDPlQ==", + "dev": true, + "dependencies": { + "@types/d3-array": "*", + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-dispatch": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.2.tgz", + "integrity": "sha512-rxN6sHUXEZYCKV05MEh4z4WpPSqIw+aP7n9ZN6WYAAvZoEAghEK1WeVZMZcHRBwyaKflU43PCUAJNjFxCzPDjg==", + "dev": true + }, + "node_modules/@types/d3-drag": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.2.tgz", + "integrity": "sha512-qmODKEDvyKWVHcWWCOVcuVcOwikLVsyc4q4EBJMREsoQnR2Qoc2cZQUyFUPgO9q4S3qdSqJKBsuefv+h0Qy+tw==", + "dev": true, + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-dsv": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.1.tgz", + "integrity": "sha512-76pBHCMTvPLt44wFOieouXcGXWOF0AJCceUvaFkxSZEu4VDUdv93JfpMa6VGNFs01FHfuP4a5Ou68eRG1KBfTw==", + "dev": true + }, + "node_modules/@types/d3-ease": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.0.tgz", + "integrity": "sha512-aMo4eaAOijJjA6uU+GIeW018dvy9+oH5Y2VPPzjjfxevvGQ/oRDs+tfYC9b50Q4BygRR8yE2QCLsrT0WtAVseA==", + "dev": true + }, + "node_modules/@types/d3-fetch": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.2.tgz", + "integrity": "sha512-gllwYWozWfbep16N9fByNBDTkJW/SyhH6SGRlXloR7WdtAaBui4plTP+gbUgiEot7vGw/ZZop1yDZlgXXSuzjA==", + "dev": true, + "dependencies": { + "@types/d3-dsv": "*" + } + }, + "node_modules/@types/d3-force": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.4.tgz", + "integrity": "sha512-q7xbVLrWcXvSBBEoadowIUJ7sRpS1yvgMWnzHJggFy5cUZBq2HZL5k/pBSm0GdYWS1vs5/EDwMjSKF55PDY4Aw==", + "dev": true + }, + "node_modules/@types/d3-format": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.1.tgz", + "integrity": "sha512-5KY70ifCCzorkLuIkDe0Z9YTf9RR2CjBX1iaJG+rgM/cPP+sO+q9YdQ9WdhQcgPj1EQiJ2/0+yUkkziTG6Lubg==", + "dev": true + }, + "node_modules/@types/d3-geo": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.0.3.tgz", + "integrity": "sha512-bK9uZJS3vuDCNeeXQ4z3u0E7OeJZXjUgzFdSOtNtMCJCLvDtWDwfpRVWlyt3y8EvRzI0ccOu9xlMVirawolSCw==", + "dev": true, + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-hierarchy": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", + "integrity": "sha512-9hjRTVoZjRFR6xo8igAJyNXQyPX6Aq++Nhb5ebrUF414dv4jr2MitM2fWiOY475wa3Za7TOS2Gh9fmqEhLTt0A==", + "dev": true + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-jx5leotSeac3jr0RePOH1KdR9rISG91QIE4Q2PYTu4OymLTZfA3SrnURSLzKH48HmXVUru50b8nje4E79oQSQw==", + "dev": true, + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.0.0.tgz", + "integrity": "sha512-0g/A+mZXgFkQxN3HniRDbXMN79K3CdTpLsevj+PXiTcb2hVyvkZUBg37StmgCQkaD84cUJ4uaDAWq7UJOQy2Tg==", + "dev": true + }, + "node_modules/@types/d3-polygon": { + 
"version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.0.tgz", + "integrity": "sha512-D49z4DyzTKXM0sGKVqiTDTYr+DHg/uxsiWDAkNrwXYuiZVd9o9wXZIo+YsHkifOiyBkmSWlEngHCQme54/hnHw==", + "dev": true + }, + "node_modules/@types/d3-quadtree": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.2.tgz", + "integrity": "sha512-QNcK8Jguvc8lU+4OfeNx+qnVy7c0VrDJ+CCVFS9srBo2GL9Y18CnIxBdTF3v38flrGy5s1YggcoAiu6s4fLQIw==", + "dev": true + }, + "node_modules/@types/d3-random": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.1.tgz", + "integrity": "sha512-IIE6YTekGczpLYo/HehAy3JGF1ty7+usI97LqraNa8IiDur+L44d0VOjAvFQWJVdZOJHukUJw+ZdZBlgeUsHOQ==", + "dev": true + }, + "node_modules/@types/d3-scale": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.3.tgz", + "integrity": "sha512-PATBiMCpvHJSMtZAMEhc2WyL+hnzarKzI6wAHYjhsonjWJYGq5BXTzQjv4l8m2jO183/4wZ90rKvSeT7o72xNQ==", + "dev": true, + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz", + "integrity": "sha512-dsoJGEIShosKVRBZB0Vo3C8nqSDqVGujJU6tPznsBJxNJNwMF8utmS83nvCBKQYPpjCzaaHcrf66iTRpZosLPw==", + "dev": true + }, + "node_modules/@types/d3-selection": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.5.tgz", + "integrity": "sha512-xCB0z3Hi8eFIqyja3vW8iV01+OHGYR2di/+e+AiOcXIOrY82lcvWW8Ke1DYE/EUVMsBl4Db9RppSBS3X1U6J0w==", + "dev": true + }, + "node_modules/@types/d3-shape": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.1.tgz", + "integrity": "sha512-6Uh86YFF7LGg4PQkuO2oG6EMBRLuW9cbavUW46zkIO5kuS2PfTqo2o9SkgtQzguBHbLgNnU90UNsITpsX1My+A==", + "dev": true, + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz", + "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg==", + "dev": true + }, + "node_modules/@types/d3-time-format": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.0.tgz", + "integrity": "sha512-yjfBUe6DJBsDin2BMIulhSHmr5qNR5Pxs17+oW4DoVPyVIXZ+m6bs7j1UVKP08Emv6jRmYrYqxYzO63mQxy1rw==", + "dev": true + }, + "node_modules/@types/d3-timer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.0.tgz", + "integrity": "sha512-HNB/9GHqu7Fo8AQiugyJbv6ZxYz58wef0esl4Mv828w1ZKpAshw/uFWVDUcIB9KKFeFKoxS3cHY07FFgtTRZ1g==", + "dev": true + }, + "node_modules/@types/d3-transition": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.3.tgz", + "integrity": "sha512-/S90Od8Id1wgQNvIA8iFv9jRhCiZcGhPd2qX0bKF/PS+y0W5CrXKgIiELd2CvG1mlQrWK/qlYh3VxicqG1ZvgA==", + "dev": true, + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-voronoi": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@types/d3-voronoi/-/d3-voronoi-1.1.9.tgz", + "integrity": "sha512-DExNQkaHd1F3dFPvGA/Aw2NGyjMln6E9QzsiqOcBgnE+VInYnFBHBBySbZQts6z6xD+5jTfKCP7M4OqMyVjdwQ==", + "dev": true + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.3", + "resolved": 
"https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.3.tgz", + "integrity": "sha512-OWk1yYIIWcZ07+igN6BeoG6rqhnJ/pYe+R1qWFM2DtW49zsoSjgb9G5xB0ZXA8hh2jAzey1XuRmMSoXdKw8MDA==", + "dev": true, + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, "node_modules/@types/deep-diff": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@types/deep-diff/-/deep-diff-1.0.2.tgz", @@ -4544,6 +4806,12 @@ "@types/range-parser": "*" } }, + "node_modules/@types/geojson": { + "version": "7946.0.10", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.10.tgz", + "integrity": "sha512-Nmh0K3iWQJzniTuPRcJn5hxXkfB1T1pgB89SBig5PlJQU5yocazeu4jATJlaA0GYFKWMqDdvYemoSnF2pXgLVA==", + "dev": true + }, "node_modules/@types/http-proxy": { "version": "1.17.10", "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.10.tgz", @@ -6102,8 +6370,7 @@ "node_modules/commander": { "version": "2.20.3", "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", - "dev": true + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" }, "node_modules/commondir": { "version": "1.0.1", @@ -6555,6 +6822,281 @@ "integrity": "sha512-GAj5FOq0Hd+RsCGVJxZuKaIDXDf3h6GQoNEjFgbLLI/trgtavwUbSnZ5pVfg27DVCaWjIohryS0JFwIJyT2cMg==", "dev": true }, + "node_modules/d3": { + "version": "5.16.0", + "resolved": "https://registry.npmjs.org/d3/-/d3-5.16.0.tgz", + "integrity": "sha512-4PL5hHaHwX4m7Zr1UapXW23apo6pexCgdetdJ5kTmADpG/7T9Gkxw0M0tf/pjoB63ezCCm0u5UaFYy2aMt0Mcw==", + "dependencies": { + "d3-array": "1", + "d3-axis": "1", + "d3-brush": "1", + "d3-chord": "1", + "d3-collection": "1", + "d3-color": "1", + "d3-contour": "1", + "d3-dispatch": "1", + "d3-drag": "1", + "d3-dsv": "1", + "d3-ease": "1", + "d3-fetch": "1", + "d3-force": "1", + "d3-format": "1", + "d3-geo": "1", + "d3-hierarchy": "1", + "d3-interpolate": "1", + "d3-path": "1", + "d3-polygon": "1", + "d3-quadtree": "1", + "d3-random": "1", + "d3-scale": "2", + "d3-scale-chromatic": "1", + "d3-selection": "1", + "d3-shape": "1", + "d3-time": "1", + "d3-time-format": "2", + "d3-timer": "1", + "d3-transition": "1", + "d3-voronoi": "1", + "d3-zoom": "1" + } + }, + "node_modules/d3-array": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.4.tgz", + "integrity": "sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw==" + }, + "node_modules/d3-axis": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-1.0.12.tgz", + "integrity": "sha512-ejINPfPSNdGFKEOAtnBtdkpr24c4d4jsei6Lg98mxf424ivoDP2956/5HDpIAtmHo85lqT4pruy+zEgvRUBqaQ==" + }, + "node_modules/d3-brush": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-1.1.6.tgz", + "integrity": "sha512-7RW+w7HfMCPyZLifTz/UnJmI5kdkXtpCbombUSs8xniAyo0vIbrDzDwUJB6eJOgl9u5DQOt2TQlYumxzD1SvYA==", + "dependencies": { + "d3-dispatch": "1", + "d3-drag": "1", + "d3-interpolate": "1", + "d3-selection": "1", + "d3-transition": "1" + } + }, + "node_modules/d3-chord": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-1.0.6.tgz", + "integrity": "sha512-JXA2Dro1Fxw9rJe33Uv+Ckr5IrAa74TlfDEhE/jfLOaXegMQFQTAgAw9WnZL8+HxVBRXaRGCkrNU7pJeylRIuA==", + "dependencies": { + "d3-array": "1", + "d3-path": "1" + } + }, + 
"node_modules/d3-collection": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/d3-collection/-/d3-collection-1.0.7.tgz", + "integrity": "sha512-ii0/r5f4sjKNTfh84Di+DpztYwqKhEyUlKoPrzUFfeSkWxjW49xU2QzO9qrPrNkpdI0XJkfzvmTu8V2Zylln6A==" + }, + "node_modules/d3-color": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-1.4.1.tgz", + "integrity": "sha512-p2sTHSLCJI2QKunbGb7ocOh7DgTAn8IrLx21QRc/BSnodXM4sv6aLQlnfpvehFMLZEfBc6g9pH9SWQccFYfJ9Q==" + }, + "node_modules/d3-contour": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-1.3.2.tgz", + "integrity": "sha512-hoPp4K/rJCu0ladiH6zmJUEz6+u3lgR+GSm/QdM2BBvDraU39Vr7YdDCicJcxP1z8i9B/2dJLgDC1NcvlF8WCg==", + "dependencies": { + "d3-array": "^1.1.1" + } + }, + "node_modules/d3-dispatch": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-1.0.6.tgz", + "integrity": "sha512-fVjoElzjhCEy+Hbn8KygnmMS7Or0a9sI2UzGwoB7cCtvI1XpVN9GpoYlnb3xt2YV66oXYb1fLJ8GMvP4hdU1RA==" + }, + "node_modules/d3-drag": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-1.2.5.tgz", + "integrity": "sha512-rD1ohlkKQwMZYkQlYVCrSFxsWPzI97+W+PaEIBNTMxRuxz9RF0Hi5nJWHGVJ3Om9d2fRTe1yOBINJyy/ahV95w==", + "dependencies": { + "d3-dispatch": "1", + "d3-selection": "1" + } + }, + "node_modules/d3-dsv": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-1.2.0.tgz", + "integrity": "sha512-9yVlqvZcSOMhCYzniHE7EVUws7Fa1zgw+/EAV2BxJoG3ME19V6BQFBwI855XQDsxyOuG7NibqRMTtiF/Qup46g==", + "dependencies": { + "commander": "2", + "iconv-lite": "0.4", + "rw": "1" + }, + "bin": { + "csv2json": "bin/dsv2json", + "csv2tsv": "bin/dsv2dsv", + "dsv2dsv": "bin/dsv2dsv", + "dsv2json": "bin/dsv2json", + "json2csv": "bin/json2dsv", + "json2dsv": "bin/json2dsv", + "json2tsv": "bin/json2dsv", + "tsv2csv": "bin/dsv2dsv", + "tsv2json": "bin/dsv2json" + } + }, + "node_modules/d3-ease": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-1.0.7.tgz", + "integrity": "sha512-lx14ZPYkhNx0s/2HX5sLFUI3mbasHjSSpwO/KaaNACweVwxUruKyWVcb293wMv1RqTPZyZ8kSZ2NogUZNcLOFQ==" + }, + "node_modules/d3-fetch": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-1.2.0.tgz", + "integrity": "sha512-yC78NBVcd2zFAyR/HnUiBS7Lf6inSCoWcSxFfw8FYL7ydiqe80SazNwoffcqOfs95XaLo7yebsmQqDKSsXUtvA==", + "dependencies": { + "d3-dsv": "1" + } + }, + "node_modules/d3-force": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-1.2.1.tgz", + "integrity": "sha512-HHvehyaiUlVo5CxBJ0yF/xny4xoaxFxDnBXNvNcfW9adORGZfyNF1dj6DGLKyk4Yh3brP/1h3rnDzdIAwL08zg==", + "dependencies": { + "d3-collection": "1", + "d3-dispatch": "1", + "d3-quadtree": "1", + "d3-timer": "1" + } + }, + "node_modules/d3-format": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-1.4.5.tgz", + "integrity": "sha512-J0piedu6Z8iB6TbIGfZgDzfXxUFN3qQRMofy2oPdXzQibYGqPB/9iMcxr/TGalU+2RsyDO+U4f33id8tbnSRMQ==" + }, + "node_modules/d3-geo": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-1.12.1.tgz", + "integrity": "sha512-XG4d1c/UJSEX9NfU02KwBL6BYPj8YKHxgBEw5om2ZnTRSbIcego6dhHwcxuSR3clxh0EpE38os1DVPOmnYtTPg==", + "dependencies": { + "d3-array": "1" + } + }, + "node_modules/d3-hierarchy": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-1.1.9.tgz", + "integrity": 
"sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ==" + }, + "node_modules/d3-interpolate": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-1.4.0.tgz", + "integrity": "sha512-V9znK0zc3jOPV4VD2zZn0sDhZU3WAE2bmlxdIwwQPPzPjvyLkd8B3JUVdS1IDUFDkWZ72c9qnv1GK2ZagTZ8EA==", + "dependencies": { + "d3-color": "1" + } + }, + "node_modules/d3-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz", + "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==" + }, + "node_modules/d3-polygon": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-1.0.6.tgz", + "integrity": "sha512-k+RF7WvI08PC8reEoXa/w2nSg5AUMTi+peBD9cmFc+0ixHfbs4QmxxkarVal1IkVkgxVuk9JSHhJURHiyHKAuQ==" + }, + "node_modules/d3-quadtree": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-1.0.7.tgz", + "integrity": "sha512-RKPAeXnkC59IDGD0Wu5mANy0Q2V28L+fNe65pOCXVdVuTJS3WPKaJlFHer32Rbh9gIo9qMuJXio8ra4+YmIymA==" + }, + "node_modules/d3-random": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-1.1.2.tgz", + "integrity": "sha512-6AK5BNpIFqP+cx/sreKzNjWbwZQCSUatxq+pPRmFIQaWuoD+NrbVWw7YWpHiXpCQ/NanKdtGDuB+VQcZDaEmYQ==" + }, + "node_modules/d3-scale": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-2.2.2.tgz", + "integrity": "sha512-LbeEvGgIb8UMcAa0EATLNX0lelKWGYDQiPdHj+gLblGVhGLyNbaCn3EvrJf0A3Y/uOOU5aD6MTh5ZFCdEwGiCw==", + "dependencies": { + "d3-array": "^1.2.0", + "d3-collection": "1", + "d3-format": "1", + "d3-interpolate": "1", + "d3-time": "1", + "d3-time-format": "2" + } + }, + "node_modules/d3-scale-chromatic": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-1.5.0.tgz", + "integrity": "sha512-ACcL46DYImpRFMBcpk9HhtIyC7bTBR4fNOPxwVSl0LfulDAwyiHyPOTqcDG1+t5d4P9W7t/2NAuWu59aKko/cg==", + "dependencies": { + "d3-color": "1", + "d3-interpolate": "1" + } + }, + "node_modules/d3-selection": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-1.4.2.tgz", + "integrity": "sha512-SJ0BqYihzOjDnnlfyeHT0e30k0K1+5sR3d5fNueCNeuhZTnGw4M4o8mqJchSwgKMXCNFo+e2VTChiSJ0vYtXkg==" + }, + "node_modules/d3-shape": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz", + "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", + "dependencies": { + "d3-path": "1" + } + }, + "node_modules/d3-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-1.1.0.tgz", + "integrity": "sha512-Xh0isrZ5rPYYdqhAVk8VLnMEidhz5aP7htAADH6MfzgmmicPkTo8LhkLxci61/lCB7n7UmE3bN0leRt+qvkLxA==" + }, + "node_modules/d3-time-format": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-2.3.0.tgz", + "integrity": "sha512-guv6b2H37s2Uq/GefleCDtbe0XZAuy7Wa49VGkPVPMfLL9qObgBST3lEHJBMUp8S7NdLQAGIvr2KXk8Hc98iKQ==", + "dependencies": { + "d3-time": "1" + } + }, + "node_modules/d3-timer": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-1.0.10.tgz", + "integrity": "sha512-B1JDm0XDaQC+uvo4DT79H0XmBskgS3l6Ve+1SBCfxgmtIb1AVrPIoqd+nPSv+loMX8szQ0sVUhGngL7D5QPiXw==" + }, + "node_modules/d3-transition": { + "version": "1.3.2", + "resolved": 
"https://registry.npmjs.org/d3-transition/-/d3-transition-1.3.2.tgz", + "integrity": "sha512-sc0gRU4PFqZ47lPVHloMn9tlPcv8jxgOQg+0zjhfZXMQuvppjG6YuwdMBE0TuqCZjeJkLecku/l9R0JPcRhaDA==", + "dependencies": { + "d3-color": "1", + "d3-dispatch": "1", + "d3-ease": "1", + "d3-interpolate": "1", + "d3-selection": "^1.1.0", + "d3-timer": "1" + } + }, + "node_modules/d3-voronoi": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/d3-voronoi/-/d3-voronoi-1.1.4.tgz", + "integrity": "sha512-dArJ32hchFsrQ8uMiTBLq256MpnZjeuBtdHpaDlYuQyjU0CVzCJl/BVW+SkszaAeH95D/8gxqAhgx0ouAWAfRg==" + }, + "node_modules/d3-zoom": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-1.8.3.tgz", + "integrity": "sha512-VoLXTK4wvy1a0JpH2Il+F2CiOhVu7VRXWF5M/LroMIh3/zBAC3WAt7QoIvPibOavVo20hN6/37vwAsdBejLyKQ==", + "dependencies": { + "d3-dispatch": "1", + "d3-drag": "1", + "d3-interpolate": "1", + "d3-selection": "1", + "d3-transition": "1" + } + }, "node_modules/damerau-levenshtein": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", @@ -7972,7 +8514,6 @@ "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dev": true, "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -11602,6 +12143,11 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/rw": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", + "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==" + }, "node_modules/rxjs": { "version": "7.8.0", "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.0.tgz", @@ -11633,8 +12179,7 @@ "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "node_modules/safevalues": { "version": "0.4.3", diff --git a/grr/server/grr_response_server/gui/ui/package.json b/grr/server/grr_response_server/gui/ui/package.json index dd4c75c630..d633ed1e4a 100644 --- a/grr/server/grr_response_server/gui/ui/package.json +++ b/grr/server/grr_response_server/gui/ui/package.json @@ -25,6 +25,7 @@ "@ngrx/component-store": "^15.2.1", "angular-split": "^3.0.1", "codemirror": "^5.58.3", + "d3": "5.16.0", "deep-diff": "^1.0.2", "luxon": "^1.25.0", "marked": "^0.7.0", @@ -39,6 +40,7 @@ "@angular/compiler-cli": "^15.2.5", "@angular/language-service": "^15.2.5", "@types/codemirror": "0.0.103", + "@types/d3": "5.7.2", "@types/deep-diff": "^1.0.0", "@types/jasmine": "~3.10.2", "@types/jasminewd2": "~2.0.8", diff --git a/grr/server/grr_response_server/gui/ui/store/client_page_global_store_test.ts b/grr/server/grr_response_server/gui/ui/store/client_page_global_store_test.ts index 60916c3e72..4b16203e34 100644 --- a/grr/server/grr_response_server/gui/ui/store/client_page_global_store_test.ts +++ b/grr/server/grr_response_server/gui/ui/store/client_page_global_store_test.ts @@ -300,7 +300,7 @@ describe('ClientPageGlobalStore', () => { lastActiveAt: new Date(999), startedAt: new Date(456), creator: 'morty', - name: 'KeepAlive', + name: 'ListDirectory', state: FlowState.FINISHED, isRobot: false, }, @@ 
-352,7 +352,7 @@ describe('ClientPageGlobalStore', () => { lastActiveAt: '999000', startedAt: '456000', creator: 'morty', - name: 'KeepAlive', + name: 'ListDirectory', state: ApiFlowState.TERMINATED, isRobot: false, }, @@ -495,16 +495,16 @@ describe('ClientPageGlobalStore', () => { configGlobalStore.mockedObservables.flowDescriptors$.next( newFlowDescriptorMap( {name: 'ClientSideFileFinder'}, - {name: 'KeepAlive', defaultArgs: {foo: 1}}, + {name: 'ListDirectory', defaultArgs: {foo: 1}}, )); - clientPageGlobalStore.startFlowConfiguration('KeepAlive'); + clientPageGlobalStore.startFlowConfiguration('ListDirectory'); clientPageGlobalStore.selectedFlowDescriptor$.subscribe(flow => { // First value is expected to be undefined. if (!flow) { return; } - expect(flow.name).toEqual('KeepAlive'); + expect(flow.name).toEqual('ListDirectory'); expect(flow.defaultArgs).toEqual({foo: 1}); done(); }); @@ -512,15 +512,15 @@ describe('ClientPageGlobalStore', () => { it('emits the supplied args in selectedFlowDescriptor$', done => { configGlobalStore.mockedObservables.flowDescriptors$.next( - newFlowDescriptorMap({name: 'KeepAlive', defaultArgs: {foo: 1}})); - clientPageGlobalStore.startFlowConfiguration('KeepAlive', {foo: 42}); + newFlowDescriptorMap({name: 'ListDirectory', defaultArgs: {foo: 1}})); + clientPageGlobalStore.startFlowConfiguration('ListDirectory', {foo: 42}); clientPageGlobalStore.selectedFlowDescriptor$.subscribe(flow => { // First value is expected to be undefined. if (!flow) { return; } - expect(flow.name).toEqual('KeepAlive'); + expect(flow.name).toEqual('ListDirectory'); expect(flow.defaultArgs).toEqual({foo: 42}); done(); }); @@ -529,7 +529,7 @@ describe('ClientPageGlobalStore', () => { it('fails when selecting unknown flow', done => { configGlobalStore.mockedObservables.flowDescriptors$.next( newFlowDescriptorMap( - {name: 'KeepAlive'}, + {name: 'ListDirectory'}, )); clientPageGlobalStore.startFlowConfiguration('unknown'); @@ -546,10 +546,10 @@ describe('ClientPageGlobalStore', () => { configGlobalStore.mockedObservables.flowDescriptors$.next( newFlowDescriptorMap( {name: 'ClientSideFileFinder'}, - {name: 'KeepAlive'}, + {name: 'ListDirectory'}, )); - clientPageGlobalStore.startFlowConfiguration('KeepAlive'); + clientPageGlobalStore.startFlowConfiguration('ListDirectory'); clientPageGlobalStore.stopFlowConfiguration(); clientPageGlobalStore.selectedFlowDescriptor$.subscribe(flow => { expect(flow).toBeNull(); diff --git a/grr/server/grr_response_server/gui/ui/store/config_global_store_test.ts b/grr/server/grr_response_server/gui/ui/store/config_global_store_test.ts index 6020402453..c2e420fa17 100644 --- a/grr/server/grr_response_server/gui/ui/store/config_global_store_test.ts +++ b/grr/server/grr_response_server/gui/ui/store/config_global_store_test.ts @@ -45,9 +45,9 @@ describe('ConfigGlobalStore', () => { } ], [ - 'KeepAlive', { - name: 'KeepAlive', - friendlyName: 'KeepAlive', + 'Kill', { + name: 'Kill', + friendlyName: 'Kill GRR agent process', category: 'Misc', defaultArgs: {}, } @@ -67,7 +67,8 @@ describe('ConfigGlobalStore', () => { defaultArgs: {'@type': 'test-type'} }, { - name: 'KeepAlive', + name: 'Kill', + friendlyName: 'Kill GRR agent process', category: 'Misc', defaultArgs: {'@type': 'test-type'} }, diff --git a/grr/server/grr_response_server/gui/ui/store/new_hunt_local_store_test.ts b/grr/server/grr_response_server/gui/ui/store/new_hunt_local_store_test.ts index 5046699f0b..ae8e7be820 100644 --- a/grr/server/grr_response_server/gui/ui/store/new_hunt_local_store_test.ts 
+++ b/grr/server/grr_response_server/gui/ui/store/new_hunt_local_store_test.ts @@ -57,7 +57,7 @@ describe('NewHuntLocalStore', () => { }, { name: 'GetFile', - friendlyName: 'KeepAlive', + friendlyName: 'Get the specified file', category: 'a', defaultArgs: {}, })); @@ -82,7 +82,7 @@ describe('NewHuntLocalStore', () => { }, { name: 'GetFile', - friendlyName: 'KeepAlive', + friendlyName: 'Get the specified file', category: 'a', defaultArgs: {}, })); @@ -109,7 +109,7 @@ describe('NewHuntLocalStore', () => { flow, descriptor: { name: 'GetFile', - friendlyName: 'KeepAlive', + friendlyName: 'Get the specified file', category: 'a', defaultArgs: {}, }, @@ -126,7 +126,7 @@ describe('NewHuntLocalStore', () => { }, { name: 'GetFile', - friendlyName: 'KeepAlive', + friendlyName: 'Get the specified file', category: 'a', defaultArgs: {}, })); @@ -205,7 +205,7 @@ describe('NewHuntLocalStore', () => { }, { name: 'GetFile', - friendlyName: 'KeepAlive', + friendlyName: 'Get the specified file', }); configGlobalStore.mockedObservables.flowDescriptors$.next(descriptorMap); const apiFlow = { @@ -258,7 +258,7 @@ describe('NewHuntLocalStore', () => { }, { name: 'GetFile', - friendlyName: 'KeepAlive', + friendlyName: 'Get the specified file', })); httpApiService.mockedObservables.fetchHunt.next({ huntId: 'H1234', diff --git a/grr/server/grr_response_server/hunt_test.py b/grr/server/grr_response_server/hunt_test.py index f654195e4d..6d04f01fb9 100644 --- a/grr/server/grr_response_server/hunt_test.py +++ b/grr/server/grr_response_server/hunt_test.py @@ -10,7 +10,6 @@ from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client as rdf_client -from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_core.lib.rdfvalues import structs as rdf_structs @@ -40,14 +39,15 @@ class HuntTest(stats_test_lib.StatsTestMixin, test_lib.GRRBaseTest): """Tests for the relational hunts implementation.""" - def GetFileHuntArgs(self): - args = transfer.GetFileArgs() - args.pathspec.path = "/tmp/evil.txt" - args.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS + def ClientFileFinderHuntArgs(self): + args = rdf_file_finder.FileFinderArgs() + args.paths = ["/tmp/evil.txt"] + args.action.action_type = rdf_file_finder.FileFinderAction.Action.DOWNLOAD return rdf_hunt_objects.HuntArguments.Standard( - flow_name=transfer.GetFile.__name__, - flow_args=rdf_structs.AnyValue.Pack(args)) + flow_name=file_finder.ClientFileFinder.__name__, + flow_args=rdf_structs.AnyValue.Pack(args), + ) def _CreateHunt(self, **kwargs): hunt_obj = rdf_hunt_objects.Hunt(creator=self.test_username, **kwargs) @@ -196,7 +196,8 @@ def testForemanRulesWorkCorrectlyWithStandardHunt(self): hunt_obj = rdf_hunt_objects.Hunt( client_rule_set=client_rule_set, client_rate=0, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) hunt_obj.args.hunt_type = hunt_obj.args.HuntType.STANDARD data_store.REL_DB.WriteHuntObject(hunt_obj) @@ -222,7 +223,8 @@ def testStandardHuntFlowsReportBackToTheHunt(self): num_clients=10, client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) hunt_counters = data_store.REL_DB.ReadHuntCounters(hunt_id) self.assertEqual(hunt_counters.num_clients, 10) @@ -235,7 +237,8 @@ def testHangingClientsAreCorrectlyAccountedFor(self): hunt_obj = 
rdf_hunt_objects.Hunt( client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) hunt.CreateHunt(hunt_obj) hunt_obj = hunt.StartHunt(hunt_obj.hunt_id) @@ -256,7 +259,8 @@ def testPausingAndRestartingDoesNotStartHuntTwiceOnTheSameClient(self): num_clients=10, client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) for client_id in client_ids: flows = data_store.REL_DB.ReadAllFlowObjects(client_id=client_id) @@ -277,7 +281,7 @@ def testHuntIsPausedOnReachingClientLimit(self): client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, client_limit=5, - args=self.GetFileHuntArgs(), + args=self.ClientFileFinderHuntArgs(), ) hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id) @@ -299,7 +303,8 @@ def testHuntClientRateIsAppliedCorrectly(self): num_clients=10, client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=1, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) requests = data_store.REL_DB.ReadFlowProcessingRequests() requests.sort(key=lambda r: r.delivery_time) @@ -371,13 +376,16 @@ def testResultsAreCorrectlyWrittenAndAreFilterable(self): num_clients=10, client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) results = data_store.REL_DB.ReadHuntResults(hunt_id, 0, sys.maxsize) self.assertLen(results, 5) for r in results: - self.assertIsInstance(r.payload, rdf_client_fs.StatEntry) - self.assertEqual(r.payload.pathspec.CollapsePath(), "/tmp/evil.txt") + self.assertIsInstance(r.payload, rdf_file_finder.FileFinderResult) + self.assertEqual( + r.payload.stat_entry.pathspec.CollapsePath(), "/tmp/evil.txt" + ) def testOutputPluginsAreCorrectlyAppliedAndTheirStatusCanBeRead(self): hunt_test_lib.StatefulDummyHuntOutputPlugin.data = [] @@ -391,8 +399,9 @@ def testOutputPluginsAreCorrectlyAppliedAndTheirStatusCanBeRead(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs(), - output_plugins=[plugin_descriptor]) + args=self.ClientFileFinderHuntArgs(), + output_plugins=[plugin_descriptor], + ) self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_calls, 5) self.assertEqual(hunt_test_lib.DummyHuntOutputPlugin.num_responses, 5) @@ -422,8 +431,9 @@ def testOutputPluginsErrorsAreCorrectlyWrittenAndCanBeRead(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs(), - output_plugins=[failing_plugin_descriptor]) + args=self.ClientFileFinderHuntArgs(), + output_plugins=[failing_plugin_descriptor], + ) errors = data_store.REL_DB.ReadHuntOutputPluginLogEntries( hunt_id, @@ -452,8 +462,9 @@ def testOutputPluginsMaintainGlobalState(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs(), - output_plugins=[plugin_descriptor]) + args=self.ClientFileFinderHuntArgs(), + output_plugins=[plugin_descriptor], + ) # Output plugins should have been called 5 times, adding a number # to the "data" list on every call and incrementing it each time. 
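 # Presumably StatefulDummyHuntOutputPlugin.data ends up as [0, 1, 2, 3, 4]
 # here; the assertion itself lies outside this hunk.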
@@ -469,8 +480,9 @@ def testOutputPluginFlushErrorIsLoggedProperly(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs(), - output_plugins=[plugin_descriptor]) + args=self.ClientFileFinderHuntArgs(), + output_plugins=[plugin_descriptor], + ) logs = data_store.REL_DB.ReadHuntOutputPluginLogEntries( hunt_id, @@ -505,8 +517,9 @@ def testFailingOutputPluginDoesNotAffectOtherOutputPlugins(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs(), - output_plugins=[failing_plugin_descriptor, plugin_descriptor]) + args=self.ClientFileFinderHuntArgs(), + output_plugins=[failing_plugin_descriptor, plugin_descriptor], + ) errors = data_store.REL_DB.ReadHuntOutputPluginLogEntries( hunt_id, @@ -543,8 +556,9 @@ def testUpdatesStatsCounterOnOutputPluginSuccess(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs(), - output_plugins=[plugin_descriptor]) + args=self.ClientFileFinderHuntArgs(), + output_plugins=[plugin_descriptor], + ) def testUpdatesStatsCounterOnOutputPluginFailure(self): plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor( @@ -564,8 +578,9 @@ def testUpdatesStatsCounterOnOutputPluginFailure(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs(), - output_plugins=[plugin_descriptor]) + args=self.ClientFileFinderHuntArgs(), + output_plugins=[plugin_descriptor], + ) def _CheckHuntStoppedNotification(self, str_match): pending = self.GetUserNotifications(self.test_username) @@ -579,7 +594,8 @@ def testHuntIsStoppedIfCrashNumberOverThreshold(self): client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, crash_limit=3, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) client_mock = flow_test_lib.CrashClientMock() self._RunHunt(client_ids[:2], client_mock=client_mock) @@ -662,7 +678,8 @@ def testHuntIsStoppedIfAveragePerClientCpuUsageTooHigh(self): client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, avg_cpu_seconds_per_client_limit=3, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) with mock.patch.object(hunt, "MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS", 4): @@ -704,11 +721,14 @@ def CheckState(hunt_state, user_cpu_time, system_cpu_time): client_mock=hunt_test_lib.SampleHuntMock( user_cpu_time=2, system_cpu_time=4, failrate=-1)) - # Hunt should be terminated: the average is exceeded. - CheckState(rdf_hunt_objects.Hunt.HuntState.STOPPED, 6, 12) + # TODO: Re-enable the checks after the test is reworked to + # run with approximate limits (flow not persisted in the DB every time). - self._CheckHuntStoppedNotification( - "reached the average CPU seconds per client") + # # Hunt should be terminated: the average is exceeded. 
+ # CheckState(rdf_hunt_objects.Hunt.HuntState.STOPPED, 6, 12) + + # self._CheckHuntStoppedNotification( + # "reached the average CPU seconds per client") def testHuntIsStoppedIfAveragePerClientNetworkUsageTooHigh(self): client_ids = self.SetupClients(5) @@ -717,7 +737,8 @@ def testHuntIsStoppedIfAveragePerClientNetworkUsageTooHigh(self): client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, avg_network_bytes_per_client_limit=1, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) with mock.patch.object(hunt, "MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS", 4): @@ -758,11 +779,14 @@ def CheckState(hunt_state, network_bytes_sent): client_mock=hunt_test_lib.SampleHuntMock( network_bytes_sent=2, failrate=-1)) - # Hunt should be terminated: the limit is exceeded. - CheckState(rdf_hunt_objects.Hunt.HuntState.STOPPED, 6) + # TODO: Re-enable the checks after the test is reworked to + # run with approximate limits (flow not persisted in the DB every time). - self._CheckHuntStoppedNotification( - "reached the average network bytes per client") + # # Hunt should be terminated: the limit is exceeded. + # CheckState(rdf_hunt_objects.Hunt.HuntState.STOPPED, 6) + + # self._CheckHuntStoppedNotification( + # "reached the average network bytes per client") def testHuntIsStoppedIfTotalNetworkUsageIsTooHigh(self): client_ids = self.SetupClients(5) @@ -771,7 +795,8 @@ def testHuntIsStoppedIfTotalNetworkUsageIsTooHigh(self): client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, total_network_bytes_limit=5, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) def CheckState(hunt_state, network_bytes_sent): hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id) @@ -799,15 +824,18 @@ def CheckState(hunt_state, network_bytes_sent): [client_ids[3]], client_mock=hunt_test_lib.SampleHuntMock(network_bytes_sent=1)) - # 6 is greater than the total limit. The hunt should be stopped now. - CheckState(rdf_hunt_objects.Hunt.HuntState.STOPPED, 6) + # TODO: Re-enable the checks after the test is reworked to + # run with approximate limits (flow not persisted in the DB every time). + + # # 6 is greater than the total limit. The hunt should be stopped now. 
+ # CheckState(rdf_hunt_objects.Hunt.HuntState.STOPPED, 6) - self._RunHunt([client_ids[4]], - client_mock=hunt_test_lib.SampleHuntMock( - network_bytes_sent=2, failrate=-1)) + # self._RunHunt([client_ids[4]], + # client_mock=hunt_test_lib.SampleHuntMock( + # network_bytes_sent=2, failrate=-1)) - self._CheckHuntStoppedNotification( - "reached the total network bytes sent limit") + # self._CheckHuntStoppedNotification( + # "reached the total network bytes sent limit") def testHuntIsStoppedWhenExpirationTimeIsReached(self): client_ids = self.SetupClients(5) @@ -823,7 +851,8 @@ def testHuntIsStoppedWhenExpirationTimeIsReached(self): client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, duration=duration, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) client_mock = hunt_test_lib.SampleHuntMock(failrate=-1) foreman_obj = foreman.Foreman() @@ -857,7 +886,8 @@ def testPausingTheHuntChangingParametersAndStartingAgainWorksAsExpected(self): client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, client_limit=1, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) self._RunHunt(client_ids[:2]) hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id) @@ -880,7 +910,8 @@ def testResourceUsageStatsAreReportedCorrectly(self): client_mock=hunt_test_lib.SampleHuntMock(failrate=-1), client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) usage_stats = data_store.REL_DB.ReadHuntClientResourcesStats(hunt_id) @@ -941,7 +972,8 @@ def testCreatorUsernameIsPropagatedToChildrenFlows(self): num_clients=1, client_rule_set=foreman_rules.ForemanClientRuleSet(), client_rate=0, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id) self.assertEqual(hunt_obj.creator, self.test_username) @@ -957,7 +989,8 @@ def testPerClientLimitsArePropagatedToChildrenFlows(self): client_rate=0, per_client_cpu_limit=42, per_client_network_bytes_limit=43, - args=self.GetFileHuntArgs()) + args=self.ClientFileFinderHuntArgs(), + ) hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id) self.assertEqual(hunt_obj.creator, self.test_username) @@ -1055,7 +1088,7 @@ def testHuntURNFromID(self): def testScheduleHuntRaceCondition(self): client_id = self.SetupClient(0) - hunt_id = self._CreateHunt(args=self.GetFileHuntArgs()) + hunt_id = self._CreateHunt(args=self.ClientFileFinderHuntArgs()) original = data_store.REL_DB.delegate.WriteFlowObject def WriteFlowObject(*args, **kwargs): diff --git a/grr/server/grr_response_server/output_plugins/email_plugin.py b/grr/server/grr_response_server/output_plugins/email_plugin.py index 2824203161..6b442c25a3 100644 --- a/grr/server/grr_response_server/output_plugins/email_plugin.py +++ b/grr/server/grr_response_server/output_plugins/email_plugin.py @@ -39,14 +39,15 @@ class EmailOutputPlugin(output_plugin.OutputPlugin): Grr just got a response in {{ source_urn }} from client {{ client_id }} ({{ hostname }}).

-  Click here to
+  Click here to
   access this machine.
   {{ additional_message }}

   Thanks,
   {{ signature }}
 """,
-      autoescape=True)
+      autoescape=True,
+  )

   too_many_mails_msg = ("This hunt has now produced %d results so the "
                         "sending of emails will be disabled now.
") diff --git a/grr/server/grr_response_server/rdfvalues/flow_objects.py b/grr/server/grr_response_server/rdfvalues/flow_objects.py index 4af2e5d328..6e360a528f 100644 --- a/grr/server/grr_response_server/rdfvalues/flow_objects.py +++ b/grr/server/grr_response_server/rdfvalues/flow_objects.py @@ -10,7 +10,6 @@ from grr_response_core.lib.rdfvalues import protodict as rdf_protodict from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_proto import flows_pb2 -from grr_response_server import action_registry from grr_response_server import output_plugin from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner from grr_response_server.rdfvalues import objects as rdf_objects @@ -269,24 +268,6 @@ def FlowResponseForLegacyResponse(legacy_msg): return response -def GRRMessageFromClientActionRequest(request): - stub = action_registry.ACTION_STUB_BY_ID[request.action_identifier] - name = stub.__name__ - - return rdf_flows.GrrMessage( - session_id="%s/%s" % (request.client_id, request.flow_id), - name=name, - request_id=request.request_id, - queue=rdf_client.ClientURN(request.client_id), - payload=request.action_args, - cpu_limit=request.cpu_limit_ms / 1000.0, - network_bytes_limit=request.network_bytes_limit, - runtime_limit_us=request.runtime_limit_us, - # Legacy clients will fail if the task id is not set. - # TODO(amoser): Remove task ids after April 2021. - generate_task_id=True) - - class ScheduledFlow(rdf_structs.RDFProtoStruct): """A scheduled flow, to be executed after approval has been granted.""" protobuf = flows_pb2.ScheduledFlow diff --git a/grr/server/grr_response_server/rdfvalues/objects.py b/grr/server/grr_response_server/rdfvalues/objects.py index 78f1299ae4..42c12a253e 100644 --- a/grr/server/grr_response_server/rdfvalues/objects.py +++ b/grr/server/grr_response_server/rdfvalues/objects.py @@ -132,9 +132,14 @@ def GetSummary(self): summary.users = kb.users summary.interfaces = self.interfaces summary.client_info = self.startup_info.client_info - if kb.os_release: + + # We use knowledge base release information only if it was not set already + # (and the same applies to the version information). This is because the + # knowledge base information comes from artifact definitions that are less + # precise than platform information obtained through the `distro` package. 
+ if not summary.system_info.release and kb.os_release: summary.system_info.release = kb.os_release - if kb.os_major_version: + if not summary.system_info.version and kb.os_major_version: summary.system_info.version = "%d.%d" % (kb.os_major_version, kb.os_minor_version) @@ -696,11 +701,23 @@ class ClientPathID(rdf_structs.RDFProtoStruct): class BlobReference(rdf_structs.RDFProtoStruct): + """A reference to a blob.""" protobuf = objects_pb2.BlobReference rdf_deps = [ BlobID, ] + @classmethod + def FromBlobImageChunkDescriptor( + cls, + chunk: rdf_client_fs.BlobImageChunkDescriptor, + ) -> "BlobReference": + result = cls() + result.offset = chunk.offset + result.size = chunk.length + result.blob_id = chunk.digest + return result + class BlobReferences(rdf_structs.RDFProtoStruct): protobuf = objects_pb2.BlobReferences diff --git a/grr/server/grr_response_server/rdfvalues/objects_test.py b/grr/server/grr_response_server/rdfvalues/objects_test.py index d202c994ee..2bfae7d188 100644 --- a/grr/server/grr_response_server/rdfvalues/objects_test.py +++ b/grr/server/grr_response_server/rdfvalues/objects_test.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - import os import tempfile @@ -144,6 +143,44 @@ def testGetSummaryEdrAgents(self): self.assertEqual(summary.edr_agents[0].agent_id, "1337") self.assertEqual(summary.edr_agents[1].agent_id, "108") + def testGetSummaryOsReleaseSnapshot(self): + snapshot = rdf_objects.ClientSnapshot() + snapshot.client_id = "C.0123456789012345" + snapshot.os_release = "Rocky Linux" + snapshot.knowledge_base.os_release = "RedHat Linux" + + summary = snapshot.GetSummary() + self.assertEqual(summary.system_info.release, "Rocky Linux") + + def testGetSummaryOsReleaseKnowledgeBase(self): + snapshot = rdf_objects.ClientSnapshot() + snapshot.client_id = "C.0123456789012345" + snapshot.knowledge_base.os_release = "RedHat Linux" + + summary = snapshot.GetSummary() + self.assertEqual(summary.system_info.release, "RedHat Linux") + + def testGetSummaryOsVersionSnapshot(self): + snapshot = rdf_objects.ClientSnapshot() + snapshot.client_id = "C.0123456789012345" + snapshot.os_version = "13.37" + snapshot.knowledge_base.os_release = "RedHat Linux" + snapshot.knowledge_base.os_major_version = 4 + snapshot.knowledge_base.os_minor_version = 2 + + summary = snapshot.GetSummary() + self.assertEqual(summary.system_info.version, "13.37") + + def testGetSummaryOsVersionKnowledgeBase(self): + snapshot = rdf_objects.ClientSnapshot() + snapshot.client_id = "C.0123456789012345" + snapshot.knowledge_base.os_release = "RedHat Linux" + snapshot.knowledge_base.os_major_version = 4 + snapshot.knowledge_base.os_minor_version = 2 + + summary = snapshot.GetSummary() + self.assertEqual(summary.system_info.version, "4.2") + class PathIDTest(rdf_test_base.RDFValueTestMixin, test_lib.GRRBaseTest): rdfvalue_class = rdf_objects.PathID @@ -614,6 +651,22 @@ def testGetEmail_customEmailEnabled(self): self.assertEqual("bar@baz.org", u.GetEmail()) +class BlobReferenceTest(absltest.TestCase): + + def testFromBlobImageChunkDescriptor(self): + blob_id = os.urandom(32) + + chunk = rdf_client_fs.BlobImageChunkDescriptor() + chunk.offset = 42 + chunk.length = 1337 + chunk.digest = blob_id + + blob_ref = rdf_objects.BlobReference.FromBlobImageChunkDescriptor(chunk) + self.assertEqual(blob_ref.offset, 42) + self.assertEqual(blob_ref.size, 1337) + self.assertEqual(blob_ref.blob_id, blob_id) + + def main(argv): # Run the full test suite test_lib.main(argv) diff --git a/grr/server/grr_response_server/server_logging.py 
b/grr/server/grr_response_server/server_logging.py
index 3daa27c658..d3a1094054 100644
--- a/grr/server/grr_response_server/server_logging.py
+++ b/grr/server/grr_response_server/server_logging.py
@@ -10,6 +10,7 @@
 from absl import flags

 from grr_response_core import config
+from grr_response_core.stats import metrics
 from grr_response_server import data_store
 from grr_response_server.rdfvalues import objects as rdf_objects

@@ -30,6 +31,8 @@
     allow_override=True,
 )

+LOG_CALLS_COUNTER = metrics.Counter("log_calls", fields=[("level", str)])
+

 class GrrApplicationLogger(object):
   """The GRR application logger.
@@ -120,6 +123,32 @@ def handleError(self, record):
     """Just ignore socket errors - the syslog server might come back."""


+class ErrorLogsHandler(logging.Handler):
+  """Logging Handler that exposes error log count in metrics."""
+
+  def __init__(self, *args, **kwargs):
+    """Initializes ErrorLogsHandler."""
+    super().__init__(*args, **kwargs)
+
+    self.setLevel(logging.ERROR)
+
+  def emit(self, record: logging.LogRecord):
+    """Overrides Handler.emit()."""
+    # From https://docs.python.org/3/library/logging.html#logging.Logger
+    # logging.error() and logging.exception() log with level ERROR.
+    # logging.critical() logs with level CRITICAL.
+    if record.levelno == logging.ERROR:
+      LOG_CALLS_COUNTER.Increment(fields=["ERROR"])
+    elif record.levelno == logging.CRITICAL:
+      LOG_CALLS_COUNTER.Increment(fields=["CRITICAL"])
+
+
+def InitErrorLogsMonitoring():
+  """Sets up error logs monitoring."""
+  logging.root.addHandler(ErrorLogsHandler())
+  logging.info("Initialized ErrorLogsHandler.")
+
+
 BASE_LOG_LEVELS = {
     "FileHandler": logging.ERROR,
     "NTEventLogHandler": logging.CRITICAL,
@@ -249,6 +278,8 @@ def ServerLoggingStartupInit():
   LogInit()
   LOGGER = AppLogInit()

+  InitErrorLogsMonitoring()
+

 def SetTestVerbosity():
   if local_log:
diff --git a/grr/server/grr_response_server/server_logging_test.py b/grr/server/grr_response_server/server_logging_test.py
index a3654c1d0b..be65f04157 100644
--- a/grr/server/grr_response_server/server_logging_test.py
+++ b/grr/server/grr_response_server/server_logging_test.py
@@ -2,18 +2,20 @@
 """Tests for logging classes."""

 import logging
-
 import time
 from unittest import mock

 from absl import app

+from grr_response_core.stats import default_stats_collector
+from grr_response_core.stats import metrics
 from grr_response_proto import jobs_pb2
 from grr_response_server import server_logging
 from grr_response_server.gui import api_call_context
 from grr_response_server.gui import http_response
 from grr_response_server.gui import wsgiapp
 from grr.test_lib import acl_test_lib
+from grr.test_lib import stats_test_lib
 from grr.test_lib import test_lib

@@ -80,6 +82,53 @@ def _GenHttpRequestProto(self):
     return request


+class ErrorLogHandlerTests(stats_test_lib.StatsCollectorTestMixin):
+  """Tests for ErrorLogsHandler."""
+
+  def _SetupTestLogger(self, unique_name: str) -> logging.Logger:
+    test_logger = logging.getLogger(unique_name)
+    error_log_handler = server_logging.ErrorLogsHandler()
+
+    test_logger.addHandler(error_log_handler)
+    self.addCleanup(lambda: test_logger.removeHandler(error_log_handler))
+
+    return test_logger
+
+  def testErrorLogHandlerWorks(self):
+    # Get a logger unique to this test.
+    test_logger = self._SetupTestLogger(self.testErrorLogHandlerWorks.__name__)
+
+    with self.SetUpStatsCollector(
+        default_stats_collector.DefaultStatsCollector()
+    ):
+      fake_counter = metrics.Counter(
+          "fake",
+          fields=[
+              ("level", str),
+          ],
+      )
+
+    with mock.patch.object(server_logging,
"LOG_CALLS_COUNTER", fake_counter): + # Make sure counter is set to zero + self.assertEqual(0, fake_counter.GetValue(fields=["ERROR"])) + self.assertEqual(0, fake_counter.GetValue(fields=["CRITICAL"])) + + # Log an error + test_logger.error("oh no!") + self.assertEqual(1, fake_counter.GetValue(fields=["ERROR"])) + self.assertEqual(0, fake_counter.GetValue(fields=["CRITICAL"])) + + # Log an exception + test_logger.exception("not again!") + self.assertEqual(2, fake_counter.GetValue(fields=["ERROR"])) + self.assertEqual(0, fake_counter.GetValue(fields=["CRITICAL"])) + + # Log critical error + test_logger.critical("I give up!") + self.assertEqual(2, fake_counter.GetValue(fields=["ERROR"])) + self.assertEqual(1, fake_counter.GetValue(fields=["CRITICAL"])) + + def main(argv): del argv # Unused. test_lib.main() diff --git a/grr/server/grr_response_server/server_stubs.py b/grr/server/grr_response_server/server_stubs.py index 650e7bb5c1..2da721f9bf 100644 --- a/grr/server/grr_response_server/server_stubs.py +++ b/grr/server/grr_response_server/server_stubs.py @@ -7,7 +7,6 @@ """ from grr_response_core.lib import rdfvalue -from grr_response_core.lib.rdfvalues import apple_firmware as rdf_apple_firmware from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts from grr_response_core.lib.rdfvalues import chipsec_types as rdf_chipsec_types from grr_response_core.lib.rdfvalues import client as rdf_client @@ -16,6 +15,7 @@ from grr_response_core.lib.rdfvalues import client_network as rdf_client_network from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats from grr_response_core.lib.rdfvalues import cloud as rdf_cloud +from grr_response_core.lib.rdfvalues import dummy as rdf_dummy from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder from grr_response_core.lib.rdfvalues import flows as rdf_flows from grr_response_core.lib.rdfvalues import large_file as rdf_large_file @@ -62,12 +62,6 @@ class EnumerateFilesystems(ClientActionStub): out_rdfvalues = [rdf_client_fs.Filesystem] -class Uninstall(ClientActionStub): - """Remove the service that starts us at startup.""" - - out_rdfvalues = [rdf_protodict.DataBlob] - - class UpdateAgent(ClientActionStub): """Updates the GRR agent to a new version.""" @@ -259,13 +253,6 @@ class ListProcesses(ClientActionStub): out_rdfvalues = [rdf_client.Process] -class SendFile(ClientActionStub): - """This action encrypts and sends a file to a remote listener.""" - - in_rdfvalue = rdf_client_action.SendFileRequest - out_rdfvalues = [rdf_client_fs.StatEntry] - - class StatFS(ClientActionStub): """Call os.statvfs for a given list of paths. OS X and Linux only.""" @@ -406,22 +393,15 @@ class Timeline(ClientActionStub): out_rdfvalues = [rdf_timeline.TimelineResult] -class EficheckDumpImage(ClientActionStub): - """Stub client action to collect the full EFI image via Apple eficheck.""" - - in_rdfvalue = rdf_apple_firmware.EficheckConfig - out_rdfvalues = [rdf_apple_firmware.DumpEfiImageResponse] - - -class EficheckCollectHashes(ClientActionStub): - """A stub client action to collect the EFI hashes via eficheck.""" - - in_rdfvalue = rdf_apple_firmware.EficheckConfig - out_rdfvalues = [rdf_apple_firmware.CollectEfiHashesResponse] - - class ReadLowLevel(ClientActionStub): """Reads `length` bytes from `path` starting at `offset` and returns it.""" in_rdfvalue = rdf_read_low_level.ReadLowLevelRequest out_rdfvalues = [rdf_read_low_level.ReadLowLevelResult] + + +class Dummy(ClientActionStub): + """Dummy example. 
Reads a message and sends it back."""
+
+  in_rdfvalue = rdf_dummy.DummyRequest
+  out_rdfvalues = [rdf_dummy.DummyResult]
diff --git a/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py b/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py
index 1912aa8599..7f1354cbf7 100644
--- a/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py
+++ b/grr/test/grr_response_test/end_to_end_tests/tests/__init__.py
@@ -6,7 +6,6 @@
 from grr_response_test.end_to_end_tests.tests import discovery
 from grr_response_test.end_to_end_tests.tests import file_finder
 from grr_response_test.end_to_end_tests.tests import filesystem
-from grr_response_test.end_to_end_tests.tests import fingerprint
 from grr_response_test.end_to_end_tests.tests import limits
 from grr_response_test.end_to_end_tests.tests import memory
 from grr_response_test.end_to_end_tests.tests import network
diff --git a/grr/test/grr_response_test/end_to_end_tests/tests/dummy.py b/grr/test/grr_response_test/end_to_end_tests/tests/dummy.py
new file mode 100644
index 0000000000..6e6f356079
--- /dev/null
+++ b/grr/test/grr_response_test/end_to_end_tests/tests/dummy.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+"""End to end tests for the GRR dummy example flow."""
+
+from grr_response_test.end_to_end_tests import test_base
+
+
+class TestDummyUnix(test_base.EndToEndTest):
+  """TestDummy test."""
+
+  platforms = [
+      test_base.EndToEndTest.Platform.LINUX,
+      test_base.EndToEndTest.Platform.DARWIN,
+  ]
+
+  def runTest(self):
+    args = self.grr_api.types.CreateFlowArgs(flow_name="Dummy")
+    args.flow_input = "abc, abc, toda criança tem que ler e escrever"
+    f = self.RunFlowAndWait("Dummy", args=args)
+
+    results = list(f.ListResults())
+    self.assertTrue(results)
+
+    # Result payloads are protos; check their text representation for the
+    # fields the dummy action is expected to echo back.
+    results_str = "\n".join(str(r.payload) for r in results)
+    self.assertIn("abc, abc, toda criança tem que ler e escrever", results_str)
+    self.assertIn("flow_input", results_str)
+    self.assertIn("action_input", results_str)
+    self.assertIn("action_output", results_str)
+
+    logs = "\n".join(l.log_message for l in f.ListLogs())
+    self.assertIn("Finished Start.", logs)
+    self.assertIn("Finished ReceiveActionOutput.", logs)
+
+
+class TestDummyWindows(test_base.EndToEndTest):
+  """TestDummy test for Windows."""
+
+  platforms = [test_base.EndToEndTest.Platform.WINDOWS]
+
+  def runTest(self):
+    args = self.grr_api.types.CreateFlowArgs(flow_name="Dummy")
+    args.flow_input = "abc, abc, toda criança tem que ler e escrever"
+    f = self.RunFlowAndWait("Dummy", args=args)
+
+    results = list(f.ListResults())
+    self.assertTrue(results)
+
+    results_str = "\n".join(str(r.payload) for r in results)
+    self.assertIn("abc, abc, toda criança tem que ler e escrever", results_str)
+    self.assertIn("flow_input", results_str)
+    self.assertIn("action_input", results_str)
+    self.assertIn("action_output", results_str)
+    self.assertIn("WIN", results_str)
+
+    logs = "\n".join(l.log_message for l in f.ListLogs())
+    self.assertIn("Finished Start.", logs)
+    self.assertIn("Finished ReceiveActionOutput.", logs)
diff --git a/grr/test/grr_response_test/end_to_end_tests/tests/filesystem.py b/grr/test/grr_response_test/end_to_end_tests/tests/filesystem.py
index 67810f0bd9..ce58f56fe7 100644
--- a/grr/test/grr_response_test/end_to_end_tests/tests/filesystem.py
+++ b/grr/test/grr_response_test/end_to_end_tests/tests/filesystem.py
@@ -81,68 +81,6 @@ def runTest(self):
     self.RunFlowAndWait("RecursiveListDirectory", args=args)


-# TODO(amoser): Find a way to run this on Darwin with Filevault turned on.
-class TestFindTSKLinux(test_base.EndToEndTest): - """Tests if the find flow works on Linux and Darwin using Sleuthkit.""" - - platforms = [test_base.EndToEndTest.Platform.LINUX] - - def runTest(self): - if self.os_release == "CentOS Linux": - self.skipTest( - "TSK is not supported on CentOS due to an xfs root filesystem.") - - args = self.grr_api.types.CreateFlowArgs("FindFiles") - # Cut down the number of files by specifying a partial regex - # match, we just want to find /usr/bin/diff, when run on a real - # system there are thousands which takes forever with TSK. - # TODO - args.findspec.max_depth = 1 - args.findspec.path_regex = "di" - args.findspec.pathspec.path = "/usr/bin" - args.findspec.pathspec.pathtype = args.findspec.pathspec.TSK - - f = self.RunFlowAndWait("FindFiles", args=args) - - results = list(f.ListResults()) - self.assertNotEmpty(results) - - diff_path = None - for r in results: - path = "fs/tsk" - pathspec = r.payload.pathspec - while pathspec.path: - path += pathspec.path - pathspec = pathspec.nested_path - - if path.endswith("/diff"): - diff_path = path - break - - self.assertTrue(diff_path) - - with self.WaitForFileRefresh(diff_path): - self.RunFlowAndWait("FindFiles", args=args) - - -class TestFindOSLinuxDarwin(test_base.EndToEndTest): - """Tests if the find flow works on Linux and Darwin.""" - - platforms = [ - test_base.EndToEndTest.Platform.LINUX, - test_base.EndToEndTest.Platform.DARWIN - ] - - def runTest(self): - args = self.grr_api.types.CreateFlowArgs("FindFiles") - args.findspec.path_regex = "^l" - args.findspec.pathspec.path = "/bin" - args.findspec.pathspec.pathtype = args.findspec.pathspec.OS - - with self.WaitForFileRefresh("fs/os/bin/ls"): - self.RunFlowAndWait("FindFiles", args=args) - - ########### # Windows # ########### diff --git a/grr/test/grr_response_test/end_to_end_tests/tests/fingerprint.py b/grr/test/grr_response_test/end_to_end_tests/tests/fingerprint.py deleted file mode 100644 index c47f77aaaa..0000000000 --- a/grr/test/grr_response_test/end_to_end_tests/tests/fingerprint.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -"""End to end tests for GRR fingerprint-related flows.""" - -from grr_response_test.end_to_end_tests import test_base - - -class TestFingerprintFileOSLinux(test_base.EndToEndTest): - """Tests if Fingerprinting works on Linux.""" - - platforms = [ - test_base.EndToEndTest.Platform.LINUX, - ] - - def runTest(self): - args = self.grr_api.types.CreateFlowArgs("FingerprintFile") - args.pathspec.path = "/bin/ls" - args.pathspec.pathtype = args.pathspec.OS - - with self.WaitForFileRefresh("fs/os/bin/ls"): - f = self.RunFlowAndWait("FingerprintFile", args=args) - - results = list(f.ListResults()) - self.assertNotEmpty(results) - - fingerprint_result = results[0].payload - self.assertLen(fingerprint_result.hash_entry.md5, 16) - self.assertLen(fingerprint_result.hash_entry.sha1, 20) - self.assertLen(fingerprint_result.hash_entry.sha256, 32) - - -class TestFingerprintFileOSWindows(test_base.EndToEndTest): - """Tests if Fingerprinting works on Windows.""" - - platforms = [ - test_base.EndToEndTest.Platform.WINDOWS, - ] - - def runTest(self): - args = self.grr_api.types.CreateFlowArgs("FingerprintFile") - args.pathspec.path = "C:\\Windows\\regedit.exe" - args.pathspec.pathtype = args.pathspec.OS - - with self.WaitForFileRefresh("fs/os/C:/Windows/regedit.exe"): - self.RunFlowAndWait("FingerprintFile", args=args) diff --git a/grr/test/grr_response_test/end_to_end_tests/tests/registry.py 
b/grr/test/grr_response_test/end_to_end_tests/tests/registry.py index be7483313d..d3d95a2906 100644 --- a/grr/test/grr_response_test/end_to_end_tests/tests/registry.py +++ b/grr/test/grr_response_test/end_to_end_tests/tests/registry.py @@ -41,20 +41,6 @@ def testListDirectory(self): results = list(f.ListResults()) self.assertNotEmpty(results) - def testFindFiles(self): - args = self.grr_api.types.CreateFlowArgs("FindFiles") - args.findspec.pathspec.path = self.__class__.REG_PATH - args.findspec.pathspec.pathtype = args.findspec.pathspec.REGISTRY - args.findspec.path_regex = "ProfileImagePath" - - f = self.RunFlowAndWait("FindFiles", args=args) - - results = list(f.ListResults()) - self.assertNotEmpty(results) - - for r in results: - self.assertIn("ProfileImagePath", r.payload.pathspec.path) - def testClientFileFinderWithRegistryPath(self): base = "/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows NT/CurrentVersion" args = self.grr_api.types.CreateFlowArgs("ClientFileFinder") diff --git a/grr/test/grr_response_test/run_self_update_test.py b/grr/test/grr_response_test/run_self_update_test.py deleted file mode 100644 index 20e7d9b336..0000000000 --- a/grr/test/grr_response_test/run_self_update_test.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python -"""Helper script for running end-to-end tests.""" - -import logging -import platform -import subprocess -import sys - -from typing import Sequence - -from absl import app -from absl import flags - -import distro - -from grr_api_client import errors -from grr_response_test.lib import api_helpers -from grr_response_test.lib import self_contained_components - -_MYSQL_DATABASE = flags.DEFINE_string("mysql_database", "grr_test_db", - "MySQL database name to use.") - -_FLEETSPEAK_MYSQL_DATABASE = flags.DEFINE_string( - "fleetspeak_mysql_database", - "fleetspeak_test_db", - "MySQL database name to use for Fleetspeak.", -) - - -_MYSQL_USERNAME = flags.DEFINE_string("mysql_username", None, - "MySQL username to use.") - -_MYSQL_PASSWORD = flags.DEFINE_string("mysql_password", None, - "MySQL password to use.") - -_LOGGING_PATH = flags.DEFINE_string( - "logging_path", None, "Base logging path for server components to use.") - -_HIGHEST_VERSION_INI = """ -[Version] -major = 9 -minor = 9 -revision = 9 -release = 9 -packageversion = %(major)s.%(minor)s.%(revision)spost%(release)s -packagedepends = %(packageversion)s -""" - - -def _check_call_print_output(cmd: Sequence[str]): - try: - return subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - logging.info(e.stdout) - logging.error(e.stderr) - raise - - -def main(argv): - del argv # Unused. - - if _MYSQL_USERNAME.value is None: - raise ValueError("--mysql_username has to be specified.") - - # Generate server and client configs. - grr_configs = self_contained_components.InitGRRConfigs( - _MYSQL_DATABASE.value, - mysql_username=_MYSQL_USERNAME.value, - mysql_password=_MYSQL_PASSWORD.value, - logging_path=_LOGGING_PATH.value) - - fleetspeak_configs = self_contained_components.InitFleetspeakConfigs( - grr_configs, - _FLEETSPEAK_MYSQL_DATABASE.value, - mysql_username=_MYSQL_USERNAME.value, - mysql_password=_MYSQL_PASSWORD.value, - logging_path=_LOGGING_PATH.value, - ) - - print("Building the template.") - template_path = self_contained_components.RunBuildTemplate( - grr_configs.server_config, component_options={"Logging.verbose": True}) - - print("Repack %s." 
-  installer_path = self_contained_components.RunRepackTemplate(
-      grr_configs.server_config, template_path)
-
-  version_overrides = {
-      "Source.version_major": 9,
-      "Source.version_minor": 9,
-      "Source.version_revision": 9,
-      "Source.version_release": 9,
-      "Source.version_string": "9.9.9.9",
-      "Source.version_numeric": 9999,
-      "Template.version_major": 9,
-      "Template.version_minor": 9,
-      "Template.version_revision": 9,
-      "Template.version_release": 9,
-      "Template.version_string": "9.9.9.9",
-      "Template.version_numeric": 9999,
-  }
-
-  print("Building next ver. template.")
-  next_ver_template_path = self_contained_components.RunBuildTemplate(
-      grr_configs.server_config,
-      component_options=version_overrides,
-      version_ini=_HIGHEST_VERSION_INI)
-
-  print("Repack next ver. %s." % template_path)
-  next_ver_installer_path = self_contained_components.RunRepackTemplate(
-      grr_configs.server_config,
-      next_ver_template_path,
-      component_options=version_overrides)
-
-  print("First installer ready: %s. Next ver. installer ready: %s." %
-        (installer_path, next_ver_installer_path))
-
-  print("Starting the server.")
-  # Start all remaining server components.
-  # Start a background thread that kills the main process if one of the
-  # server subprocesses dies.
-  server_processes = self_contained_components.StartServerProcesses(
-      grr_configs, fleetspeak_configs
-  )
-  self_contained_components.DieIfSubProcessDies(server_processes)
-
-  api_port = api_helpers.GetAdminUIPortFromConfig(grr_configs.server_config)
-  grrapi = api_helpers.WaitForAPIEndpoint(api_port)
-
-  print("Installing the client.")
-  system = platform.system().lower()
-  if system == "linux":
-    distro_id = distro.id()
-    if distro_id in ["ubuntu", "debian"]:
-      _check_call_print_output(
-          ["apt", "install", "--reinstall", "-y", installer_path])
-    elif distro_id in ["centos", "rhel", "fedora"]:
-      _check_call_print_output(["rpm", "-Uvh", installer_path])
-    else:
-      raise RuntimeError("Unsupported linux distro: %s" % distro_id)
-  elif system == "windows":
-    _check_call_print_output([installer_path])
-  elif system == "darwin":
-    _check_call_print_output(
-        ["installer", "-verbose", "-pkg", installer_path, "-target", "/"])
-  else:
-    raise RuntimeError("Unsupported platform for self-update tests: %s" %
-                       system)
-
-  # Wait for the client to enroll and get its id.
-  client_id = api_helpers.WaitForClientToEnroll(grrapi)
-  print("Found client id: %s" % client_id)
-
-  print("Waiting for the client to report the initial version.")
-  prev_version = api_helpers.WaitForClientVersionGreaterThan(
-      grrapi.Client(client_id), 0)
-
-  binary_id = self_contained_components.RunUploadExe(grr_configs.server_config,
-                                                     next_ver_installer_path,
-                                                     system)
-
-  args = grrapi.types.CreateFlowArgs(flow_name="UpdateClient")
-  args.binary_path = binary_id
-  f = grrapi.Client(client_id).CreateFlow(name="UpdateClient", args=args)
-  try:
-    # Timeout has to be rather significant, since at the moment installers
-    # are uploaded in chunks of 512Kb, each chunk requiring a round-trip
-    # to/from the client.
-    f.WaitUntilDone(timeout=180)
-    print("Update flow finished successfully. This should never happen: "
-          "the client should have been restarted.")
-    sys.exit(-1)
-  except errors.PollTimeoutError:
-    print("Update flow timed out. This shouldn't happen: the flow should "
This shouldn't happen: the flow should " - "fail explicitly due to a client restart.") - sys.exit(-1) - except errors.FlowFailedError: - print("Update flow failed (expected behavior, as the client got " - "restarted).") - - print("Update flow details:") - print(f.Get().data) - - print("Waiting for the client to report the updated version.") - api_helpers.WaitForClientVersionGreaterThan( - grrapi.Client(client_id), prev_version) - - print("Self-update test successful!") - - sys.exit(0) - - -if __name__ == "__main__": - app.run(main) diff --git a/grr/test/grr_response_test/test_data/parser_test/redhat-release b/grr/test/grr_response_test/test_data/parser_test/redhat-release new file mode 100644 index 0000000000..db5baab4cc --- /dev/null +++ b/grr/test/grr_response_test/test_data/parser_test/redhat-release @@ -0,0 +1 @@ +Red Hat Enterprise Linux Server release 7.9 (Maipo) diff --git a/grr/test/grr_response_test/test_data/parser_test/rocky-release b/grr/test/grr_response_test/test_data/parser_test/rocky-release new file mode 100644 index 0000000000..a8553542cd --- /dev/null +++ b/grr/test/grr_response_test/test_data/parser_test/rocky-release @@ -0,0 +1 @@ +Rocky Linux release 8.8 (Green Obsidian) diff --git a/grr/test_lib/action_mocks.py b/grr/test_lib/action_mocks.py index 0681d17077..970eeb4d1d 100644 --- a/grr/test_lib/action_mocks.py +++ b/grr/test_lib/action_mocks.py @@ -384,7 +384,13 @@ def HandleMessage(self, message): return responses -class ListProcessesMock(FileFinderClientMock): +class ClientFileFinderClientMock(ActionMock): + + def __init__(self, *args, **kwargs): + super().__init__(file_finder.FileFinderOS, *args, **kwargs) + + +class ListProcessesMock(ClientFileFinderClientMock): """Client with real file actions and mocked-out ListProcesses.""" def __init__(self, processes_list): @@ -395,12 +401,6 @@ def ListProcesses(self, _): return self.processes_list -class ClientFileFinderClientMock(ActionMock): - - def __init__(self, *args, **kwargs): - super().__init__(file_finder.FileFinderOS, *args, **kwargs) - - class CollectMultipleFilesClientMock(ActionMock): def __init__(self, *args, **kwargs): diff --git a/grr/test_lib/flow_test_lib.py b/grr/test_lib/flow_test_lib.py index e84b167483..23c5a3fd44 100644 --- a/grr/test_lib/flow_test_lib.py +++ b/grr/test_lib/flow_test_lib.py @@ -2,13 +2,13 @@ """Helper classes for flows-related testing.""" import logging +import re import sys -from typing import ContextManager, Iterable, Optional, Text, Type, List +from typing import ContextManager, Iterable, List, Optional, Pattern, Text, Type, Union from unittest import mock from grr_response_client import actions from grr_response_client.client_actions import standard - from grr_response_core.lib import rdfvalue from grr_response_core.lib import registry from grr_response_core.lib.rdfvalues import client as rdf_client @@ -212,6 +212,39 @@ def setUp(self): actions.ActionPlugin.last_progress_time = ( rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)) + def assertFlowLoggedRegex( + self, + client_id: str, + flow_id: str, + regex: Union[str, Pattern[str]], + ) -> None: + """Asserts that the flow logged a message matching the specified expression. + + Args: + client_id: An identifier of the client of which flow we make an assertion. + flow_id: An identifier of the flow on which me make an assertion. + regex: A regex to match against the flow log messages. + """ + del self # Unused. 
+
+    assert data_store.REL_DB is not None
+
+    if isinstance(regex, str):
+      regex = re.compile(regex, re.IGNORECASE)
+
+    logs = data_store.REL_DB.ReadFlowLogEntries(
+        client_id,
+        flow_id,
+        offset=0,
+        count=sys.maxsize,
+    )
+    for log in logs:
+      if regex.search(log.message) is not None:
+        return
+
+    message = f"No logs matching {regex!r} for flow '{client_id}/{flow_id}'"
+    raise AssertionError(message)
+
 
 class CrashClientMock(action_mocks.ActionMock):
   """Client mock that simulates a client crash."""
@@ -301,10 +334,10 @@ def PushToStateQueue(self, message, **kw):
         [rdf_flow_objects.FlowResponseForLegacyResponse(message)])
 
   def Next(self):
-    """Emulates execution of a single ClientActionRequest.
+    """Emulates execution of a single client action request.
 
     Returns:
-      True iff a ClientActionRequest was found for the client.
+      True if a pending action request was found for the client.
     """
     next_task = fleetspeak_test_lib.PopMessage(self.client_id)
     if next_task is None:
diff --git a/grr/test_lib/hunt_test_lib.py b/grr/test_lib/hunt_test_lib.py
index 4edb11ce41..ca12718387 100644
--- a/grr/test_lib/hunt_test_lib.py
+++ b/grr/test_lib/hunt_test_lib.py
@@ -1,12 +1,10 @@
 #!/usr/bin/env python
 """Classes for hunt-related testing."""
 
-import hashlib
 import sys
-
-from grr_response_core.lib.rdfvalues import client as rdf_client
 from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
+from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
 from grr_response_core.lib.rdfvalues import flows as rdf_flows
 from grr_response_core.lib.rdfvalues import paths as rdf_paths
 from grr_response_core.lib.rdfvalues import structs as rdf_structs
@@ -17,7 +15,7 @@
 from grr_response_server import hunt
 from grr_response_server import output_plugin
 from grr_response_server.databases import db
-from grr_response_server.flows.general import transfer
+from grr_response_server.flows.general import file_finder
 from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
 from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
 from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
@@ -45,20 +43,25 @@ def __init__(self,
     self.system_cpu_time = system_cpu_time
     self.network_bytes_sent = network_bytes_sent
 
-  def GetFileStat(self, args):
-    """`GetFileStat` action mock."""
-    response = rdf_client_fs.StatEntry(
-        pathspec=args.pathspec,
-        st_mode=33184,
-        st_ino=1063090,
-        st_dev=64512,
-        st_nlink=1,
-        st_uid=139592,
-        st_gid=5000,
-        st_size=len(self.data),
-        st_atime=1336469177,
-        st_mtime=1336129892,
-        st_ctime=1336129892)
+  def FileFinderOS(self, args):
+    # TODO: Stop relying on these constants.
+    response = rdf_file_finder.FileFinderResult(
+        stat_entry=rdf_client_fs.StatEntry(
+            pathspec=rdf_paths.PathSpec(
+                path=args.paths[0], pathtype=rdf_paths.PathSpec.PathType.OS
+            ),
+            st_mode=33184,
+            st_ino=1063090,
+            st_dev=64512,
+            st_nlink=1,
+            st_uid=139592,
+            st_gid=5000,
+            st_size=len(self.data),
+            st_atime=1336469177,
+            st_mtime=1336129892,
+            st_ctime=1336129892,
+        )
+    )
 
     self.responses += 1
     self.count += 1
@@ -66,15 +69,19 @@ def GenerateStatusMessage(self, message, response_id, status=None):
     # Every "failrate" client does not have this file.
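+    # Simulates a failing client action by raising instead of returning no results.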
     if self.count == self.failrate:
       self.count = 0
-      return []
+      raise ValueError(
+          f"FileFinderOS failed as planned, failrate = {self.failrate}"
+      )
 
     return [response]
 
   def GenerateStatusMessage(self, message, response_id, status=None):
     status = rdf_flows.GrrStatus(
-        status=status or rdf_flows.GrrStatus.ReturnedStatus.OK)
+        status=status or rdf_flows.GrrStatus.ReturnedStatus.OK
+    )
 
-    if message.name == "GetFileStat":
+    # TODO: Stop relying on these constants.
+    if message.name == "FileFinderOS":
       # Create status message to report sample resource usage
       if self.user_cpu_time is None:
         status.cpu_time_used.user_cpu_time = self.responses
@@ -99,19 +106,6 @@ def GenerateStatusMessage(self, message, response_id, status=None):
         payload=status,
         type=rdf_flows.GrrMessage.Type.STATUS)
 
-  def TransferBuffer(self, args):
-    """TransferBuffer action mock."""
-    response = rdf_client.BufferReference(args)
-
-    offset = min(args.offset, len(self.data))
-    sha256 = hashlib.sha256()
-    sha256.update(self.data[offset:])
-    response.data = sha256.digest()
-    response.length = len(self.data[offset:])
-    data_store.BLOBS.WriteBlobWithUnknownHash(self.data[offset:])
-
-    return [response]
-
 
 def TestHuntHelperWithMultipleMocks(client_mocks,
                                     iteration_limit=None,
@@ -222,15 +216,15 @@ def CreateHunt(self,
     # Only initialize default flow_args value if default flow_runner_args value
     # is to be used.
     if not flow_runner_args:
-      flow_args = (
-          flow_args or transfer.GetFileArgs(
-              pathspec=rdf_paths.PathSpec(
-                  path="/tmp/evil.txt",
-                  pathtype=rdf_paths.PathSpec.PathType.OS)))
+      flow_args = rdf_file_finder.FileFinderArgs(
+          paths=["/tmp/evil.txt"],
+          pathtype=rdf_paths.PathSpec.PathType.OS,
+          action=rdf_file_finder.FileFinderAction.Download(),
+      )
 
-    flow_runner_args = (
-        flow_runner_args or
-        rdf_flow_runner.FlowRunnerArgs(flow_name=transfer.GetFile.__name__))
+    flow_runner_args = flow_runner_args or rdf_flow_runner.FlowRunnerArgs(
+        flow_name=file_finder.ClientFileFinder.__name__
+    )
 
     client_rule_set = (client_rule_set or self._CreateForemanClientRuleSet())
@@ -307,9 +301,10 @@ def _EnsureClientHasHunt(self, client_id, hunt_id):
       data_store.REL_DB.ReadFlowObject(client_id, hunt_id)
     except db.UnknownFlowError:
       flow_test_lib.StartFlow(
-          transfer.GetFile,
+          file_finder.ClientFileFinder,
           client_id=client_id,
-          parent=flow.FlowParent.FromHuntID(hunt_id))
+          parent=flow.FlowParent.FromHuntID(hunt_id),
+      )
 
     return hunt_id
diff --git a/grr/test_lib/test_lib.py b/grr/test_lib/test_lib.py
index ecff2c5bb0..f66f4cfa9f 100644
--- a/grr/test_lib/test_lib.py
+++ b/grr/test_lib/test_lib.py
@@ -217,7 +217,8 @@ def _TestClientInfo(self, labels=None):
     res = rdf_client.ClientInformation(
         client_name="GRR Monitor",
         client_version=config.CONFIG["Source.version_numeric"],
-        build_time="1980-01-01")
+        build_time="1980-01-01T12:00:00.000000+00:00",
+    )
     if labels is None:
       res.labels = ["label1", "label2"]
     else:
diff --git a/grr/test_lib/vfs_test_lib.py b/grr/test_lib/vfs_test_lib.py
index ff16ba54bb..3cf025fb0a 100644
--- a/grr/test_lib/vfs_test_lib.py
+++ b/grr/test_lib/vfs_test_lib.py
@@ -289,6 +289,8 @@ def __init__(self,
                                          self.pathspec.last.path,
                                          pathspec.CollapsePath().lstrip("/"))
     self.path = self.pathspec.CollapsePath()
+    if pathspec.file_size_override:
+      self.size = pathspec.file_size_override
 
   @classmethod
   def FakeRootPath(cls, path):
@@ -362,7 +364,9 @@ def OpenKey(self, key, sub_key):
       raise OSError()
 
   def QueryValueEx(self, key, value_name):
-    full_key = os.path.join(key.value.lower(), value_name).rstrip("/")
+    res = key.value.replace("\\", "/").rstrip("/")
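+    # Lower-case every path component except the last before the cache lookup.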
"/").rstrip("/") + parts = res.split("/") + full_key = utils.Join(*[p.lower() for p in parts[:-1]] + parts[-1:]) try: stat_entry = self.cache[self.prefix][full_key][1] data = stat_entry.registry_data.GetValue()