diff --git a/grr/server/grr_response_server/bin/worker_test.py b/grr/server/grr_response_server/bin/worker_test.py
index 815f1be5a5..429415b04a 100644
--- a/grr/server/grr_response_server/bin/worker_test.py
+++ b/grr/server/grr_response_server/bin/worker_test.py
@@ -45,8 +45,9 @@ def handle(l):
results = data_store.REL_DB.ReadClientStats(
client_id=client_id,
- min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0),
- max_timestamp=rdfvalue.RDFDatetime.Now())
+ min_timestamp=data_store.REL_DB.MinTimestamp(),
+ max_timestamp=rdfvalue.RDFDatetime.Now(),
+ )
self.assertLen(results, 1)
stats = results[0]
diff --git a/grr/server/grr_response_server/client_index.py b/grr/server/grr_response_server/client_index.py
index 797cbd54f8..f38545a953 100644
--- a/grr/server/grr_response_server/client_index.py
+++ b/grr/server/grr_response_server/client_index.py
@@ -52,8 +52,10 @@ def _NormalizeKeyword(self, keyword):
def _AnalyzeKeywords(self, keywords):
"""Extracts a start time from a list of keywords if present."""
- start_time = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
- 180, rdfvalue.DAYS)
+ start_time = max(
+ rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(180, rdfvalue.DAYS),
+ data_store.REL_DB.MinTimestamp(),
+ )
filtered_keywords = []
for k in keywords:
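
The clamp above guards the default 180-day lookback against backends whose
earliest representable timestamp is later than the Unix epoch. A minimal
sketch of the idiom, assuming `data_store.REL_DB` is an initialized
relational database handle:

    from grr_response_core.lib import rdfvalue
    from grr_response_server import data_store

    def SearchStartTime(lookback_days):
      proposed = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
          lookback_days, rdfvalue.DAYS)
      # Never pass a timestamp the backend can't store (e.g. MySQL's
      # TIMESTAMP range starts at 1970-01-01 00:00:01).
      return max(proposed, data_store.REL_DB.MinTimestamp())
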
diff --git a/grr/server/grr_response_server/databases/db.py b/grr/server/grr_response_server/databases/db.py
index bf56771adb..2b6da39cea 100644
--- a/grr/server/grr_response_server/databases/db.py
+++ b/grr/server/grr_response_server/databases/db.py
@@ -578,6 +578,16 @@ class Database(metaclass=abc.ABCMeta):
def Now(self) -> rdfvalue.RDFDatetime:
"""Retrieves current time as reported by the database."""
+ # Different DB engines make different assumptions about the minimal
+ # valid timestamp. For example, MySQL can't represent timestamps earlier
+ # than one second past the Unix epoch.
+ # Per https://dev.mysql.com/doc/refman/8.0/en/datetime.html:
+ # "the range for TIMESTAMP values is '1970-01-01 00:00:01.000000' to
+ # '2038-01-19 03:14:07.999999'".
+ @abc.abstractmethod
+ def MinTimestamp(self) -> rdfvalue.RDFDatetime:
+ """Returns minimal timestamp allowed by the DB."""
+
@abc.abstractmethod
def WriteArtifact(self, artifact):
"""Writes new artifact to the database.
@@ -3062,6 +3072,9 @@ def __init__(self, delegate: Database):
def Now(self) -> rdfvalue.RDFDatetime:
return self.delegate.Now()
+ def MinTimestamp(self) -> rdfvalue.RDFDatetime:
+ return self.delegate.MinTimestamp()
+
def WriteArtifact(self, artifact):
precondition.AssertType(artifact, rdf_artifacts.Artifact)
if not artifact.name:
@@ -3185,7 +3198,7 @@ def WriteClientSnapshotHistory(self, clients):
def ReadClientSnapshotHistory(self, client_id, timerange=None):
precondition.ValidateClientId(client_id)
if timerange is not None:
- _ValidateTimeRange(timerange)
+ self._ValidateTimeRange(timerange)
return self.delegate.ReadClientSnapshotHistory(
client_id, timerange=timerange)
@@ -3205,7 +3218,7 @@ def ReadClientStartupInfo(self,
def ReadClientStartupInfoHistory(self, client_id, timerange=None):
precondition.ValidateClientId(client_id)
if timerange is not None:
- _ValidateTimeRange(timerange)
+ self._ValidateTimeRange(timerange)
return self.delegate.ReadClientStartupInfoHistory(
client_id, timerange=timerange)
@@ -3250,7 +3263,7 @@ def ListClientsForKeywords(
keywords = set(keywords)
if start_time:
- _ValidateTimestamp(start_time)
+ self._ValidateTimestamp(start_time)
result = self.delegate.ListClientsForKeywords(
keywords, start_time=start_time)
@@ -3324,12 +3337,12 @@ def ReadClientStats(
if min_timestamp is None:
min_timestamp = rdfvalue.RDFDatetime.Now() - CLIENT_STATS_RETENTION
else:
- _ValidateTimestamp(min_timestamp)
+ self._ValidateTimestamp(min_timestamp)
if max_timestamp is None:
max_timestamp = rdfvalue.RDFDatetime.Now()
else:
- _ValidateTimestamp(max_timestamp)
+ self._ValidateTimestamp(max_timestamp)
return self.delegate.ReadClientStats(client_id, min_timestamp,
max_timestamp)
@@ -3475,7 +3488,7 @@ def ReadPathInfo(self, client_id, path_type, components, timestamp=None):
_ValidatePathComponents(components)
if timestamp is not None:
- _ValidateTimestamp(timestamp)
+ self._ValidateTimestamp(timestamp)
return self.delegate.ReadPathInfo(
client_id, path_type, components, timestamp=timestamp)
@@ -3525,7 +3538,7 @@ def FindPathInfoByPathID(self, client_id, path_type, path_id, timestamp=None):
precondition.ValidateClientId(client_id)
if timestamp is not None:
- _ValidateTimestamp(timestamp)
+ self._ValidateTimestamp(timestamp)
return self.delegate.FindPathInfoByPathID( # pytype: disable=attribute-error
client_id, path_type, path_id, timestamp=timestamp)
@@ -3596,7 +3609,7 @@ def WriteUserNotification(self, notification):
def ReadUserNotifications(self, username, state=None, timerange=None):
_ValidateUsername(username)
if timerange is not None:
- _ValidateTimeRange(timerange)
+ self._ValidateTimeRange(timerange)
if state is not None:
_ValidateNotificationState(state)
@@ -3759,7 +3772,7 @@ def ReadCronJobRuns(self, job_id):
return self.delegate.ReadCronJobRuns(job_id)
def DeleteOldCronJobRuns(self, cutoff_timestamp):
- _ValidateTimestamp(cutoff_timestamp)
+ self._ValidateTimestamp(cutoff_timestamp)
return self.delegate.DeleteOldCronJobRuns(cutoff_timestamp)
def WriteHashBlobReferences(self, references_by_hash):
@@ -3880,10 +3893,10 @@ def UpdateFlow(self,
precondition.AssertType(client_crash_info, rdf_client.ClientCrash)
if processing_since != Database.unchanged:
if processing_since is not None:
- _ValidateTimestamp(processing_since)
+ self._ValidateTimestamp(processing_since)
if processing_deadline != Database.unchanged:
if processing_deadline is not None:
- _ValidateTimestamp(processing_deadline)
+ self._ValidateTimestamp(processing_deadline)
return self.delegate.UpdateFlow(
client_id,
flow_id,
@@ -4487,6 +4500,33 @@ def ReadBlobEncryptionKeys(
return self.delegate.ReadBlobEncryptionKeys(blob_ids)
+ # The minimal allowed timestamp is DB-specific, so the validation code
+ # for timestamps is DB-specific as well.
+ def _ValidateTimeRange(
+ self, timerange: Tuple[rdfvalue.RDFDatetime, rdfvalue.RDFDatetime]
+ ):
+ """Parses a timerange argument and always returns non-None timerange."""
+ if len(timerange) != 2:
+ raise ValueError("Timerange should be a sequence with 2 items.")
+
+ (start, end) = timerange
+ precondition.AssertOptionalType(start, rdfvalue.RDFDatetime)
+ precondition.AssertOptionalType(end, rdfvalue.RDFDatetime)
+ if start is not None:
+ self._ValidateTimestamp(start)
+ if end is not None:
+ self._ValidateTimestamp(end)
+
+ def _ValidateTimestamp(self, timestamp: rdfvalue.RDFDatetime):
+ precondition.AssertType(timestamp, rdfvalue.RDFDatetime)
+ if timestamp < self.delegate.MinTimestamp():
+ raise ValueError(
+ "Timestamp is less than the minimal timestamp allowed by the DB: "
+ f"{timestamp} < {self.delegate.MinTimestamp()}."
+ )
+
def _ValidateEnumType(value, expected_enum_type):
if value not in expected_enum_type.reverse_enum:
@@ -4606,35 +4646,10 @@ def _ValidateNotificationState(notification_state):
raise ValueError("notification_state can't be STATE_UNSET")
-def _ValidateTimeRange(timerange):
- """Parses a timerange argument and always returns non-None timerange."""
- if len(timerange) != 2:
- raise ValueError("Timerange should be a sequence with 2 items.")
-
- (start, end) = timerange
- precondition.AssertOptionalType(start, rdfvalue.RDFDatetime)
- precondition.AssertOptionalType(end, rdfvalue.RDFDatetime)
-
-
-def _ValidateClosedTimeRange(time_range):
- """Checks that a time-range has both start and end timestamps set."""
- time_range_start, time_range_end = time_range
- _ValidateTimestamp(time_range_start)
- _ValidateTimestamp(time_range_end)
- if time_range_start > time_range_end:
- raise ValueError("Invalid time-range: %d > %d." %
- (time_range_start.AsMicrosecondsSinceEpoch(),
- time_range_end.AsMicrosecondsSinceEpoch()))
-
-
def _ValidateDuration(duration):
precondition.AssertType(duration, rdfvalue.Duration)
-def _ValidateTimestamp(timestamp):
- precondition.AssertType(timestamp, rdfvalue.RDFDatetime)
-
-
def _ValidateClientPathID(client_path_id):
precondition.AssertType(client_path_id, rdf_objects.ClientPathID)
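
With `_ValidateTimestamp` now consulting the delegate, a timestamp below the
backend's minimum fails fast in the validation wrapper instead of surfacing
as a backend-specific error. A hedged sketch of how this looks to a caller,
assuming `db` is a validation wrapper around a MySQL-backed delegate:

    from grr_response_core.lib import rdfvalue

    too_early = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
    try:
      db.ReadClientStats(
          client_id="C.0000000000000001",
          min_timestamp=too_early,  # below MySQL's MinTimestamp() of 1s
          max_timestamp=rdfvalue.RDFDatetime.Now())
    except ValueError as e:
      print(e)  # "Timestamp is less than the minimal timestamp allowed..."
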
diff --git a/grr/server/grr_response_server/databases/db_flows_test.py b/grr/server/grr_response_server/databases/db_flows_test.py
index c23d9b9898..a3d6f82d61 100644
--- a/grr/server/grr_response_server/databases/db_flows_test.py
+++ b/grr/server/grr_response_server/databases/db_flows_test.py
@@ -302,12 +302,12 @@ def testFlowTimestamp(self):
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
- before_timestamp = rdfvalue.RDFDatetime.Now()
+ before_timestamp = self.db.Now()
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
self.db.WriteFlowObject(flow_obj)
- after_timestamp = rdfvalue.RDFDatetime.Now()
+ after_timestamp = self.db.Now()
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertBetween(flow_obj.create_time, before_timestamp, after_timestamp)
@@ -318,13 +318,13 @@ def testFlowTimestampWithMissingCreationTime(self):
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
- before_timestamp = rdfvalue.RDFDatetime.Now()
+ before_timestamp = self.db.Now()
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.create_time = None
self.db.WriteFlowObject(flow_obj)
- after_timestamp = rdfvalue.RDFDatetime.Now()
+ after_timestamp = self.db.Now()
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertBetween(flow_obj.create_time, before_timestamp, after_timestamp)
@@ -698,7 +698,7 @@ def testProcessingInformationUpdate(self):
client_id = db_test_utils.InitializeClient(self.db)
flow_id = db_test_utils.InitializeFlow(self.db, client_id)
- now = rdfvalue.RDFDatetime.Now()
+ now = self.db.Now()
deadline = now + rdfvalue.Duration.From(6, rdfvalue.HOURS)
self.db.UpdateFlow(
client_id,
@@ -1530,10 +1530,10 @@ def testLeaseFlowForProcessingUpdatesFlowObjects(self):
def testFlowLastUpdateTime(self):
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
- t0 = rdfvalue.RDFDatetime.Now()
+ t0 = self.db.Now()
client_id = db_test_utils.InitializeClient(self.db)
flow_id = db_test_utils.InitializeFlow(self.db, client_id)
- t1 = rdfvalue.RDFDatetime.Now()
+ t1 = self.db.Now()
read_flow = self.db.ReadFlowObject(client_id, flow_id)
@@ -1543,9 +1543,9 @@ def testFlowLastUpdateTime(self):
client_id, flow_id, processing_time)
self.assertBetween(flow_for_processing.last_update_time, t0, t1)
- t2 = rdfvalue.RDFDatetime.Now()
+ t2 = self.db.Now()
self.db.ReleaseProcessedFlow(flow_for_processing)
- t3 = rdfvalue.RDFDatetime.Now()
+ t3 = self.db.Now()
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertBetween(read_flow.last_update_time, t2, t3)
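
Switching the brackets from `rdfvalue.RDFDatetime.Now()` to `self.db.Now()`
matters because the timestamps under test are assigned by the database
server, whose clock may be skewed relative to the test runner. The pattern,
condensed:

    # Bracket the write with the *database's* clock, not the local one,
    # so the assertion is immune to clock skew between runner and server.
    before = self.db.Now()
    self.db.WriteFlowObject(flow_obj)  # create_time assigned DB-side
    after = self.db.Now()
    read = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
    self.assertBetween(read.create_time, before, after)
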
diff --git a/grr/server/grr_response_server/databases/mem.py b/grr/server/grr_response_server/databases/mem.py
index c2c0f4cfd1..54b8dd9e16 100644
--- a/grr/server/grr_response_server/databases/mem.py
+++ b/grr/server/grr_response_server/databases/mem.py
@@ -160,3 +160,6 @@ def _DeepCopy(self, obj):
def Now(self) -> rdfvalue.RDFDatetime:
del self # Unused.
return rdfvalue.RDFDatetime.Now()
+
+ def MinTimestamp(self) -> rdfvalue.RDFDatetime:
+ return rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
diff --git a/grr/server/grr_response_server/databases/mysql.py b/grr/server/grr_response_server/databases/mysql.py
index 8ce2df5417..5692d39696 100644
--- a/grr/server/grr_response_server/databases/mysql.py
+++ b/grr/server/grr_response_server/databases/mysql.py
@@ -594,3 +594,9 @@ def Now(self, cursor: MySQLdb.cursors.Cursor) -> rdfvalue.RDFDatetime:
[(timestamp,)] = cursor.fetchall()
return mysql_utils.TimestampToRDFDatetime(timestamp)
+
+ def MinTimestamp(self) -> rdfvalue.RDFDatetime:
+ # Per https://dev.mysql.com/doc/refman/8.0/en/datetime.html:
+ # "the range for TIMESTAMP values is '1970-01-01 00:00:01.000000' to
+ # '2038-01-19 03:14:07.999999'".
+ return rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
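
One second past the epoch is exactly the lower bound of the TIMESTAMP range
quoted above; second 0 would map to '1970-01-01 00:00:00', which falls
outside that range. An illustrative check only:

    from grr_response_core.lib import rdfvalue

    min_ts = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
    assert min_ts.AsSecondsSinceEpoch() == 1
    # Anything earlier must be clamped before it reaches a MySQL query:
    safe_start = max(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0), min_ts)
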
diff --git a/grr/server/grr_response_server/databases/mysql_migration.py b/grr/server/grr_response_server/databases/mysql_migration.py
index 9e52fe0a0e..02331de3c6 100644
--- a/grr/server/grr_response_server/databases/mysql_migration.py
+++ b/grr/server/grr_response_server/databases/mysql_migration.py
@@ -4,6 +4,7 @@
import contextlib
import logging
import os
+import re
import time
from typing import Callable, Optional, Sequence, Text
@@ -107,4 +108,23 @@ def DumpCurrentSchema(cursor: Cursor) -> Text:
rows = cursor.fetchall()
defs.append(rows[0][1])
+ cursor.execute("""
+ SELECT TRIGGER_NAME FROM INFORMATION_SCHEMA.TRIGGERS
+ WHERE trigger_schema = (SELECT DATABASE())
+ """)
+ for (trigger,) in sorted(cursor.fetchall()):
+ cursor.execute(f"SHOW CREATE TRIGGER `{trigger}`")
+ rows = cursor.fetchall()
+
+ # `SHOW CREATE TRIGGER` will return the concrete definer of the trigger,
+ # so we need to patch its output here to show the default `CURRENT_USER`.
+ trigger_def = re.sub(
+ r"^CREATE\s+DEFINER\s*=\s*`[^`]+`(@`[^`]+`)?\s*TRIGGER",
+ "CREATE DEFINER = CURRENT_USER TRIGGER",
+ rows[0][2],
+ count=1,
+ flags=re.DOTALL | re.MULTILINE,
+ )
+ defs.append(trigger_def)
+
return "\n\n".join(defs)
diff --git a/grr/server/grr_response_server/databases/mysql_migrations/0014.sql b/grr/server/grr_response_server/databases/mysql_migrations/0014.sql
new file mode 100644
index 0000000000..d3fc926014
--- /dev/null
+++ b/grr/server/grr_response_server/databases/mysql_migrations/0014.sql
@@ -0,0 +1,79 @@
+-- Trigger an update of `client_paths.last_stat_entry_timestamp` after a
+-- new record is inserted into `client_path_stat_entries`.
+CREATE
+ TRIGGER
+ client_paths_last_stat_entry_timestamp_insert
+ AFTER INSERT
+ON
+ client_path_stat_entries
+ FOR EACH ROW UPDATE client_paths
+SET
+ -- Note: using a conditional update here is more efficient than
+ -- restricting the UPDATE query via its WHERE clause, as it allows
+ -- finer-grained locking to be used (i.e. row locking based on the primary
+ -- key matching in the WHERE clause).
+ last_stat_entry_timestamp = IF(
+ last_stat_entry_timestamp IS NULL OR last_stat_entry_timestamp < NEW.timestamp,
+ NEW.timestamp,
+ last_stat_entry_timestamp)
+WHERE (client_id, path_type, path_id) = (NEW.client_id, NEW.path_type, NEW.path_id);
+
+-- Trigger an update of `client_paths.last_stat_entry_timestamp` after a
+-- record is updated in `client_path_stat_entries`.
+CREATE
+ TRIGGER
+ client_paths_last_stat_entry_timestamp_update
+ AFTER UPDATE
+ON
+ client_path_stat_entries
+ FOR EACH ROW UPDATE client_paths
+SET
+ -- Note: using a conditional update here is more efficient than
+ -- restricting the UPDATE query via its WHERE clause, as it allows
+ -- finer-grained locking to be used (i.e. row locking based on the primary
+ -- key matching in the WHERE clause).
+ last_stat_entry_timestamp = IF(
+ last_stat_entry_timestamp IS NULL OR last_stat_entry_timestamp < NEW.timestamp,
+ NEW.timestamp,
+ last_stat_entry_timestamp)
+WHERE (client_id, path_type, path_id) = (NEW.client_id, NEW.path_type, NEW.path_id);
+
+-- Trigger an update of `client_paths.last_hash_entry_timestamp` after a
+-- new record is inserted into `client_path_hash_entries`.
+CREATE
+ TRIGGER
+ client_paths_last_hash_entry_timestamp_insert
+ AFTER INSERT
+ON
+ client_path_hash_entries
+ FOR EACH ROW UPDATE client_paths
+SET
+ -- Note: using a conditional update here is more efficient than
+ -- restricting the UPDATE query via its WHERE clause, as it allows
+ -- finer-grained locking to be used (i.e. row locking based on the primary
+ -- key matching in the WHERE clause).
+ last_hash_entry_timestamp = IF(
+ last_hash_entry_timestamp IS NULL OR last_hash_entry_timestamp < NEW.timestamp,
+ NEW.timestamp,
+ last_hash_entry_timestamp)
+WHERE (client_id, path_type, path_id) = (NEW.client_id, NEW.path_type, NEW.path_id);
+
+-- Trigger an update of `client_paths.last_hash_entry_timestamp` after a
+-- record is updated in `client_path_hash_entries`.
+CREATE
+ TRIGGER
+ client_paths_last_hash_entry_timestamp_update
+ AFTER UPDATE
+ON
+ client_path_hash_entries
+ FOR EACH ROW UPDATE client_paths
+SET
+ -- Note: using a conditional update here is more efficient than
+ -- restricting the UPDATE query via its WHERE clause, as it allows
+ -- finer-grained locking to be used (i.e. row locking based on the primary
+ -- key matching in the WHERE clause).
+ last_hash_entry_timestamp = IF(
+ last_hash_entry_timestamp IS NULL OR last_hash_entry_timestamp < NEW.timestamp,
+ NEW.timestamp,
+ last_hash_entry_timestamp)
+WHERE (client_id, path_type, path_id) = (NEW.client_id, NEW.path_type, NEW.path_id);
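
Together, the four triggers keep the denormalized `last_*_timestamp` columns
on `client_paths` current without the extra UPDATE round-trip that
`WritePathInfos` used to issue (removed below). The `IF(...)` guard also
makes the update monotonic: an out-of-order insert with an older timestamp
leaves the column untouched. A hedged sketch of the observable behavior,
assuming a `cursor` on a schema with this migration applied and placeholder
values for the key and blob columns:

    # Insert a stat entry; the AFTER INSERT trigger fires synchronously.
    cursor.execute(
        """
        INSERT INTO client_path_stat_entries
            (client_id, path_type, path_id, timestamp, stat_entry)
        VALUES (%s, %s, %s, FROM_UNIXTIME(%s), %s)
        """,
        [client_id, path_type, path_id, 1600000000, stat_blob])
    cursor.execute(
        """
        SELECT UNIX_TIMESTAMP(last_stat_entry_timestamp) FROM client_paths
        WHERE client_id = %s AND path_type = %s AND path_id = %s
        """,
        [client_id, path_type, path_id])
    assert cursor.fetchone()[0] >= 1600000000  # trigger already propagated
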
diff --git a/grr/server/grr_response_server/databases/mysql_paths.py b/grr/server/grr_response_server/databases/mysql_paths.py
index a6e2eea127..667480fb2e 100644
--- a/grr/server/grr_response_server/databases/mysql_paths.py
+++ b/grr/server/grr_response_server/databases/mysql_paths.py
@@ -278,6 +278,8 @@ def WritePathInfos(
cursor.executemany(query, parent_path_info_values)
if stat_entry_values:
+ # Note: `client_paths.last_stat_entry_timestamp` will get updated via
+ # a DB trigger during the execution of this query.
query = """
INSERT INTO client_path_stat_entries(client_id, path_type, path_id,
timestamp,
@@ -286,19 +288,9 @@ def WritePathInfos(
"""
cursor.executemany(query, stat_entry_values)
- query = """
- UPDATE client_paths
- FORCE INDEX (PRIMARY)
- SET last_stat_entry_timestamp = FROM_UNIXTIME(%s)
- WHERE (client_id, path_type, path_id) IN ({})
- """.format(
- ",".join(["(%s, %s, %s)"] * len(stat_entry_values))
- )
-
- params = [mysql_utils.RDFDatetimeToTimestamp(now)] + stat_entry_keys
- cursor.execute(query, params)
-
if hash_entry_values:
+ # Note: `client_paths.last_hash_entry_timestamp` will get updated via
+ # a DB trigger during the execution of this query.
query = """
INSERT INTO client_path_hash_entries(client_id, path_type, path_id,
timestamp,
@@ -307,18 +299,6 @@ def WritePathInfos(
"""
cursor.executemany(query, hash_entry_values)
- query = """
- UPDATE client_paths
- FORCE INDEX (PRIMARY)
- SET last_hash_entry_timestamp = FROM_UNIXTIME(%s)
- WHERE (client_id, path_type, path_id) IN ({})
- """.format(
- ",".join(["(%s, %s, %s)"] * len(hash_entry_values))
- )
-
- params = [mysql_utils.RDFDatetimeToTimestamp(now)] + hash_entry_keys
- cursor.execute(query, params)
-
@mysql_utils.WithTransaction(readonly=True)
def ListDescendantPathInfos(self,
client_id,
diff --git a/grr/server/grr_response_server/flows/cron/system_test.py b/grr/server/grr_response_server/flows/cron/system_test.py
index cd47572a30..4d653306c2 100644
--- a/grr/server/grr_response_server/flows/cron/system_test.py
+++ b/grr/server/grr_response_server/flows/cron/system_test.py
@@ -181,8 +181,8 @@ def testPurgeClientStats(self):
data_store.REL_DB.WriteClientStats(client_id, st)
stat_entries = data_store.REL_DB.ReadClientStats(
- client_id=client_id,
- min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0))
+ client_id=client_id, min_timestamp=data_store.REL_DB.MinTimestamp()
+ )
self.assertCountEqual([1 * max_age, 1.5 * max_age, 2 * max_age],
[e.RSS_size for e in stat_entries])
diff --git a/grr/server/grr_response_server/flows/general/administrative_test.py b/grr/server/grr_response_server/flows/general/administrative_test.py
index d7338a8ed4..be5474e24e 100644
--- a/grr/server/grr_response_server/flows/general/administrative_test.py
+++ b/grr/server/grr_response_server/flows/general/administrative_test.py
@@ -505,8 +505,9 @@ def GetClientStats(self, _):
samples = data_store.REL_DB.ReadClientStats(
client_id=client_id,
- min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0),
- max_timestamp=rdfvalue.RDFDatetime.Now())
+ min_timestamp=data_store.REL_DB.MinTimestamp(),
+ max_timestamp=rdfvalue.RDFDatetime.Now(),
+ )
self.assertNotEmpty(samples)
sample = samples[0]
diff --git a/grr/server/grr_response_server/flows/general/export.py b/grr/server/grr_response_server/flows/general/export.py
index a4d75fbbe4..ad0252ff9d 100644
--- a/grr/server/grr_response_server/flows/general/export.py
+++ b/grr/server/grr_response_server/flows/general/export.py
@@ -36,6 +36,12 @@ def CollectionItemToClientPath(item, client_id=None):
return db.ClientPath.FromPathSpec(client_id, item.pathspec)
elif isinstance(item, rdf_file_finder.FileFinderResult):
return db.ClientPath.FromPathSpec(client_id, item.stat_entry.pathspec)
+ elif isinstance(item, rdf_file_finder.CollectSingleFileResult):
+ return db.ClientPath.FromPathSpec(client_id, item.stat.pathspec)
+ elif isinstance(item, rdf_file_finder.CollectMultipleFilesResult):
+ return db.ClientPath.FromPathSpec(client_id, item.stat.pathspec)
+ elif isinstance(item, rdf_file_finder.CollectFilesByKnownPathResult):
+ return db.ClientPath.FromPathSpec(client_id, item.stat.pathspec)
elif isinstance(item, collectors.ArtifactFilesDownloaderResult):
if item.HasField("downloaded_file"):
return db.ClientPath.FromPathSpec(client_id,
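
All three new result types expose their pathspec at `item.stat.pathspec`, so
the chain grows by three structurally identical branches. A hypothetical
flattening (not part of this change) that keeps future additions to one line
each:

    # Hypothetical refactoring: map result types that share the
    # `.stat.pathspec` layout onto a single branch.
    _STAT_PATHSPEC_TYPES = (
        rdf_file_finder.CollectSingleFileResult,
        rdf_file_finder.CollectMultipleFilesResult,
        rdf_file_finder.CollectFilesByKnownPathResult,
    )

    if isinstance(item, _STAT_PATHSPEC_TYPES):
      return db.ClientPath.FromPathSpec(client_id, item.stat.pathspec)
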
diff --git a/grr/server/grr_response_server/gui/api_plugins/client.py b/grr/server/grr_response_server/gui/api_plugins/client.py
index 8fcf582d53..a285d0d189 100644
--- a/grr/server/grr_response_server/gui/api_plugins/client.py
+++ b/grr/server/grr_response_server/gui/api_plugins/client.py
@@ -439,8 +439,10 @@ class ApiGetClientVersionsHandler(api_call_handler_base.ApiCallHandler):
def Handle(self, args, context=None):
end_time = args.end or rdfvalue.RDFDatetime.Now()
- start_time = args.start or end_time - rdfvalue.Duration.From(
- 3, rdfvalue.MINUTES)
+ start_time = max(
+ args.start or end_time - rdfvalue.Duration.From(3, rdfvalue.MINUTES),
+ data_store.REL_DB.MinTimestamp(),
+ )
items = []
client_id = str(args.client_id)
diff --git a/grr/server/grr_response_server/gui/api_plugins/user.py b/grr/server/grr_response_server/gui/api_plugins/user.py
index 84e7e55865..933c089ec2 100644
--- a/grr/server/grr_response_server/gui/api_plugins/user.py
+++ b/grr/server/grr_response_server/gui/api_plugins/user.py
@@ -1408,8 +1408,11 @@ class ApiListAndResetUserNotificationsHandler(
def Handle(self, args, context=None):
"""Fetches the user notifications."""
- back_timestamp = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
- 2 * 52, rdfvalue.WEEKS)
+ back_timestamp = max(
+ rdfvalue.RDFDatetime.Now()
+ - rdfvalue.Duration.From(2 * 52, rdfvalue.WEEKS),
+ data_store.REL_DB.MinTimestamp(),
+ )
ns = data_store.REL_DB.ReadUserNotifications(
context.username, timerange=(back_timestamp, None))
diff --git a/grr/server/grr_response_server/gui/ui/components/client_add_label_dialog/module.ts b/grr/server/grr_response_server/gui/ui/components/client_add_label_dialog/module.ts
index c6e36b7acf..33f621e4ce 100644
--- a/grr/server/grr_response_server/gui/ui/components/client_add_label_dialog/module.ts
+++ b/grr/server/grr_response_server/gui/ui/components/client_add_label_dialog/module.ts
@@ -18,24 +18,19 @@ import {ClientAddLabelDialog} from './client_add_label_dialog';
imports: [
CommonModule,
ReactiveFormsModule,
-
MatAutocompleteModule,
MatButtonModule,
MatDialogModule,
MatInputModule,
MatTooltipModule,
-
SubmitOnMetaEnterModule,
],
declarations: [
ClientAddLabelDialog,
],
- entryComponents: [
- ClientAddLabelDialog,
- ],
exports: [
ClientAddLabelDialog,
- ],
+ ]
})
export class ClientAddLabelDialogModule {
}
diff --git a/grr/server/grr_response_server/gui/ui/components/client_details/module.ts b/grr/server/grr_response_server/gui/ui/components/client_details/module.ts
index a72affa27f..26eb0c8912 100644
--- a/grr/server/grr_response_server/gui/ui/components/client_details/module.ts
+++ b/grr/server/grr_response_server/gui/ui/components/client_details/module.ts
@@ -28,14 +28,12 @@ import {VolumesDetailsModule} from './volumes_details/module';
imports: [
CommonModule,
RouterModule,
-
MatButtonModule,
MatChipsModule,
MatDialogModule,
MatDividerModule,
MatIconModule,
MatListModule,
-
ClientDetailsRoutingModule,
CopyButtonModule,
EntryHistoryButtonModule,
@@ -49,9 +47,6 @@ import {VolumesDetailsModule} from './volumes_details/module';
declarations: [
ClientDetails,
],
- entryComponents: [
- EntryHistoryDialog,
- ],
exports: [
ClientDetails,
]
diff --git a/grr/server/grr_response_server/gui/ui/components/flow_args_form/module.ts b/grr/server/grr_response_server/gui/ui/components/flow_args_form/module.ts
index 7bf39e52f7..96e22651ad 100644
--- a/grr/server/grr_response_server/gui/ui/components/flow_args_form/module.ts
+++ b/grr/server/grr_response_server/gui/ui/components/flow_args_form/module.ts
@@ -79,7 +79,6 @@ const FORMS = [
FormsModule,
ReactiveFormsModule,
RouterModule,
-
CdkTreeModule,
MatAutocompleteModule,
MatButtonModule,
@@ -95,9 +94,7 @@ const FORMS = [
MatSelectModule,
MatTreeModule,
MatTooltipModule,
-
CodeEditorModule,
-
ByteComponentsModule,
CommaSeparatedInputModule,
DateTimeInputModule,
@@ -108,10 +105,9 @@ const FORMS = [
ValidationModule,
],
declarations: FORMS,
- entryComponents: FORMS,
exports: [
FlowArgsForm,
- ],
+ ]
})
export class FlowArgsFormModule {
}
diff --git a/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/module.ts b/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/module.ts
index 205f4b4012..85df83220b 100644
--- a/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/module.ts
+++ b/grr/server/grr_response_server/gui/ui/components/flow_args_form/osquery_query_helper/module.ts
@@ -32,9 +32,6 @@ import {TableInfoItem} from './table_info_item';
],
exports: [
OsqueryQueryHelper,
- ],
- entryComponents: [
- OsqueryQueryHelper,
]
})
export class OsqueryQueryHelperModule {
diff --git a/grr/server/grr_response_server/gui/ui/components/flow_args_view/module.ts b/grr/server/grr_response_server/gui/ui/components/flow_args_view/module.ts
index c897941ebd..241fb5d674 100644
--- a/grr/server/grr_response_server/gui/ui/components/flow_args_view/module.ts
+++ b/grr/server/grr_response_server/gui/ui/components/flow_args_view/module.ts
@@ -25,12 +25,9 @@ import {FlowArgsView} from './flow_args_view';
declarations: [
FlowArgsView,
],
- entryComponents: [
- FlowArgsView,
- ],
exports: [
FlowArgsView,
- ],
+ ]
})
export class FlowArgsViewModule {
}
diff --git a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts
index d3c0a25aac..b54a163ef2 100644
--- a/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts
+++ b/grr/server/grr_response_server/gui/ui/components/flow_details/plugins/module.ts
@@ -81,7 +81,6 @@ const COMPONENTS = [
FormsModule,
ReactiveFormsModule,
RouterModule,
-
CdkTreeModule,
MatButtonModule,
MatCardModule,
@@ -99,7 +98,6 @@ const COMPONENTS = [
MatTabsModule,
MatSortModule,
MatTreeModule,
-
CopyButtonModule,
DrawerLinkModule,
HelpersModule,
@@ -107,8 +105,7 @@ const COMPONENTS = [
FilterPaginate,
],
declarations: COMPONENTS,
- exports: COMPONENTS,
- entryComponents: COMPONENTS
+ exports: COMPONENTS
})
export class PluginsModule {
}
diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results.ng.html b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results.ng.html
index 80a2d7a019..7a4dd476f5 100644
--- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results.ng.html
+++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results.ng.html
@@ -48,6 +48,14 @@
{{ element[colDesc.key] }}
- {{
+ {{
element[colDesc.key]
}}
diff --git a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results_test.ts b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results_test.ts
index e6bc976751..155b5c12bf 100644
--- a/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results_test.ts
+++ b/grr/server/grr_response_server/gui/ui/components/hunt/hunt_page/hunt_results/hunt_results_test.ts
@@ -711,4 +711,54 @@ describe('HuntResults', () => {
expect(rows[0].innerText.trim()).toContain('banana');
expect(rows[0].innerText.trim()).toContain('1970-01-01 00:00:00 UTC');
});
+
+ it('expands ExecuteBinaryResponse type', () => {
+ const fixture = TestBed.createComponent(HuntResults);
+ fixture.detectChanges();
+
+ const res: readonly ApiHuntResult[] = [
+ {
+ clientId: 'C.1234',
+ payloadType: 'ExecuteBinaryResponse',
+ payload: {
+ 'exitStatus': 0,
+ 'stdout': btoa('I\'m out'),
+ 'stderr': btoa('I\'m groot'),
+ }
+ },
+ ];
+ receiveResults(huntPageGlobalStore, res);
+ fixture.detectChanges();
+
+ const rows = fixture.nativeElement.querySelectorAll('mat-row');
+ expect(rows.length).toBe(1);
+
+ expect(rows[0].innerText.trim()).toContain('C.1234');
+ expect(rows[0].innerText.trim()).toContain('0');
+ expect(rows[0].innerText.trim()).toContain('I\'m out');
+ expect(rows[0].innerText.trim()).toContain('I\'m groot');
+ });
+
+ it('expands ExecutePythonHackResult type', () => {
+ const fixture = TestBed.createComponent(HuntResults);
+ fixture.detectChanges();
+
+ const res: readonly ApiHuntResult[] = [
+ {
+ clientId: 'C.1234',
+ payloadType: 'ExecutePythonHackResult',
+ payload: {
+ 'resultString': 'potato',
+ }
+ },
+ ];
+ receiveResults(huntPageGlobalStore, res);
+ fixture.detectChanges();
+
+ const rows = fixture.nativeElement.querySelectorAll('mat-row');
+ expect(rows.length).toBe(1);
+
+ expect(rows[0].innerText.trim()).toContain('C.1234');
+ expect(rows[0].innerText.trim()).toContain('potato');
+ });
});
diff --git a/grr/server/grr_response_server/gui/ui/lib/api/http_api_service.ts b/grr/server/grr_response_server/gui/ui/lib/api/http_api_service.ts
index a84b3deb91..97ece965cf 100644
--- a/grr/server/grr_response_server/gui/ui/lib/api/http_api_service.ts
+++ b/grr/server/grr_response_server/gui/ui/lib/api/http_api_service.ts
@@ -904,9 +904,9 @@ export class HttpApiService {
const params = new HttpParams({
fromObject: {
- // If start not set, fetch from beginning of time
- start: ((start?.getTime() ?? 1) * 1000).toString(),
- end: ((end ?? new Date()).getTime() * 1000).toString(),
+ // If start is not set, fetch from 1 second past the epoch.
+ 'start': ((start?.getTime() ?? 1000) * 1000).toString(),
+ 'end': ((end ?? new Date()).getTime() * 1000).toString(),
}
});
diff --git a/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts b/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts
index 0d29a0ef1c..55d9d13721 100644
--- a/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts
+++ b/grr/server/grr_response_server/gui/ui/lib/api_translation/result.ts
@@ -1,7 +1,7 @@
import * as apiInterfaces from '../api/api_interfaces';
import {CellComponent, CellData, ColumnDescriptor, PayloadTranslation} from '../models/result';
-import {translateHashToHex} from './flow';
+import {translateExecuteBinaryResponse, translateHashToHex} from './flow';
import {getHuntResultKey} from './hunt';
import {createOptionalBigInt, createOptionalDate} from './primitive';
@@ -188,6 +188,31 @@ export function toHuntErrorRow(err: apiInterfaces.ApiHuntError, huntId: string):
};
}
+/** EXECUTE_BINARY_COLUMNS describes how to render an ExecuteBinaryRow. */
+export const EXECUTE_BINARY_COLUMNS = {
+ 'exitStatus': {title: 'Exit status'},
+ 'stdout': {title: 'Standard output', component: CellComponent.TRACE},
+ 'stderr': {title: 'Error output', component: CellComponent.TRACE},
+} as const;
+
+/** Constructs an ExecuteBinaryRow from an ExecuteBinaryResponse. */
+export function toExecuteBinaryRow(e: apiInterfaces.ExecuteBinaryResponse):
+ CellData {
+ return translateExecuteBinaryResponse(e);
+}
+
+/** EXECUTE_PYTHON_HACK_COLUMNS describes how to render an ExecutePythonHackRow. */
+export const EXECUTE_PYTHON_HACK_COLUMNS = {
+ 'result': {title: 'Result', component: CellComponent.TRACE},
+} as const;
+
+/** Constructs an ExecutePythonHackRow from an ExecutePythonHackResult. */
+export function toExecutePythonHackRow(
+ e: apiInterfaces.ExecutePythonHackResult):
+ CellData {
+ return {'result': e.resultString};
+}
+
const FILE_TAB = 'Files';
const CLIENT_INFO_TAB = 'Client Info';
/** ERROR_TAB is used to identify error results tab. */
@@ -204,6 +229,8 @@ export enum PayloadType {
STAT_ENTRY = 'StatEntry',
USER = 'User',
API_HUNT_ERROR = 'ApiHuntError',
+ EXECUTE_BINARY_RESPONSE = 'ExecuteBinaryResponse',
+ EXECUTE_PYTHON_HACK_RESULT = 'ExecutePythonHackResult',
}
/** Maps PayloadType to corresponding translation information. */
@@ -254,4 +281,14 @@ export const PAYLOAD_TYPE_TRANSLATION: {
translateFn: toHuntErrorRow,
columns: ERROR_COLUMNS,
} as PayloadTranslation,
+ [PayloadType.EXECUTE_BINARY_RESPONSE]: {
+ tabName: 'Binary execution',
+ translateFn: toExecuteBinaryRow,
+ columns: EXECUTE_BINARY_COLUMNS,
+ } as PayloadTranslation,
+ [PayloadType.EXECUTE_PYTHON_HACK_RESULT]: {
+ tabName: 'Python execution',
+ translateFn: toExecutePythonHackRow,
+ columns: EXECUTE_PYTHON_HACK_COLUMNS,
+ } as PayloadTranslation,
} as const;
diff --git a/grr/server/grr_response_server/gui/ui/lib/models/result.ts b/grr/server/grr_response_server/gui/ui/lib/models/result.ts
index b1d2ae3bde..36fd91c1b3 100644
--- a/grr/server/grr_response_server/gui/ui/lib/models/result.ts
+++ b/grr/server/grr_response_server/gui/ui/lib/models/result.ts
@@ -12,6 +12,7 @@ export enum CellComponent {
HASH,
HUMAN_READABLE_SIZE,
TIMESTAMP, // Takes in a Date object (see ComponentToType below)
+ TRACE,
USERNAME,
}
@@ -35,6 +36,7 @@ export declare interface ComponentToType {
[CellComponent.HASH]: HexHash|undefined;
[CellComponent.HUMAN_READABLE_SIZE]: bigint|undefined;
[CellComponent.TIMESTAMP]: Date|undefined;
+ [CellComponent.TRACE]: string|undefined;
[CellComponent.USERNAME]: string|undefined;
}
diff --git a/grr/server/grr_response_server/stats_server.py b/grr/server/grr_response_server/stats_server.py
index a65faa18de..f8095d3c1b 100644
--- a/grr/server/grr_response_server/stats_server.py
+++ b/grr/server/grr_response_server/stats_server.py
@@ -3,6 +3,7 @@
import errno
from http import server as http_server
+import ipaddress
import logging
import socket
import threading
@@ -16,13 +17,14 @@
StatsServerHandler = prometheus_client.MetricsHandler
-# Python's standard HTTP server implementation is broken and will work through
-# a IPv4 socket. This means, that on IPv6 only environment, the code will fail
-# to create the socket and fail in mysterious ways.
+# Python's standard HTTP server implementation creates an IPv4 socket by
+# default. This means that in an IPv6-only environment, the code will fail
+# to create the socket and fail in mysterious ways.
#
-# We hack around this by overriding the `address_family` that `HTTPServer` uses
-# to create the socket and always use IPv6 (it was introduced in 1995, so it is
-# safe to expect that every modern stack will support it already).
+# We work around this by overriding the `address_family` that `HTTPServer`
+# uses to create the socket: we default to IPv6 (introduced in 1995), but
+# fall back to IPv4 when the configured address requires it, since many
+# environments - Cloud ones, for example - still use IPv4.
class IPv6HTTPServer(http_server.HTTPServer):
address_family = socket.AF_INET6
@@ -44,9 +46,17 @@ def __init__(self, address, port):
def Start(self):
"""Start HTTPServer."""
+ ip = ipaddress.ip_address(self.address)
+ if ip.version == 4:
+ server_cls = http_server.HTTPServer
+ else:
+ server_cls = IPv6HTTPServer
+
try:
- self._http_server = IPv6HTTPServer((self.address, self.port),
- StatsServerHandler)
+ self._http_server = server_cls(
+ (self.address, self.port),
+ StatsServerHandler,
+ )
except socket.error as e:
if e.errno == errno.EADDRINUSE:
raise base_stats_server.PortInUseError(self.port)
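
Note that `ipaddress.ip_address` accepts only literal addresses, so this
selection assumes `self.address` is an IP literal rather than a hostname.
A quick illustration of the dispatch:

    import ipaddress
    from http import server as http_server

    def ServerClassFor(address):
      # Raises ValueError for hostnames such as "localhost".
      if ipaddress.ip_address(address).version == 4:
        return http_server.HTTPServer
      return IPv6HTTPServer  # defined above; binds an AF_INET6 socket

    assert ServerClassFor("127.0.0.1") is http_server.HTTPServer
    assert ServerClassFor("::1") is IPv6HTTPServer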