From ca8602c5faa5f26437787b6902d407380442702d Mon Sep 17 00:00:00 2001
From: Bartek Tofel
Date: Thu, 23 May 2024 08:47:19 +0200
Subject: [PATCH] don't fail with Fatalf(), use Errorf() instead; break loop
 when first concerning log is found

---
 .../docker/test_env/test_env_builder.go | 10 +++++---
 integration-tests/utils/slice.go        | 28 ++++++++++++++++++++
 2 files changed, 35 insertions(+), 3 deletions(-)
 create mode 100644 integration-tests/utils/slice.go

diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go
index 852918cc7d4..e43b0582d8d 100644
--- a/integration-tests/docker/test_env/test_env_builder.go
+++ b/integration-tests/docker/test_env/test_env_builder.go
@@ -306,21 +306,24 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
 
 		// we cannot do parallel processing here, because ProcessContainerLogs() locks a mutex that controls whether
 		// new logs can be added to the log stream, so parallel processing would get stuck on waiting for it to be unlocked
+	LogScanningLoop:
 		for i := 0; i < b.clNodesCount; i++ {
 			// ignore count return, because we are only interested in the error
 			_, err := logProcessor.ProcessContainerLogs(b.te.ClCluster.Nodes[i].ContainerName, processFn)
 			if err != nil && !strings.Contains(err.Error(), testreporters.MultipleLogsAtLogLevelErr) && !strings.Contains(err.Error(), testreporters.OneLogAtLogLevelErr) {
-				b.l.Error().Err(err).Msg("Error processing logs")
-				return
+				b.l.Error().Err(err).Msg("Error processing CL node logs")
+				continue
 			} else if err != nil && (strings.Contains(err.Error(), testreporters.MultipleLogsAtLogLevelErr) || strings.Contains(err.Error(), testreporters.OneLogAtLogLevelErr)) {
 				flushLogStream = true
-				b.t.Fatalf("Found a concerning log in Chainklink Node logs: %v", err)
+				b.t.Errorf("Found a concerning log in Chainlink Node logs: %v", err)
+				break LogScanningLoop
 			}
 		}
 		b.l.Info().Msg("Finished scanning Chainlink Node logs for concerning errors")
 	}
 
 	if flushLogStream {
+		b.l.Info().Msg("Flushing LogStream logs")
 		// we can't do much if this fails, so we just log the error in LogStream
 		if err := b.te.LogStream.FlushAndShutdown(); err != nil {
 			b.l.Error().Err(err).Msg("Error flushing and shutting down LogStream")
@@ -328,6 +331,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
 			b.te.LogStream.PrintLogTargetsLocations()
 			b.te.LogStream.SaveLogLocationInTestSummary()
 		}
+		b.l.Info().Msg("Finished shutting down LogStream")
 	})
 } else {
 	b.l.Warn().Msg("LogStream won't be cleaned up, because test instance is not set or cleanup type is not standard")
diff --git a/integration-tests/utils/slice.go b/integration-tests/utils/slice.go
new file mode 100644
index 00000000000..93c61eb682e
--- /dev/null
+++ b/integration-tests/utils/slice.go
@@ -0,0 +1,28 @@
+package utils
+
+func DivideSlice[T any](slice []T, parts int) [][]T {
+	var divided [][]T
+	if parts <= 0 { // guard against a division-by-zero panic below
+		return divided
+	}
+	if parts == 1 {
+		return [][]T{slice}
+	}
+
+	sliceLength := len(slice)
+	baseSize := sliceLength / parts
+	remainder := sliceLength % parts
+
+	start := 0
+	for i := 0; i < parts; i++ {
+		end := start + baseSize
+		if i < remainder { // Distribute the remainder among the first slices
+			end++
+		}
+
+		divided = append(divided, slice[start:end])
+		start = end
+	}
+
+	return divided
+}
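
Note: a minimal usage sketch for the new DivideSlice helper, e.g. splitting node indices into near-equal batches. The import path and the surrounding main package are assumptions for illustration only, not part of the patch:

package main

import (
	"fmt"

	// assumed import path for the integration-tests module's utils package
	"github.com/smartcontractkit/chainlink/integration-tests/utils"
)

func main() {
	// 10 items split into 3 parts: the remainder is distributed to the
	// first sub-slices, so the sizes come out as 4, 3 and 3.
	nodeIndices := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	for i, batch := range utils.DivideSlice(nodeIndices, 3) {
		fmt.Printf("part %d: %v\n", i, batch)
	}
}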