diff --git a/archives/archives.go b/archives/archives.go
index 76f8dbc..a7c65f9 100644
--- a/archives/archives.go
+++ b/archives/archives.go
@@ -9,6 +9,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"time"
@@ -19,7 +20,6 @@ import (
 	"github.com/nyaruka/gocommon/analytics"
 	"github.com/nyaruka/gocommon/dates"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // ArchiveType is the type for the archives
@@ -471,13 +471,13 @@ func CreateArchiveFile(ctx context.Context, db *sqlx.DB, archive *Archive, archi
 	start := dates.Now()
 
-	log := logrus.WithFields(logrus.Fields{
-		"org_id": archive.Org.ID,
-		"archive_type": archive.ArchiveType,
-		"start_date": archive.StartDate,
-		"end_date": archive.endDate(),
-		"period": archive.Period,
-	})
+	log := slog.With(
+		"org_id", archive.Org.ID,
+		"archive_type", archive.ArchiveType,
+		"start_date", archive.StartDate,
+		"end_date", archive.endDate(),
+		"period", archive.Period,
+	)
 
 	filename := fmt.Sprintf("%s_%d_%s%d%02d%02d_", archive.ArchiveType, archive.Org.ID, archive.Period, archive.StartDate.Year(), archive.StartDate.Month(), archive.StartDate.Day())
 	file, err := os.CreateTemp(archivePath, filename)
@@ -490,7 +490,7 @@ func CreateArchiveFile(ctx context.Context, db *sqlx.DB, archive *Archive, archi
 		if archive.ArchiveFile == "" {
 			err = os.Remove(file.Name())
 			if err != nil {
-				log.WithError(err).WithField("filename", file.Name()).Error("error cleaning up archive file")
+				log.Error("error cleaning up archive file", "error", err, "filename", file.Name())
 			}
 		}
 	}()
@@ -500,9 +500,7 @@ func CreateArchiveFile(ctx context.Context, db *sqlx.DB, archive *Archive, archi
 	writer := bufio.NewWriter(gzWriter)
 	defer file.Close()
 
-	log.WithFields(logrus.Fields{
-		"filename": file.Name(),
-	}).Debug("creating new archive file")
+	log.Debug("creating new archive file", "filename", file.Name())
 
 	recordCount := 0
 	switch archive.ArchiveType {
@@ -540,13 +538,13 @@ func CreateArchiveFile(ctx context.Context, db *sqlx.DB, archive *Archive, archi
 	archive.RecordCount = recordCount
 	archive.BuildTime = int(dates.Since(start) / time.Millisecond)
 
-	log.WithFields(logrus.Fields{
-		"record_count": recordCount,
-		"filename": file.Name(),
-		"file_size": archive.Size,
-		"file_hash": archive.Hash,
-		"elapsed": dates.Since(start),
-	}).Debug("completed writing archive file")
+	log.Debug("completed writing archive file",
+		"record_count", recordCount,
+		"filename", file.Name(),
+		"file_size", archive.Size,
+		"file_hash", archive.Hash,
+		"elapsed", dates.Since(start),
+	)
 
 	return nil
 }
@@ -578,16 +576,15 @@ func UploadArchive(ctx context.Context, s3Client s3iface.S3API, bucket string, a
 
 	archive.NeedsDeletion = true
 
-	logrus.WithFields(logrus.Fields{
-		"org_id": archive.Org.ID,
-		"archive_type": archive.ArchiveType,
-		"start_date": archive.StartDate,
-		"period": archive.Period,
-		"url": archive.URL,
-		"file_size": archive.Size,
-		"file_hash": archive.Hash,
-	}).Debug("completed uploading archive file")
-
+	slog.Debug("completed uploading archive file",
+		"org_id", archive.Org.ID,
+		"archive_type", archive.ArchiveType,
+		"start_date", archive.StartDate,
+		"period", archive.Period,
+		"url", archive.URL,
+		"file_size", archive.Size,
+		"file_hash", archive.Hash,
+	)
 	return nil
 }
 
@@ -667,14 +664,14 @@ func DeleteArchiveFile(archive *Archive) error {
 		return errors.Wrapf(err, "error deleting temp archive file: %s", archive.ArchiveFile)
 	}
 
-	logrus.WithFields(logrus.Fields{
-		"org_id": archive.Org.ID,
-		"archive_type": archive.ArchiveType,
-		"start_date": archive.StartDate,
-		"periond": archive.Period,
-		"db_archive_id": archive.ID,
-		"filename": archive.ArchiveFile,
-	}).Debug("deleted temporary archive file")
+	slog.Debug("deleted temporary archive file",
+		"org_id", archive.Org.ID,
+		"archive_type", archive.ArchiveType,
+		"start_date", archive.StartDate,
+		"period", archive.Period,
+		"db_archive_id", archive.ID,
+		"filename", archive.ArchiveFile,
+	)
 
 	return nil
 }
@@ -722,7 +719,7 @@ func createArchive(ctx context.Context, db *sqlx.DB, config *Config, s3Client s3
 		if !config.KeepFiles {
 			err := DeleteArchiveFile(archive)
 			if err != nil {
-				logrus.WithError(err).Error("error deleting temporary archive file")
+				slog.Error("error deleting temporary archive file", "error", err)
 			}
 		}
 	}()
@@ -743,21 +740,21 @@
 }
 
 func createArchives(ctx context.Context, db *sqlx.DB, config *Config, s3Client s3iface.S3API, org Org, archives []*Archive) ([]*Archive, []*Archive) {
-	log := logrus.WithFields(logrus.Fields{"org_id": org.ID, "org_name": org.Name})
+	log := slog.With("org_id", org.ID, "org_name", org.Name)
 
 	created := make([]*Archive, 0, len(archives))
 	failed := make([]*Archive, 0, 5)
 
 	for _, archive := range archives {
-		log.WithFields(logrus.Fields{"start_date": archive.StartDate, "end_date": archive.endDate(), "period": archive.Period, "archive_type": archive.ArchiveType}).Debug("starting archive")
+		log.With("start_date", archive.StartDate, "end_date", archive.endDate(), "period", archive.Period, "archive_type", archive.ArchiveType).Debug("starting archive")
 		start := dates.Now()
 
 		err := createArchive(ctx, db, config, s3Client, archive)
 		if err != nil {
-			log.WithError(err).Error("error creating archive")
+			log.Error("error creating archive", "error", err)
 			failed = append(failed, archive)
 		} else {
-			log.WithFields(logrus.Fields{"id": archive.ID, "record_count": archive.RecordCount, "elapsed": dates.Since(start)}).Debug("archive complete")
+			log.Debug("archive complete", "id", archive.ID, "record_count", archive.RecordCount, "elapsed", dates.Since(start))
 			created = append(created, archive)
 		}
 	}
@@ -770,7 +767,7 @@ func RollupOrgArchives(ctx context.Context, now time.Time, config *Config, db *s
 	ctx, cancel := context.WithTimeout(ctx, time.Hour*3)
 	defer cancel()
 
-	log := logrus.WithFields(logrus.Fields{"org_id": org.ID, "org_name": org.Name, "archive_type": archiveType})
+	log := slog.With("org_id", org.ID, "org_name", org.Name, "archive_type", archiveType)
 
 	// get our missing monthly archives
 	archives, err := GetMissingMonthlyArchives(ctx, db, now, org, archiveType)
@@ -783,12 +780,12 @@ func RollupOrgArchives(ctx context.Context, now time.Time, config *Config, db *s
 	// build them from rollups
 	for _, archive := range archives {
-		log := log.WithFields(logrus.Fields{"start_date": archive.StartDate})
+		log := log.With("start_date", archive.StartDate)
 
 		start := dates.Now()
 		err = BuildRollupArchive(ctx, db, config, s3Client, archive, now, org, archiveType)
 		if err != nil {
-			log.WithError(err).Error("error building monthly archive")
+			log.Error("error building monthly archive", "error", err)
 			failed = append(failed, archive)
 			continue
 		}
@@ -796,7 +793,7 @@
 		if config.UploadToS3 {
 			err = UploadArchive(ctx, s3Client, config.S3Bucket, archive)
 			if err != nil {
-				log.WithError(err).Error("error writing archive to s3")
+				log.Error("error writing archive to s3", "error", err)
 				failed = append(failed, archive)
 				continue
 			}
@@ -804,7 +801,7 @@ func RollupOrgArchives(ctx context.Context, now time.Time, config *Config, db *s
 		err = WriteArchiveToDB(ctx, db, archive)
 		if err != nil {
-			log.WithError(err).Error("error writing record to db")
+			log.Error("error writing record to db", "error", err)
 			failed = append(failed, archive)
 			continue
 		}
@@ -812,12 +809,12 @@
 		if !config.KeepFiles {
 			err := DeleteArchiveFile(archive)
 			if err != nil {
-				log.WithError(err).Error("error deleting temporary file")
+				log.Error("error deleting temporary file", "error", err)
 				continue
 			}
 		}
 
-		log.WithFields(logrus.Fields{"id": archive.ID, "record_count": archive.RecordCount, "elapsed": dates.Since(start)}).Info("rollup created")
+		log.Info("rollup created", "id", archive.ID, "record_count", archive.RecordCount, "elapsed", dates.Since(start))
 		created = append(created, archive)
 	}
@@ -839,14 +836,14 @@ func DeleteArchivedOrgRecords(ctx context.Context, now time.Time, config *Config
 	// for each archive
 	deleted := make([]*Archive, 0, len(archives))
 	for _, a := range archives {
-		log := logrus.WithFields(logrus.Fields{
-			"archive_id": a.ID,
-			"org_id": a.OrgID,
-			"type": a.ArchiveType,
-			"count": a.RecordCount,
-			"start": a.StartDate,
-			"period": a.Period,
-		})
+		log := slog.With(
+			"archive_id", a.ID,
+			"org_id", a.OrgID,
+			"type", a.ArchiveType,
+			"count", a.RecordCount,
+			"start", a.StartDate,
+			"period", a.Period,
+		)
 
 		start := dates.Now()
 
@@ -868,12 +865,12 @@
 		}
 
 		if err != nil {
-			log.WithError(err).Error("error deleting archive")
+			log.Error("error deleting archive", "error", err)
 			continue
 		}
 
 		deleted = append(deleted, a)
-		log.WithFields(logrus.Fields{"elapsed": dates.Since(start)}).Info("deleted archive records")
+		log.Info("deleted archive records", "elapsed", dates.Since(start))
 	}
 
 	return deleted, nil
@@ -881,7 +878,7 @@ func DeleteArchivedOrgRecords(ctx context.Context, now time.Time, config *Config
 
 // ArchiveOrg looks for any missing archives for the passed in org, creating and uploading them as necessary, returning the created archives
 func ArchiveOrg(ctx context.Context, now time.Time, cfg *Config, db *sqlx.DB, s3Client s3iface.S3API, org Org, archiveType ArchiveType) ([]*Archive, []*Archive, []*Archive, []*Archive, []*Archive, error) {
-	log := logrus.WithFields(logrus.Fields{"org_id": org.ID, "org_name": org.Name})
+	log := slog.With("org_id", org.ID, "org_name", org.Name)
 
 	start := dates.Now()
 	dailiesCreated, dailiesFailed, monthliesCreated, monthliesFailed, err := CreateOrgArchives(ctx, now, cfg, db, s3Client, org, archiveType)
@@ -892,7 +889,7 @@ func ArchiveOrg(ctx context.Context, now time.Time, cfg *Config, db *sqlx.DB, s3
 	if len(dailiesCreated) > 0 {
 		elapsed := dates.Since(start)
 		rate := float32(countRecords(dailiesCreated)) / (float32(elapsed) / float32(time.Second))
-		log.WithFields(logrus.Fields{"elapsed": elapsed, "records_per_second": rate}).Info("completed archival for org")
+		log.Info("completed archival for org", "elapsed", elapsed, "records_per_second", rate)
 	}
 
 	rollupsCreated, rollupsFailed, err := RollupOrgArchives(ctx, now, cfg, db, s3Client, org, archiveType)
@@ -939,12 +936,12 @@ func ArchiveActiveOrgs(db *sqlx.DB, cfg *Config, s3Client s3iface.S3API) error {
 	for _, org := range orgs {
 		// no single org should take more than 12 hours
 		ctx, cancel := context.WithTimeout(context.Background(), time.Hour*12)
-		log := logrus.WithField("org_id", org.ID).WithField("org_name", org.Name)
+		log := slog.With("org_id", org.ID, "org_name", org.Name)
 
 		if cfg.ArchiveMessages {
 			dailiesCreated, dailiesFailed, monthliesCreated, monthliesFailed, _, err := ArchiveOrg(ctx, start, cfg, db, s3Client, org, MessageType)
 			if err != nil {
-				log.WithError(err).WithField("archive_type", MessageType).Error("error archiving org messages")
+				log.Error("error archiving org messages", "error", err, "archive_type", MessageType)
 			}
 			totalMsgsRecordsArchived += countRecords(dailiesCreated)
 			totalMsgsArchivesCreated += len(dailiesCreated)
@@ -955,7 +952,7 @@ func ArchiveActiveOrgs(db *sqlx.DB, cfg *Config, s3Client s3iface.S3API) error {
 		if cfg.ArchiveRuns {
 			dailiesCreated, dailiesFailed, monthliesCreated, monthliesFailed, _, err := ArchiveOrg(ctx, start, cfg, db, s3Client, org, RunType)
 			if err != nil {
-				log.WithError(err).WithField("archive_type", RunType).Error("error archiving org runs")
+				log.Error("error archiving org runs", "error", err, "archive_type", RunType)
 			}
 			totalRunsRecordsArchived += countRecords(dailiesCreated)
 			totalRunsArchivesCreated += len(dailiesCreated)
@@ -968,7 +965,7 @@ func ArchiveActiveOrgs(db *sqlx.DB, cfg *Config, s3Client s3iface.S3API) error {
 	}
 
 	timeTaken := dates.Now().Sub(start)
-	logrus.WithField("time_taken", timeTaken).WithField("num_orgs", len(orgs)).Info("archiving of active orgs complete")
+	slog.Info("archiving of active orgs complete", "time_taken", timeTaken, "num_orgs", len(orgs))
 
 	analytics.Gauge("archiver.archive_elapsed", timeTaken.Seconds())
 	analytics.Gauge("archiver.orgs_archived", float64(len(orgs)))
diff --git a/archives/archives_test.go b/archives/archives_test.go
index f155b1b..2f1540c 100644
--- a/archives/archives_test.go
+++ b/archives/archives_test.go
@@ -4,6 +4,7 @@ import (
 	"compress/gzip"
 	"context"
 	"io"
+	"log/slog"
 	"os"
 	"testing"
 	"time"
@@ -13,7 +14,6 @@ import (
 	"github.com/nyaruka/ezconf"
 	"github.com/nyaruka/gocommon/analytics"
 	"github.com/nyaruka/gocommon/dates"
-	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -26,7 +26,8 @@ func setup(t *testing.T) *sqlx.DB {
 	_, err = db.Exec(string(testDB))
 	assert.NoError(t, err)
 
-	logrus.SetLevel(logrus.DebugLevel)
+
+	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))
 
 	return db
 }
diff --git a/archives/messages.go b/archives/messages.go
index d45b711..57fdd12 100644
--- a/archives/messages.go
+++ b/archives/messages.go
@@ -4,13 +4,13 @@ import (
 	"bufio"
 	"context"
 	"fmt"
+	"log/slog"
 	"time"
 
 	"github.com/aws/aws-sdk-go/service/s3/s3iface"
 	"github.com/jmoiron/sqlx"
 	"github.com/nyaruka/gocommon/dates"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -93,7 +93,7 @@ func writeMessageRecords(ctx context.Context, db *sqlx.DB, archive *Archive, wri
 		recordCount++
 	}
 
-	logrus.WithField("record_count", recordCount).Debug("Done Writing")
+	slog.Debug("Done Writing", "record_count", recordCount)
 
 	return recordCount, nil
 }
@@ -119,14 +119,14 @@ func DeleteArchivedMessages(ctx context.Context, config *Config, db *sqlx.DB, s3
 	defer cancel()
 
 	start := dates.Now()
-	log := logrus.WithFields(logrus.Fields{
-		"id": archive.ID,
-		"org_id": archive.OrgID,
-		"start_date": archive.StartDate,
-		"end_date": archive.endDate(),
-		"archive_type": archive.ArchiveType,
-		"total_count": archive.RecordCount,
-	})
+	log := slog.With(
+		"id", archive.ID,
+		"org_id", archive.OrgID,
+		"start_date", archive.StartDate,
+		"end_date", archive.endDate(),
+		"archive_type", archive.ArchiveType,
+		"total_count", archive.RecordCount,
+	)
 	log.Info("deleting messages")
 
 	// first things first, make sure our file is correct on S3
@@ -169,7 +169,7 @@ func DeleteArchivedMessages(ctx context.Context, config *Config, db *sqlx.DB, s3
 	}
 	rows.Close()
 
-	log.WithField("msg_count", len(msgIDs)).Debug("found messages")
+	log.Debug("found messages", "msg_count", len(msgIDs))
 
 	// verify we don't see more messages than there are in our archive (fewer is ok)
 	if visibleCount > archive.RecordCount {
@@ -208,7 +208,7 @@ func DeleteArchivedMessages(ctx context.Context, config *Config, db *sqlx.DB, s3
 			return errors.Wrap(err, "error committing message delete transaction")
 		}
 
-		log.WithField("elapsed", dates.Since(start)).WithField("count", len(idBatch)).Debug("deleted batch of messages")
+		log.Debug("deleted batch of messages", "elapsed", dates.Since(start), "count", len(idBatch))
 
 		cancel()
 	}
@@ -226,7 +226,7 @@ func DeleteArchivedMessages(ctx context.Context, config *Config, db *sqlx.DB, s3
 	archive.NeedsDeletion = false
 	archive.DeletedOn = &deletedOn
 
-	logrus.WithField("elapsed", dates.Since(start)).Info("completed deleting messages")
+	slog.Info("completed deleting messages", "elapsed", dates.Since(start))
 
 	return nil
 }
@@ -251,7 +251,8 @@ func DeleteBroadcasts(ctx context.Context, now time.Time, config *Config, db *sq
 	count := 0
 	for rows.Next() {
 		if count == 0 {
-			logrus.WithField("org_id", org.ID).Info("deleting broadcasts")
+			slog.Info("deleting broadcasts", "org_id", org.ID)
+
 		}
 
 		// been deleting this org more than an hour? thats enough for today, exit out
@@ -307,7 +308,7 @@ func DeleteBroadcasts(ctx context.Context, now time.Time, config *Config, db *sq
 	}
 
 	if count > 0 {
-		logrus.WithFields(logrus.Fields{"elapsed": dates.Since(start), "count": count, "org_id": org.ID}).Info("completed deleting broadcasts")
+		slog.Info("completed deleting broadcasts", "elapsed", dates.Since(start), "count", count, "org_id", org.ID)
 	}
 
 	return nil
diff --git a/archives/runs.go b/archives/runs.go
index 9a35957..f430418 100644
--- a/archives/runs.go
+++ b/archives/runs.go
@@ -4,13 +4,13 @@ import (
 	"bufio"
 	"context"
 	"fmt"
+	"log/slog"
 	"time"
 
 	"github.com/aws/aws-sdk-go/service/s3/s3iface"
 	"github.com/jmoiron/sqlx"
 	"github.com/nyaruka/gocommon/dates"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -114,14 +114,14 @@ func DeleteArchivedRuns(ctx context.Context, config *Config, db *sqlx.DB, s3Clie
 	defer cancel()
 
 	start := dates.Now()
-	log := logrus.WithFields(logrus.Fields{
-		"id": archive.ID,
-		"org_id": archive.OrgID,
-		"start_date": archive.StartDate,
-		"end_date": archive.endDate(),
-		"archive_type": archive.ArchiveType,
-		"total_count": archive.RecordCount,
-	})
+	log := slog.With(
+		"id", archive.ID,
+		"org_id", archive.OrgID,
+		"start_date", archive.StartDate,
+		"end_date", archive.endDate(),
+		"archive_type", archive.ArchiveType,
+		"total_count", archive.RecordCount,
+	)
 	log.Info("deleting runs")
 
 	// first things first, make sure our file is correct on S3
@@ -167,7 +167,7 @@ func DeleteArchivedRuns(ctx context.Context, config *Config, db *sqlx.DB, s3Clie
 	}
 	rows.Close()
 
-	log.WithField("run_count", len(runIDs)).Debug("found runs")
+	log.Debug("found runs", "run_count", len(runIDs))
 
 	// verify we don't see more runs than there are in our archive (fewer is ok)
 	if runCount > archive.RecordCount {
@@ -200,7 +200,7 @@ func DeleteArchivedRuns(ctx context.Context, config *Config, db *sqlx.DB, s3Clie
 			return errors.Wrap(err, "error committing run delete transaction")
 		}
 
-		log.WithField("elapsed", dates.Since(start)).WithField("count", len(idBatch)).Debug("deleted batch of runs")
+		log.Debug("deleted batch of runs", "elapsed", dates.Since(start), "count", len(idBatch))
 
 		cancel()
 	}
@@ -218,7 +218,7 @@ func DeleteArchivedRuns(ctx context.Context, config *Config, db *sqlx.DB, s3Clie
 	archive.NeedsDeletion = false
 	archive.DeletedOn = &deletedOn
 
-	logrus.WithField("elapsed", dates.Since(start)).Info("completed deleting runs")
+	slog.Info("completed deleting runs", "elapsed", dates.Since(start))
 
 	return nil
 }
@@ -243,7 +243,7 @@ func DeleteFlowStarts(ctx context.Context, now time.Time, config *Config, db *sq
 	count := 0
 	for rows.Next() {
 		if count == 0 {
-			logrus.WithField("org_id", org.ID).Info("deleting starts")
+			slog.Info("deleting starts", "org_id", org.ID)
 		}
 
 		// been deleting this org more than an hour? thats enough for today, exit out
@@ -306,7 +306,7 @@ func DeleteFlowStarts(ctx context.Context, now time.Time, config *Config, db *sq
 	}
 
 	if count > 0 {
-		logrus.WithFields(logrus.Fields{"elapsed": dates.Since(start), "count": count, "org_id": org.ID}).Info("completed deleting starts")
+		slog.Info("completed deleting starts", "elapsed", dates.Since(start), "count", count, "org_id", org.ID)
 	}
 
 	return nil
diff --git a/archives/s3.go b/archives/s3.go
index 5e7c3ca..17bd698 100644
--- a/archives/s3.go
+++ b/archives/s3.go
@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
+	"log/slog"
 	"net/url"
 	"os"
 	"strings"
@@ -17,7 +18,6 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3iface"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-	"github.com/sirupsen/logrus"
 )
 
 const s3BucketURL = "https://%s.s3.amazonaws.com%s"
@@ -44,7 +44,7 @@ func NewS3Client(config *Config) (s3iface.S3API, error) {
 		return nil, err
 	}
 	s3Session.Handlers.Send.PushFront(func(r *request.Request) {
-		logrus.WithField("headers", r.HTTPRequest.Header).WithField("service", r.ClientInfo.ServiceName).WithField("operation", r.Operation).WithField("params", r.Params).Debug("making aws request")
+		slog.Debug("making aws request", "headers", r.HTTPRequest.Header, "service", r.ClientInfo.ServiceName, "operation", r.Operation, "params", r.Params)
 	})
 
 	s3Client := s3.New(s3Session)
@@ -52,7 +52,7 @@ func NewS3Client(config *Config) (s3iface.S3API, error) {
 	// test out our S3 credentials
 	err = TestS3(s3Client, config.S3Bucket)
 	if err != nil {
-		logrus.WithError(err).Fatal("s3 bucket not reachable")
+		slog.Error("s3 bucket not reachable", "error", err)
 		return nil, err
 	}
 
diff --git a/cmd/rp-archiver/main.go b/cmd/rp-archiver/main.go
index 585cc38..d79f811 100644
--- a/cmd/rp-archiver/main.go
+++ b/cmd/rp-archiver/main.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"log/slog"
 	"os"
 	"strings"
 	"sync"
@@ -14,6 +15,7 @@ import (
 	"github.com/nyaruka/gocommon/analytics"
 	"github.com/nyaruka/gocommon/dates"
 	"github.com/nyaruka/rp-archiver/archives"
+	"github.com/nyaruka/rp-archiver/utils"
 	"github.com/sirupsen/logrus"
 )
 
@@ -42,6 +44,12 @@ func main() {
 	logrus.SetFormatter(&logrus.TextFormatter{})
 	logrus.WithField("version", version).WithField("released", date).Info("starting archiver")
 
+	// configure golang std structured logging to route to logrus
+	slog.SetDefault(slog.New(utils.NewLogrusHandler(logrus.StandardLogger())))
+
+	logger := slog.With("comp", "main")
+	logger.Info("starting archiver", "version", version, "released", date)
+
 	// if we have a DSN entry, try to initialize it
 	if config.SentryDSN != "" {
 		hook, err := logrus_sentry.NewSentryHook(config.SentryDSN, []logrus.Level{logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel})
@@ -57,7 +65,7 @@ func main() {
 	// our settings shouldn't contain a timezone, nothing will work right with this not being a constant UTC
 	if strings.Contains(config.DB, "TimeZone") {
-		logrus.WithField("db", config.DB).Fatalf("invalid db connection string, do not specify a timezone, archiver always uses UTC")
+		logger.Error("invalid db connection string, do not specify a timezone, archiver always uses UTC", "db", config.DB)
 	}
 
 	// force our DB connection to be in UTC
@@ -69,19 +77,19 @@ func main() {
 	db, err := sqlx.Open("postgres", config.DB)
 	if err != nil {
-		logrus.Fatal(err)
+		logger.Error("error connecting to db", "error", err)
 	} else {
 		db.SetMaxOpenConns(2)
-		logrus.WithField("state", "starting").Info("db ok")
+		logger.Info("db ok", "state", "starting")
 	}
 
 	var s3Client s3iface.S3API
 	if config.UploadToS3 {
 		s3Client, err = archives.NewS3Client(config)
 		if err != nil {
-			logrus.WithError(err).Fatal("unable to initialize s3 client")
+			logger.Error("unable to initialize s3 client", "error", err)
 		} else {
-			logrus.WithField("state", "starting").Info("s3 bucket ok")
+			logger.Info("s3 bucket ok", "state", "starting")
 		}
 	}
 
@@ -90,15 +98,15 @@ func main() {
 	// ensure that we can actually write to the temp directory
 	err = archives.EnsureTempArchiveDirectory(config.TempDir)
 	if err != nil {
-		logrus.WithError(err).Fatal("cannot write to temp directory")
+		logger.Error("cannot write to temp directory", "error", err)
 	} else {
-		logrus.WithField("state", "starting").Info("tmp file access ok")
+		logger.Info("tmp file access ok", "state", "starting")
 	}
 
 	// parse our start time
 	timeOfDay, err := dates.ParseTimeOfDay("tt:mm", config.StartTime)
 	if err != nil {
-		logrus.WithError(err).Fatal("invalid start time supplied, format: HH:MM")
+		logger.Error("invalid start time supplied, format: HH:MM", "error", err)
 	}
 
 	// if we have a librato token, configure it
@@ -115,7 +123,7 @@ func main() {
 		nextArchival := getNextArchivalTime(timeOfDay)
 		napTime := time.Until(nextArchival)
 
-		logrus.WithField("sleep_time", napTime).WithField("next_archival", nextArchival).Info("sleeping until next archival")
+		logger.Info("sleeping until next archival", "sleep_time", napTime, "next_archival", nextArchival)
 		time.Sleep(napTime)
 
 		doArchival(db, config, s3Client)
@@ -131,7 +139,7 @@ func doArchival(db *sqlx.DB, cfg *archives.Config, s3Client s3iface.S3API) {
 		// try to archive all active orgs, and if it fails, wait 5 minutes and try again
 		err := archives.ArchiveActiveOrgs(db, cfg, s3Client)
 		if err != nil {
-			logrus.WithError(err).Error("error archiving, will retry in 5 minutes")
+			slog.Error("error archiving, will retry in 5 minutes", "error", err)
 			time.Sleep(time.Minute * 5)
 			continue
 		} else {
diff --git a/go.sum b/go.sum
index 4cb0110..6ebca5c 100644
--- a/go.sum
+++ b/go.sum
@@ -19,6 +19,7 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -72,6 +73,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/utils/logrus.go b/utils/logrus.go
new file mode 100644
index 0000000..2650bf9
--- /dev/null
+++ b/utils/logrus.go
@@ -0,0 +1,92 @@
+// Structured logging handler for logrus so we can rewrite code to use slog package incrementally. Once all logging is
+// happening via slog, we just need to hook up Sentry directly to that, and then we can get rid of this file.
+package utils
+
+import (
+	"context"
+	"log/slog"
+	"slices"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+var levels = map[slog.Level]logrus.Level{
+	slog.LevelError: logrus.ErrorLevel,
+	slog.LevelWarn:  logrus.WarnLevel,
+	slog.LevelInfo:  logrus.InfoLevel,
+	slog.LevelDebug: logrus.DebugLevel,
+}
+
+type LogrusHandler struct {
+	logger *logrus.Logger
+	groups []string
+	attrs  []slog.Attr
+}
+
+func NewLogrusHandler(logger *logrus.Logger) *LogrusHandler {
+	return &LogrusHandler{logger: logger}
+}
+
+func (l *LogrusHandler) clone() *LogrusHandler {
+	return &LogrusHandler{
+		logger: l.logger,
+		groups: slices.Clip(l.groups),
+		attrs:  slices.Clip(l.attrs),
+	}
+}
+
+func (l *LogrusHandler) Enabled(ctx context.Context, level slog.Level) bool {
+	return levels[level] <= l.logger.GetLevel()
+}
+
+func (l *LogrusHandler) Handle(ctx context.Context, r slog.Record) error {
+	log := logrus.NewEntry(l.logger)
+	if !r.Time.IsZero() {
+		log = log.WithTime(r.Time)
+	}
+
+	f := logrus.Fields{}
+	for _, a := range l.attrs {
+		if a.Key != "" {
+			f[a.Key] = a.Value
+		}
+	}
+	log = log.WithFields(f)
+
+	r.Attrs(func(attr slog.Attr) bool {
+		if attr.Key == "" {
+			return true
+		}
+		log = log.WithField(attr.Key, attr.Value)
+		return true
+	})
+	log.Logf(levels[r.Level], r.Message)
+	return nil
+}
+
+func (l *LogrusHandler) groupPrefix() string {
+	if len(l.groups) > 0 {
+		return strings.Join(l.groups, ":") + ":"
+	}
+	return ""
+}
+
+func (l *LogrusHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	newHandler := l.clone()
+	for _, a := range attrs {
+		newHandler.attrs = append(newHandler.attrs, slog.Attr{
+			Key:   l.groupPrefix() + a.Key,
+			Value: a.Value,
+		})
+	}
+	return newHandler
+}
+
+func (l *LogrusHandler) WithGroup(name string) slog.Handler {
+	newHandler := l.clone()
+	newHandler.groups = append(newHandler.groups, name)
+	return newHandler
+}
+
+var _ slog.Handler = &LogrusHandler{}
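
Usage note (not part of the patch): a minimal sketch of how the slog-to-logrus bridge added in utils/logrus.go is installed and how converted call sites log afterwards. It mirrors the wiring now done in cmd/rp-archiver/main.go; the org fields and values below are illustrative only.

	package main

	import (
		"log/slog"

		"github.com/nyaruka/rp-archiver/utils"
		"github.com/sirupsen/logrus"
	)

	func main() {
		// all slog output is routed through the existing logrus logger,
		// so any hooks attached to it (e.g. the Sentry hook) keep working
		logrus.SetLevel(logrus.DebugLevel)
		slog.SetDefault(slog.New(utils.NewLogrusHandler(logrus.StandardLogger())))

		// converted call sites pass key/value pairs instead of logrus.Fields
		log := slog.With("org_id", 1, "org_name", "Example Org")
		log.Info("starting archiver", "version", "dev")
		log.Error("error creating archive", "error", "connection refused")
	}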