diff --git a/api/context.go b/api/context.go
index ebad20cd..b97f52e9 100644
--- a/api/context.go
+++ b/api/context.go
@@ -115,6 +115,13 @@ func (ctx ScrapeContext) ScrapeConfig() *v1.ScrapeConfig {
 	return ctx.scrapeConfig
 }
 
+func (ctx ScrapeContext) ScraperID() string {
+	if ctx.scrapeConfig == nil || ctx.scrapeConfig.GetPersistedID() == nil {
+		return ""
+	}
+	return ctx.scrapeConfig.GetPersistedID().String()
+}
+
 func (ctx ScrapeContext) Namespace() string {
 	if ctx.namespace != "" {
 		return ctx.namespace
diff --git a/db/update.go b/db/update.go
index 298038b7..3c22b9d3 100644
--- a/db/update.go
+++ b/db/update.go
@@ -693,11 +693,9 @@ func generateConfigChange(ctx api.ScrapeContext, newConf, prev models.ConfigItem
 
 	duration := time.Since(start)
 
-	if ctx.ScrapeConfig() != nil {
-		ctx.Histogram("scraper_diff_duration",
-			[]float64{1, 10, 100, 1000, 10000}, "scraper", ctx.ScrapeConfig().GetPersistedID().String()).
-			Record(time.Duration(duration.Milliseconds()))
-	}
+	ctx.Histogram("scraper_diff_duration",
+		[]float64{1, 10, 100, 1000, 10000}, "scraper", ctx.ScraperID()).
+		Record(time.Duration(duration.Milliseconds()))
 
 	msg := fmt.Sprintf("generated in %dms for %s size=%s diff=%s",
 		duration.Milliseconds(),
diff --git a/scrapers/cron.go b/scrapers/cron.go
index e2db3410..ece58161 100644
--- a/scrapers/cron.go
+++ b/scrapers/cron.go
@@ -143,7 +143,7 @@ func watchKubernetesEventsWithRetry(ctx api.ScrapeContext, config v1.Kubernetes)
 }
 
 func SyncScrapeJob(sc api.ScrapeContext) error {
-	id := sc.ScrapeConfig().GetPersistedID().String()
+	id := sc.ScraperID()
 
 	var existingJob *job.Job
 	if j, ok := scrapeJobs.Load(id); ok {
@@ -201,7 +201,7 @@ func newScraperJob(sc api.ScrapeContext) *job.Job {
 		Semaphores:   semaphores,
 		RunNow:       sc.PropertyOn(false, "runNow"),
 		Retention:    job.RetentionBalanced,
-		ResourceID:   sc.ScrapeConfig().GetPersistedID().String(),
+		ResourceID:   sc.ScraperID(),
 		ResourceType: job.ResourceTypeScraper,
 		ID:           fmt.Sprintf("%s/%s", sc.ScrapeConfig().Namespace, sc.ScrapeConfig().Name),
 		Fn: func(jr job.JobRuntime) error {
@@ -220,7 +220,7 @@ func newScraperJob(sc api.ScrapeContext) *job.Job {
 
 func scheduleScraperJob(sc api.ScrapeContext) error {
 	j := newScraperJob(sc)
-	scrapeJobs.Store(sc.ScrapeConfig().GetPersistedID().String(), j)
+	scrapeJobs.Store(sc.ScraperID(), j)
 	if err := j.AddToScheduler(scrapeJobScheduler); err != nil {
 		return fmt.Errorf("[%s] failed to schedule %v", j.Name, err)
 	}
@@ -244,13 +244,13 @@ func scheduleScraperJob(sc api.ScrapeContext) error {
 		if err := eventsWatchJob.AddToScheduler(scrapeJobScheduler); err != nil {
 			return fmt.Errorf("failed to schedule kubernetes watch event consumer job: %v", err)
 		}
-		scrapeJobs.Store(consumeKubernetesWatchEventsJobKey(sc.ScrapeConfig().GetPersistedID().String()), eventsWatchJob)
+		scrapeJobs.Store(consumeKubernetesWatchEventsJobKey(sc.ScraperID()), eventsWatchJob)
 
 		resourcesWatchJob := ConsumeKubernetesWatchResourcesJobFunc(sc, config)
 		if err := resourcesWatchJob.AddToScheduler(scrapeJobScheduler); err != nil {
 			return fmt.Errorf("failed to schedule kubernetes watch resources consumer job: %v", err)
 		}
-		scrapeJobs.Store(consumeKubernetesWatchResourcesJobKey(sc.ScrapeConfig().GetPersistedID().String()), resourcesWatchJob)
+		scrapeJobs.Store(consumeKubernetesWatchResourcesJobKey(sc.ScraperID()), resourcesWatchJob)
 	}
 
 	return nil
diff --git a/scrapers/kubernetes/events_watch.go b/scrapers/kubernetes/events_watch.go
index 31127fbd..fba6c8bd 100644
--- a/scrapers/kubernetes/events_watch.go
+++ b/scrapers/kubernetes/events_watch.go
@@ -80,7 +80,7 @@ func WatchResources(ctx api.ScrapeContext, config v1.Kubernetes) error {
 	}
 
 	globalSharedInformerManager.stop(ctx, kubeconfig, existingWatches...)
-	ctx.Counter("kubernetes_scraper_resource_watcher", "scraper_id", lo.FromPtr(ctx.ScrapeConfig().GetPersistedID()).String()).Add(1)
+	ctx.Counter("kubernetes_scraper_resource_watcher", "scraper_id", ctx.ScraperID()).Add(1)
 	return nil
 }
 
@@ -106,7 +106,7 @@ func WatchEvents(ctx api.ScrapeContext, config v1.Kubernetes) error {
 	}
 	defer watcher.Stop()
 
-	ctx.Counter("kubernetes_scraper_event_watcher", "scraper_id", lo.FromPtr(ctx.ScrapeConfig().GetPersistedID()).String()).Add(1)
+	ctx.Counter("kubernetes_scraper_event_watcher", "scraper_id", ctx.ScraperID()).Add(1)
 	for watchEvent := range watcher.ResultChan() {
 		var event v1.KubernetesEvent
 		if err := event.FromObj(watchEvent.Object); err != nil {
diff --git a/scrapers/slack/slack.go b/scrapers/slack/slack.go
index d8b3db18..b2d9711d 100644
--- a/scrapers/slack/slack.go
+++ b/scrapers/slack/slack.go
@@ -73,7 +73,7 @@ func (s Scraper) scrapeChannel(ctx api.ScrapeContext, config v1.Slack, client *S
 		opt.Oldest = strconv.FormatInt(time.Now().Add(-time.Hour*24*7).Unix(), 10)
 	}
 
-	lastMessagekey := fmt.Sprintf("%s:%s", lo.FromPtr(ctx.ScrapeConfig().GetPersistedID()), channel.ID)
+	lastMessagekey := fmt.Sprintf("%s:%s", ctx.ScraperID(), channel.ID)
 	if last, ok := lastScrapeTime.Load(lastMessagekey); ok {
 		if last.(string) > opt.Oldest {
 			opt.Oldest = last.(string)