Skip to content

Commit

Permalink
chore: fix nil pointer
Browse files Browse the repository at this point in the history
  • Loading branch information
moshloop committed Aug 28, 2024
1 parent f51ee03 commit 9018c50
Show file tree
Hide file tree
Showing 5 changed files with 18 additions and 13 deletions.
7 changes: 7 additions & 0 deletions api/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,13 @@ func (ctx ScrapeContext) ScrapeConfig() *v1.ScrapeConfig {
return ctx.scrapeConfig
}

// ScraperID returns the string form of the scrape config's persisted ID.
// It is nil-safe: when no scrape config is attached, or the config has no
// persisted ID yet, it returns the empty string instead of dereferencing nil.
func (ctx ScrapeContext) ScraperID() string {
	cfg := ctx.scrapeConfig
	if cfg == nil {
		return ""
	}
	id := cfg.GetPersistedID()
	if id == nil {
		return ""
	}
	return id.String()
}

func (ctx ScrapeContext) Namespace() string {
if ctx.namespace != "" {
return ctx.namespace
Expand Down
8 changes: 3 additions & 5 deletions db/update.go
Original file line number Diff line number Diff line change
Expand Up @@ -693,11 +693,9 @@ func generateConfigChange(ctx api.ScrapeContext, newConf, prev models.ConfigItem

duration := time.Since(start)

if ctx.ScrapeConfig() != nil {
ctx.Histogram("scraper_diff_duration",
[]float64{1, 10, 100, 1000, 10000}, "scraper", ctx.ScrapeConfig().GetPersistedID().String()).
Record(time.Duration(duration.Milliseconds()))
}
ctx.Histogram("scraper_diff_duration",
[]float64{1, 10, 100, 1000, 10000}, "scraper", ctx.ScraperID()).
Record(time.Duration(duration.Milliseconds()))

msg := fmt.Sprintf("generated in %dms for %s size=%s diff=%s",
duration.Milliseconds(),
Expand Down
10 changes: 5 additions & 5 deletions scrapers/cron.go
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ func watchKubernetesEventsWithRetry(ctx api.ScrapeContext, config v1.Kubernetes)
}

func SyncScrapeJob(sc api.ScrapeContext) error {
id := sc.ScrapeConfig().GetPersistedID().String()
id := sc.ScraperID()

var existingJob *job.Job
if j, ok := scrapeJobs.Load(id); ok {
Expand Down Expand Up @@ -201,7 +201,7 @@ func newScraperJob(sc api.ScrapeContext) *job.Job {
Semaphores: semaphores,
RunNow: sc.PropertyOn(false, "runNow"),
Retention: job.RetentionBalanced,
ResourceID: sc.ScrapeConfig().GetPersistedID().String(),
ResourceID: sc.ScraperID(),
ResourceType: job.ResourceTypeScraper,
ID: fmt.Sprintf("%s/%s", sc.ScrapeConfig().Namespace, sc.ScrapeConfig().Name),
Fn: func(jr job.JobRuntime) error {
Expand All @@ -220,7 +220,7 @@ func newScraperJob(sc api.ScrapeContext) *job.Job {
func scheduleScraperJob(sc api.ScrapeContext) error {
j := newScraperJob(sc)

scrapeJobs.Store(sc.ScrapeConfig().GetPersistedID().String(), j)
scrapeJobs.Store(sc.ScraperID(), j)
if err := j.AddToScheduler(scrapeJobScheduler); err != nil {
return fmt.Errorf("[%s] failed to schedule %v", j.Name, err)
}
Expand All @@ -244,13 +244,13 @@ func scheduleScraperJob(sc api.ScrapeContext) error {
if err := eventsWatchJob.AddToScheduler(scrapeJobScheduler); err != nil {
return fmt.Errorf("failed to schedule kubernetes watch event consumer job: %v", err)
}
scrapeJobs.Store(consumeKubernetesWatchEventsJobKey(sc.ScrapeConfig().GetPersistedID().String()), eventsWatchJob)
scrapeJobs.Store(consumeKubernetesWatchEventsJobKey(sc.ScraperID()), eventsWatchJob)

resourcesWatchJob := ConsumeKubernetesWatchResourcesJobFunc(sc, config)
if err := resourcesWatchJob.AddToScheduler(scrapeJobScheduler); err != nil {
return fmt.Errorf("failed to schedule kubernetes watch resources consumer job: %v", err)
}
scrapeJobs.Store(consumeKubernetesWatchResourcesJobKey(sc.ScrapeConfig().GetPersistedID().String()), resourcesWatchJob)
scrapeJobs.Store(consumeKubernetesWatchResourcesJobKey(sc.ScraperID()), resourcesWatchJob)
}

return nil
Expand Down
4 changes: 2 additions & 2 deletions scrapers/kubernetes/events_watch.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ func WatchResources(ctx api.ScrapeContext, config v1.Kubernetes) error {
}
globalSharedInformerManager.stop(ctx, kubeconfig, existingWatches...)

ctx.Counter("kubernetes_scraper_resource_watcher", "scraper_id", lo.FromPtr(ctx.ScrapeConfig().GetPersistedID()).String()).Add(1)
ctx.Counter("kubernetes_scraper_resource_watcher", "scraper_id", ctx.ScraperID()).Add(1)
return nil
}

Expand All @@ -106,7 +106,7 @@ func WatchEvents(ctx api.ScrapeContext, config v1.Kubernetes) error {
}
defer watcher.Stop()

ctx.Counter("kubernetes_scraper_event_watcher", "scraper_id", lo.FromPtr(ctx.ScrapeConfig().GetPersistedID()).String()).Add(1)
ctx.Counter("kubernetes_scraper_event_watcher", "scraper_id", ctx.ScraperID()).Add(1)
for watchEvent := range watcher.ResultChan() {
var event v1.KubernetesEvent
if err := event.FromObj(watchEvent.Object); err != nil {
Expand Down
2 changes: 1 addition & 1 deletion scrapers/slack/slack.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ func (s Scraper) scrapeChannel(ctx api.ScrapeContext, config v1.Slack, client *S
opt.Oldest = strconv.FormatInt(time.Now().Add(-time.Hour*24*7).Unix(), 10)
}

lastMessagekey := fmt.Sprintf("%s:%s", lo.FromPtr(ctx.ScrapeConfig().GetPersistedID()), channel.ID)
lastMessagekey := fmt.Sprintf("%s:%s", ctx.ScraperID(), channel.ID)
if last, ok := lastScrapeTime.Load(lastMessagekey); ok {
if last.(string) > opt.Oldest {
opt.Oldest = last.(string)
Expand Down

0 comments on commit 9018c50

Please sign in to comment.