fix: derive path on a partial result set from the nearest parent
adityathebe committed May 17, 2024
1 parent 84cc7da commit a67edaf
Showing 2 changed files with 58 additions and 9 deletions.
65 changes: 58 additions & 7 deletions db/update.go
@@ -658,12 +658,8 @@ func extractConfigsAndChangesFromResults(ctx api.ScrapeContext, scrapeStartTime
		return nil, nil, nil, nil, fmt.Errorf("unable to setup parents: %w", err)
	}

-	if root != "" {
-		// Only work with the Tree if the result set has the root node.
-		// Incremental scrapers only have partial result set.
-		if err := setConfigPaths(ctx, tree, root, allConfigs); err != nil {
-			return nil, nil, nil, nil, fmt.Errorf("unable to set config paths: %w", err)
-		}
+	if err := setConfigPaths(ctx, tree, root, allConfigs); err != nil {
+		return nil, nil, nil, nil, fmt.Errorf("unable to set config paths: %w", err)
	}

	// We sort the new config items such that parents are always first.
@@ -710,6 +706,61 @@ func setConfigParents(ctx api.ScrapeContext, parentTypeToConfigMap map[configExt
}

func setConfigPaths(ctx api.ScrapeContext, tree graph.Graph[string, string], root string, allConfigs []*models.ConfigItem) error {
if root == "" {
// When we have a partial result set from an incremental scraper
// we try to form a partial tree that's just sufficient to
// connects the config item back to the root.
//
// The way to do that is by finding the parent from a db lookup
//
// Example: on a partial result set of just a new Deployment, ReplicaSet & Pod
// we only need to find the parent of the deployment, which is the namespace,
// from the db as both ReplicaSet's & Pod's parent is withint the result set.
configIDs := make(map[string]struct{})
for _, c := range allConfigs {
configIDs[c.ID] = struct{}{}
}

for _, c := range allConfigs {
if c.ParentID == nil {
continue
}

if _, found := configIDs[*c.ParentID]; found {
continue
}

parent, err := ctx.TempCache().Get(*c.ParentID)
if err != nil {
return fmt.Errorf("unable to get parent(%s): %w", c, err)
}
logger.Infof("Found parent: (%s, type=%s, path=%s", *parent.Name, *parent.Type, parent.Path)

if parent.Path == "" {
if err := tree.AddVertex(parent.ID); err != nil && !errors.Is(err, graph.ErrVertexAlreadyExists) {
return fmt.Errorf("unable to add vertex(%s): %w", parent, err)
}
} else {
nodes := strings.Split(parent.Path, ".")
for i, n := range nodes {
if err := tree.AddVertex(n); err != nil && !errors.Is(err, graph.ErrVertexAlreadyExists) {
return fmt.Errorf("unable to add vertex(%s): %w", n, err)
}

if i != 0 {
if err := tree.AddEdge(nodes[i-1], n); err != nil && !errors.Is(err, graph.ErrEdgeAlreadyExists) {
return fmt.Errorf("unable to add edge(%s): %w", c, err)
}
}
}

if root == "" {
root = nodes[0]
}
}
}
}

for _, c := range allConfigs {
if c.ParentID != nil {
if err := tree.AddEdge(*c.ParentID, c.ID); err != nil {
@@ -720,7 +771,7 @@

	for _, c := range allConfigs {
		if paths, err := graph.ShortestPath(tree, root, c.ID); err != nil {
-			ctx.Logger.V(3).Infof("unable to get the path for config(%s): %v", c, err)
+			ctx.Logger.V(0).Infof("unable to get the path for config(%s): %v", c, err)
		} else if len(paths) > 0 {
			c.Path = strings.Join(paths, ".")
		}
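As an aside, the comment block added to setConfigPaths above describes the partial-tree approach: when an incremental scrape yields no root, the nearest parent that is missing from the result set is fetched from the cache and its stored path is expanded into vertices and edges, so that ShortestPath can still resolve a full path for every item. The sketch below is a minimal, self-contained illustration of that idea. It assumes the graph API is github.com/dominikbraun/graph (suggested by the graph.Graph[string, string], graph.ShortestPath and graph.ErrVertexAlreadyExists identifiers in the diff), assumes a config's stored path is a dot-separated chain of ancestor IDs ending with its own ID, and replaces the DB/TempCache lookup with a hypothetical lookupParentPath helper and made-up IDs.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/dominikbraun/graph"
)

// lookupParentPath is a hypothetical stand-in for the DB/TempCache lookup:
// it returns the stored dot-separated path of a parent that is missing from
// the result set. The namespace's path is assumed to end with its own ID.
func lookupParentPath(id string) string {
	return map[string]string{
		"ns-1": "cluster-1.ns-1",
	}[id]
}

func main() {
	// Partial result set: only the Deployment, ReplicaSet and Pod were scraped.
	type item struct{ id, parentID string }
	items := []item{
		{"deploy-1", "ns-1"}, // its parent (the namespace) is NOT in the result set
		{"rs-1", "deploy-1"}, // parent is in the result set
		{"pod-1", "rs-1"},    // parent is in the result set
	}

	tree := graph.New(graph.StringHash, graph.Directed())

	// Rebuild the missing upper part of the tree from the nearest parent's stored path.
	nodes := strings.Split(lookupParentPath("ns-1"), ".") // ["cluster-1", "ns-1"]
	for i, n := range nodes {
		_ = tree.AddVertex(n)
		if i > 0 {
			_ = tree.AddEdge(nodes[i-1], n)
		}
	}
	root := nodes[0] // "cluster-1"

	// Add the scraped items and link them to their parents.
	for _, it := range items {
		_ = tree.AddVertex(it.id)
	}
	for _, it := range items {
		_ = tree.AddEdge(it.parentID, it.id)
	}

	// Every item can now derive its full path even though the root was never scraped.
	for _, it := range items {
		if path, err := graph.ShortestPath(tree, root, it.id); err == nil {
			fmt.Printf("%s -> %s\n", it.id, strings.Join(path, "."))
		}
	}
}
```

Run against this tiny fixture, it prints cluster-1.ns-1.deploy-1, cluster-1.ns-1.deploy-1.rs-1 and cluster-1.ns-1.deploy-1.rs-1.pod-1, even though neither cluster-1 nor ns-1 was part of the scraped result set.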
2 changes: 0 additions & 2 deletions scrapers/cron.go
@@ -202,7 +202,6 @@ func ConsumeKubernetesWatchEventsJobFunc(sc api.ScrapeContext, config v1.Kuberne
		JobHistory: true,
		Singleton: true,
		Retention: job.RetentionFew,
-		RunNow: true,
		Schedule: "@every 15s",
		ResourceID: string(scrapeConfig.GetUID()),
		ID: fmt.Sprintf("%s/%s", sc.ScrapeConfig().Namespace, sc.ScrapeConfig().Name),
@@ -268,7 +267,6 @@ func ConsumeKubernetesWatchResourcesJobFunc(sc api.ScrapeContext, config v1.Kube
		JobHistory: true,
		Singleton: true,
		Retention: job.RetentionFew,
-		RunNow: true,
		Schedule: "@every 15s",
		ResourceID: string(scrapeConfig.GetUID()),
		ID: fmt.Sprintf("%s/%s", sc.ScrapeConfig().Namespace, sc.ScrapeConfig().Name),
