Skip to content

Commit

Permalink
Allow Discovery Manager and Scrape Manager metrics to come from outside the "New" functions.
Browse files Browse the repository at this point in the history
  • Loading branch information
ptodev committed Sep 24, 2024
1 parent 6e971a7 commit fb33272
Show file tree
Hide file tree
Showing 4 changed files with 61 additions and 22 deletions.
30 changes: 22 additions & 8 deletions discovery/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere
}

// NewManager is the Discovery Manager constructor.
// If you want multiple managers to share the same metrics, set "registerer" to nil and use "SetMetrics" in "options".
func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
if logger == nil {
logger = log.NewNopLogger()
Expand All @@ -99,13 +100,15 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
option(mgr)
}

// Register the metrics.
// We have to do this after setting all options, so that the name of the Manager is set.
if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
mgr.metrics = metrics
} else {
level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
return nil
if mgr.metrics == nil {
// Register the metrics.
// We have to do this after setting all options, so that the name of the Manager is set.
if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
mgr.metrics = metrics
} else {
level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
return nil
}
}

return mgr
Expand Down Expand Up @@ -169,11 +172,22 @@ func (m *Manager) Providers() []*Provider {
return m.providers
}

// SetMetrics returns a Manager option that installs a caller-supplied
// metrics instance, letting several discovery managers share one set of
// metrics instead of each registering its own.
func SetMetrics(shared *Metrics) func(*Manager) {
	return func(mgr *Manager) {
		mgr.mtx.Lock()
		mgr.metrics = shared
		mgr.mtx.Unlock()
	}
}

// UnregisterMetrics unregisters the manager's own metrics from its
// registerer. It does not touch service discovery or refresh metrics, whose
// lifecycle is managed independently of the discovery Manager. With a nil
// registerer or nil metrics (e.g. when metrics are shared across managers
// via SetMetrics) this is a no-op.
func (m *Manager) UnregisterMetrics() {
	if m.registerer == nil || m.metrics == nil {
		return
	}
	m.metrics.Unregister(m.registerer)
}

// Run starts the background processing.
Expand Down
29 changes: 27 additions & 2 deletions scrape/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable, registere
logger = log.NewNopLogger()
}

sm, err := newScrapeMetrics(registerer)
sm, err := NewScrapeMetrics(registerer)
if err != nil {
return nil, fmt.Errorf("failed to create scrape manager due to error: %w", err)
}
Expand All @@ -66,6 +66,31 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable, registere
return m, nil
}

// NewManagerWithMetrics builds a scrape Manager around a pre-built
// *ScrapeMetrics rather than registering fresh metrics, which allows
// several managers to share a single metrics instance. A nil o defaults to
// empty Options; a nil logger defaults to a no-op logger.
// NOTE(review): metrics is dereferenced unconditionally below, so callers
// must pass a non-nil *ScrapeMetrics — confirm all call sites.
func NewManagerWithMetrics(o *Options, logger log.Logger, app storage.Appendable, metrics *ScrapeMetrics) *Manager {
	if o == nil {
		o = &Options{}
	}
	if logger == nil {
		logger = log.NewNopLogger()
	}

	mgr := &Manager{
		opts:          o,
		logger:        logger,
		append:        app,
		metrics:       metrics,
		scrapeConfigs: map[string]*config.ScrapeConfig{},
		scrapePools:   map[string]*scrapePool{},
		graceShut:     make(chan struct{}),
		triggerReload: make(chan struct{}, 1),
		buffers:       pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }),
	}

	// Wire the metadata-cache collector to this manager so it can enumerate
	// targets at gather time.
	mgr.metrics.setTargetMetadataCacheGatherer(mgr)

	return mgr
}

// Options are the configuration parameters to the scrape manager.
type Options struct {
ExtraMetrics bool
Expand Down Expand Up @@ -108,7 +133,7 @@ type Manager struct {

triggerReload chan struct{}

metrics *scrapeMetrics
metrics *ScrapeMetrics
}

// Run receives and saves target set updates and triggers the scraping loops reloading.
Expand Down
10 changes: 5 additions & 5 deletions scrape/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
)

type scrapeMetrics struct {
type ScrapeMetrics struct {
reg prometheus.Registerer
// Used by Manager.
targetMetadataCache *MetadataMetricsCollector
Expand Down Expand Up @@ -54,8 +54,8 @@ type scrapeMetrics struct {
targetScrapeNativeHistogramBucketLimit prometheus.Counter
}

func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
sm := &scrapeMetrics{reg: reg}
func NewScrapeMetrics(reg prometheus.Registerer) (*ScrapeMetrics, error) {
sm := &ScrapeMetrics{reg: reg}

// Manager metrics.
sm.targetMetadataCache = &MetadataMetricsCollector{
Expand Down Expand Up @@ -257,12 +257,12 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
return sm, nil
}

func (sm *scrapeMetrics) setTargetMetadataCacheGatherer(gatherer TargetsGatherer) {
// setTargetMetadataCacheGatherer wires a TargetsGatherer (the scrape Manager,
// per NewManagerWithMetrics) into the target metadata cache collector so it
// can enumerate targets when its metrics are gathered.
func (sm *ScrapeMetrics) setTargetMetadataCacheGatherer(gatherer TargetsGatherer) {
sm.targetMetadataCache.TargetsGatherer = gatherer
}

// Unregister unregisters all metrics.
func (sm *scrapeMetrics) Unregister() {
func (sm *ScrapeMetrics) Unregister() {
sm.reg.Unregister(sm.targetMetadataCache)
sm.reg.Unregister(sm.targetScrapePools)
sm.reg.Unregister(sm.targetScrapePoolsFailed)
Expand Down
14 changes: 7 additions & 7 deletions scrape/scrape.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ type scrapePool struct {

noDefaultPort bool

metrics *scrapeMetrics
metrics *ScrapeMetrics
}

type labelLimits struct {
Expand Down Expand Up @@ -122,7 +122,7 @@ const maxAheadTime = 10 * time.Minute
// returning an empty label set is interpreted as "drop".
type labelsMutator func(labels.Labels) labels.Labels

func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *ScrapeMetrics) (*scrapePool, error) {
if logger == nil {
logger = log.NewNopLogger()
}
Expand Down Expand Up @@ -696,7 +696,7 @@ type targetScraper struct {
acceptHeader string
acceptEncodingHeader string

metrics *scrapeMetrics
metrics *ScrapeMetrics
}

var errBodySizeLimit = errors.New("body size limit exceeded")
Expand Down Expand Up @@ -849,7 +849,7 @@ type scrapeLoop struct {
reportExtraMetrics bool
appendMetadataToWAL bool

metrics *scrapeMetrics
metrics *ScrapeMetrics

skipOffsetting bool // For testability.
}
Expand Down Expand Up @@ -880,7 +880,7 @@ type scrapeCache struct {
metaMtx sync.Mutex
metadata map[string]*metaEntry

metrics *scrapeMetrics
metrics *ScrapeMetrics
}

// metaEntry holds meta information about a metric.
Expand All @@ -896,7 +896,7 @@ func (m *metaEntry) size() int {
return len(m.Help) + len(m.Unit) + len(m.Type)
}

func newScrapeCache(metrics *scrapeMetrics) *scrapeCache {
func newScrapeCache(metrics *ScrapeMetrics) *scrapeCache {
return &scrapeCache{
series: map[string]*cacheEntry{},
droppedSeries: map[string]*uint64{},
Expand Down Expand Up @@ -1133,7 +1133,7 @@ func newScrapeLoop(ctx context.Context,
appendMetadataToWAL bool,
target *Target,
passMetadataInContext bool,
metrics *scrapeMetrics,
metrics *ScrapeMetrics,
skipOffsetting bool,
) *scrapeLoop {
if l == nil {
Expand Down

0 comments on commit fb33272

Please sign in to comment.