diff --git a/CHANGELOG.md b/CHANGELOG.md
index 526ba26..b6832a6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,9 @@ log is based on the [Keep a CHANGELOG](http://keepachangelog.com/) project.
 - Add HPE Proliant XL420 Support [#33](https://github.com/Comcast/fishymetrics/issues/33)
 - consolidate exporters into a single generic exporter [#52](https://github.com/Comcast/fishymetrics/issues/52)
 - update Dockerfile to comply with opensource packaging requirements [#61](https://github.com/Comcast/fishymetrics/issues/61)
+- Storage controller status metric for HP servers [#79](https://github.com/Comcast/fishymetrics/issues/79)
+- Ignore CPU metrics if Processor is Absent [#79](https://github.com/Comcast/fishymetrics/issues/79)
+- Added support for metrics collection from Dell servers [#77](https://github.com/Comcast/fishymetrics/issues/77)
 
 ## Fixed
 
diff --git a/exporter/exporter.go b/exporter/exporter.go
index 6f4d058..8484b52 100644
--- a/exporter/exporter.go
+++ b/exporter/exporter.go
@@ -99,6 +99,8 @@ type SystemEndpoints struct {
     systems           []string
     power             []string
     thermal           []string
+    volumes           []string
+    virtualDrives     []string
 }
 
 type DriveEndpoints struct {
@@ -239,8 +241,47 @@ func NewExporter(ctx context.Context, target, uri, profile, model string, exclud
         return nil, err
     }
 
+    // newer servers have volumes endpoint in storage controller, these volumes hold virtual drives member urls
+    if len(sysEndpoints.storageController) > 0 {
+        var controllerOutput oem.System
+        for _, controller := range sysEndpoints.storageController {
+            controllerOutput, err = getSystemsMetadata(exp.url+controller, target, retryClient)
+            if err != nil {
+                log.Error("error when getting storage controller metadata", zap.Error(err), zap.Any("trace_id", ctx.Value("traceID")))
+                return nil, err
+            }
+            if controllerOutput.Volumes.URL != "" {
+                url := appendSlash(controllerOutput.Volumes.URL)
+                if checkUnique(sysEndpoints.volumes, url) {
+                    sysEndpoints.volumes = append(sysEndpoints.volumes, url)
+                }
+            }
+        }
+    }
+    if len(sysEndpoints.volumes) > 0 {
+        for _, volume := range sysEndpoints.volumes {
+            virtualDrives, err := getMemberUrls(exp.url+volume, target, retryClient)
+            if err != nil {
+                log.Error("error when getting virtual drive member urls", zap.Error(err), zap.Any("trace_id", ctx.Value("traceID")))
+                return nil, err
+            }
+            if len(virtualDrives) > 0 {
+                for _, virtualDrive := range virtualDrives {
+                    if strings.Contains(virtualDrive, "Virtual") {
+                        url := appendSlash(virtualDrive)
+                        if checkUnique(sysEndpoints.virtualDrives, url) {
+                            sysEndpoints.virtualDrives = append(sysEndpoints.virtualDrives, url)
+                        }
+                    }
+                }
+            }
+        }
+    }
+
     log.Debug("systems endpoints response", zap.Strings("systems_endpoints", sysEndpoints.systems),
         zap.Strings("storage_ctrl_endpoints", sysEndpoints.storageController),
+        zap.Strings("volumes_endpoints", sysEndpoints.volumes),
+        zap.Strings("virtual_drives_endpoints", sysEndpoints.virtualDrives),
         zap.Strings("drives_endpoints", sysEndpoints.drives),
         zap.Strings("power_endpoints", sysEndpoints.power),
         zap.Strings("thermal_endpoints", sysEndpoints.thermal),
@@ -342,6 +383,11 @@ func NewExporter(ctx context.Context, target, uri, profile, model string, exclud
         tasks = append(tasks, pool.NewTask(common.Fetch(exp.url+url, target, profile, retryClient), exp.url+url, handle(&exp, STORAGE_CONTROLLER)))
     }
 
+    // virtual drives
+    for _, url := range sysEndpoints.virtualDrives {
+        tasks = append(tasks, pool.NewTask(common.Fetch(exp.url+url, target, profile, retryClient), exp.url+url, handle(&exp, LOGICALDRIVE)))
+    }
+
     // power
     for _, url := range sysEndpoints.power {
         tasks = append(tasks, pool.NewTask(common.Fetch(exp.url+url, target, profile, retryClient), exp.url+url, handle(&exp, POWER)))
diff --git a/exporter/handlers.go b/exporter/handlers.go
index aaca558..c2cbf09 100644
--- a/exporter/handlers.go
+++ b/exporter/handlers.go
@@ -316,7 +316,9 @@ func (e *Exporter) exportLogicalDriveMetrics(body []byte) error {
     if dllogical.Raid == "" {
         ldName = dllogical.DisplayName
         raidType = dllogical.RaidType
-        volIdentifier = dllogical.Identifiers[0].DurableName
+        if len(dllogical.Identifiers) > 0 {
+            volIdentifier = dllogical.Identifiers[0].DurableName
+        }
     } else {
         ldName = dllogical.LogicalDriveName
         raidType = dllogical.Raid
@@ -430,6 +432,7 @@ func (e *Exporter) exportMemorySummaryMetrics(body []byte) error {
     var state float64
     var dlm oem.System
     var dlMemory = (*e.DeviceMetrics)["memoryMetrics"]
+    var totalSystemMemoryGiB string
     err := json.Unmarshal(body, &dlm)
     if err != nil {
         return fmt.Errorf("Error Unmarshalling MemorySummaryMetrics - " + err.Error())
@@ -444,7 +447,14 @@ func (e *Exporter) exportMemorySummaryMetrics(body []byte) error {
         state = BAD
     }
 
-    (*dlMemory)["memoryStatus"].WithLabelValues(e.ChassisSerialNumber, e.Model, strconv.Itoa(dlm.MemorySummary.TotalSystemMemoryGiB)).Set(state)
+    switch dlm.MemorySummary.TotalSystemMemoryGiB.(type) {
+    case int:
+        totalSystemMemoryGiB = strconv.Itoa(dlm.MemorySummary.TotalSystemMemoryGiB.(int))
+    case float64:
+        totalSystemMemoryGiB = strconv.FormatFloat(dlm.MemorySummary.TotalSystemMemoryGiB.(float64), 'f', -1, 64)
+    }
+
+    (*dlMemory)["memoryStatus"].WithLabelValues(e.ChassisSerialNumber, e.Model, totalSystemMemoryGiB).Set(state)
 
     return nil
 }
diff --git a/oem/system.go b/oem/system.go
index dc4207b..44bd5ff 100644
--- a/oem/system.go
+++ b/oem/system.go
@@ -26,6 +26,7 @@ type System struct {
     SystemHostname string        `json:"HostName"`
     Oem            OemSys        `json:"Oem"`
     MemorySummary  MemorySummary `json:"MemorySummary"`
+    Volumes        Link          `json:"Volumes"`
 }
 
 type OemSys struct {
@@ -67,7 +68,7 @@ type StorageBattery struct {
 
 // MemorySummary is the json object for MemorySummary metadata
 type MemorySummary struct {
     Status                         StatusMemory `json:"Status"`
-    TotalSystemMemoryGiB           int          `json:"TotalSystemMemoryGiB"`
+    TotalSystemMemoryGiB           interface{}  `json:"TotalSystemMemoryGiB"`
     TotalSystemPersistentMemoryGiB int          `json:"TotalSystemPersistentMemoryGiB"`
 }
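
A note on the discovery loops added to `NewExporter`: each candidate URL is normalized with `appendSlash` and only appended if `checkUnique` reports it is not already in the slice, so equivalent Redfish URLs with and without a trailing slash collapse to one endpoint. The sketch below is a hypothetical reconstruction of that pattern, not the repo's actual helper implementations, and the Dell-style member URLs are illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// appendSlash normalizes a Redfish odata.id so URLs with and without a
// trailing slash compare equal (hypothetical stand-in for the repo helper).
func appendSlash(url string) string {
	if strings.HasSuffix(url, "/") {
		return url
	}
	return url + "/"
}

// checkUnique reports whether url is not yet present in urls
// (hypothetical stand-in for the repo helper).
func checkUnique(urls []string, url string) bool {
	for _, u := range urls {
		if u == url {
			return false
		}
	}
	return true
}

func main() {
	// Illustrative Volumes collection members; the first two are the same
	// virtual drive with and without a trailing slash, the third is not a
	// virtual drive and is filtered out by the "Virtual" substring check.
	members := []string{
		"/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/Disk.Virtual.0",
		"/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/Disk.Virtual.0/",
		"/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/CacheVolume.1",
	}

	var virtualDrives []string
	for _, m := range members {
		if strings.Contains(m, "Virtual") {
			u := appendSlash(m)
			if checkUnique(virtualDrives, u) {
				virtualDrives = append(virtualDrives, u)
			}
		}
	}
	fmt.Println(virtualDrives) // one normalized URL: both "Virtual" members collapse
}
```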
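The guard added in `exportLogicalDriveMetrics` is a plain bounds check: indexing `Identifiers[0]` on a volume whose `Identifiers` array is empty panics at runtime with "index out of range". A minimal illustration, using simplified stand-in types rather than the `oem` package's:

```go
package main

import "fmt"

// Simplified stand-ins for the oem package's volume types.
type identifier struct {
	DurableName string `json:"DurableName"`
}

type volume struct {
	Identifiers []identifier `json:"Identifiers"`
}

func main() {
	v := volume{} // a volume that reports no Identifiers at all

	var volIdentifier string
	if len(v.Identifiers) > 0 { // the guard added in this PR
		volIdentifier = v.Identifiers[0].DurableName
	}
	fmt.Printf("volIdentifier=%q\n", volIdentifier) // empty label instead of a panic

	// Without the guard, v.Identifiers[0] would panic:
	// "index out of range [0] with length 0".
}
```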
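On the `TotalSystemMemoryGiB` change: the field moves from `int` to `interface{}` because not every BMC reports it as a JSON integer, so the exporter now formats whatever arrives via a type switch. One subtlety: `encoding/json` stores any JSON number decoded into an `interface{}` as `float64`, so for unmarshalled Redfish payloads the `float64` branch is the one that fires; the `int` branch only matters if the field is ever populated directly in Go code. A minimal standalone sketch, not fishymetrics code (the 383.99 payload is an invented example of a non-integer value):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// Simplified stand-in for oem.MemorySummary.
type memorySummary struct {
	TotalSystemMemoryGiB interface{} `json:"TotalSystemMemoryGiB"`
}

// formatGiB mirrors the type-switch approach used in the PR.
func formatGiB(v interface{}) string {
	switch n := v.(type) {
	case int:
		// only reachable when the field is set programmatically
		return strconv.Itoa(n)
	case float64:
		// encoding/json decodes every JSON number into this branch
		return strconv.FormatFloat(n, 'f', -1, 64)
	default:
		return ""
	}
}

func main() {
	for _, payload := range []string{
		`{"TotalSystemMemoryGiB": 384}`,    // integer-valued JSON number
		`{"TotalSystemMemoryGiB": 383.99}`, // fractional JSON number
	} {
		var m memorySummary
		if err := json.Unmarshal([]byte(payload), &m); err != nil {
			panic(err)
		}
		fmt.Println(formatGiB(m.TotalSystemMemoryGiB)) // "384", then "383.99"
	}
}
```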