Add missing JAVADOC
Closes #113

Signed-off-by: Lukáš Vlček <[email protected]>
lukas-vlcek committed Nov 18, 2022
1 parent f61f63c commit eac001a
Showing 11 changed files with 265 additions and 9 deletions.
@@ -87,7 +87,7 @@ private String[] getExtendedNodeLabelNames(String... labelNames) {
}

/**
* @param nodeInfo {@link Tuple} holding [nodeName, nodeID]
* @param nodeInfo A {@link Tuple} holding [nodeName, nodeID]
* @param labelValues Prometheus label values
* @return Prometheus label values extended with cluster and specific node context
*/
@@ -104,6 +104,33 @@ private String[] getExtendedNodeLabelValues(
return extended;
}

/**
* <p>
* Register a new metric in the catalog. The metric is registered using the metric name, a help text and an
* optional set of labels. The metric prefix is configured via {@link RestPrometheusMetricsAction#METRIC_PREFIX}.
* <p>
* Example:
* <pre>{@code
* // Register new metric for cluster shards:
* // The metric will be called opensearch_cluster_shards_number (opensearch_ is the default metric prefix),
* // the help text will be "Number of shards", and finally we expect that the Prometheus metric will
* // carry shard counts for different types of shards (initializing, active, etc.).
* catalog.registerClusterGauge("cluster_shards_number", "Number of shards", "type");
*
* // ... later in the code:
* private void populateClusterShards(ClusterHealthResponse chr) {
* catalog.setClusterGauge("cluster_shards_number", chr.getActiveShards(), "active");
* catalog.setClusterGauge("cluster_shards_number", chr.getActivePrimaryShards(), "active_primary");
* catalog.setClusterGauge("cluster_shards_number", chr.getDelayedUnassignedShards(), "delayed_unassigned");
* catalog.setClusterGauge("cluster_shards_number", chr.getInitializingShards(), "initializing");
* catalog.setClusterGauge("cluster_shards_number", chr.getRelocatingShards(), "relocating");
* catalog.setClusterGauge("cluster_shards_number", chr.getUnassignedShards(), "unassigned");
* }
* }</pre>
* @param metric Metric name without the metric prefix
* @param help Help text for the metric
* @param labels Optional set of labels
*/
public void registerClusterGauge(String metric, String help, String... labels) {
Gauge gauge = Gauge.build().
name(metricPrefix + metric).
@@ -116,11 +143,45 @@ public void registerClusterGauge(String metric, String help, String... labels) {
logger.debug(String.format(Locale.ENGLISH, "Registered new cluster gauge %s", metric));
}

/**
* Set a value for a cluster metric that has been previously registered using {@link #registerClusterGauge(String, String, String...)}.
* @see #registerClusterGauge(String, String, String...)
* @param metric Metric name without the metric prefix
* @param value Value of the metric
* @param labelValues Optional set of label values
*/
public void setClusterGauge(String metric, double value, String... labelValues) {
Gauge gauge = (Gauge) metrics.get(metric);
gauge.labels(getExtendedClusterLabelValues(labelValues)).set(value);
}

/**
* <p>
* Register a new metric in the catalog. This is similar to {@link #registerClusterGauge(String, String, String...)}
* except that this method registers the metric at the cluster node level.
* <p>
* Example:
* <pre>{@code
* // Register new metric for cluster node:
* // The metric will be called opensearch_threadpool_threads_count (opensearch_ is the default metric prefix),
* // the help text will be "Count of threads in thread pool", and finally we expect that the Prometheus metric will
* // carry the thread pool name and type.
* catalog.registerNodeGauge("threadpool_threads_count", "Count of threads in thread pool", "name", "type");
*
* // ... later in the code:
* private void updateThreadPoolMetrics(Tuple<String, String> nodeInfo, ThreadPoolStats tps) {
* if (tps != null) {
* for (ThreadPoolStats.Stats st : tps) {
* catalog.setNodeGauge(nodeInfo, "threadpool_threads_count", st.getCompleted(), st.getName(), "completed");
* catalog.setNodeGauge(nodeInfo, "threadpool_threads_count", st.getRejected(), st.getName(), "rejected");
* }
* }
* }
* }</pre>
* @param metric Metric name without the metric prefix
* @param help Help text for the metric
* @param labels Optional set of labels
*/
public void registerNodeGauge(String metric, String help, String... labels) {
Gauge gauge = Gauge.build().
name(metricPrefix + metric).
@@ -133,13 +194,28 @@ public void registerNodeGauge(String metric, String help, String... labels) {
logger.debug(String.format(Locale.ENGLISH, "Registered new node gauge %s", metric));
}

/**
* Set a value for a cluster node metric that has been previously registered using {@link #registerNodeGauge(String, String, String...)}.
* @see #registerNodeGauge(String, String, String...)
* @param nodeInfo A {@link Tuple} holding [nodeName, nodeID]
* @param metric Metric name without the metric prefix
* @param value Value of the metric
* @param labelValues Optional set of label values
*/
public void setNodeGauge(Tuple<String, String> nodeInfo,
String metric, double value,
String... labelValues) {
Gauge gauge = (Gauge) metrics.get(metric);
gauge.labels(getExtendedNodeLabelValues(nodeInfo, labelValues)).set(value);
}

/**
* Registers a new summary metric.
* @see Summary
* @param metric Metric name
* @param help Help text for the metric
* @param labels Optional set of labels
*/
public void registerSummaryTimer(String metric, String help, String... labels) {
Summary summary = Summary.build().
name(metricPrefix + metric).
@@ -152,12 +228,26 @@ public void registerSummaryTimer(String metric, String help, String... labels) {
logger.debug(String.format(Locale.ENGLISH, "Registered new summary %s", metric));
}

/**
* Start a timer for a specific summary metric.
* @see Summary
* @param nodeInfo A {@link Tuple} holding [nodeName, nodeID]
* @param metric Metric name
* @param labelValues Optional set of label values
* @return Summary timer
*/
public Summary.Timer startSummaryTimer(Tuple<String, String> nodeInfo, String metric,
String... labelValues) {
Summary summary = (Summary) metrics.get(metric);
return summary.labels(getExtendedNodeLabelValues(nodeInfo, labelValues)).startTimer();
}
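
For illustration, a minimal usage sketch pairing registerSummaryTimer with startSummaryTimer; the metric name and help text are taken from the collector code further below, while the node tuple values are made up for this example:

    // Hedged usage sketch (node name/ID values are illustrative):
    catalog.registerSummaryTimer("metrics_generate_time_seconds", "Time spent while generating metrics");
    Tuple<String, String> nodeInfo = new Tuple<>("node-1", "nodeId-1");   // [nodeName, nodeID]
    Summary.Timer timer = catalog.startSummaryTimer(nodeInfo, "metrics_generate_time_seconds");
    try {
        // ... gather stats and set the gauges ...
    } finally {
        timer.observeDuration();   // record the elapsed time into the summary
    }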

/**
* Returns all the metrics from the catalog formatted as UTF-8 plain text,
* more specifically as {@link TextFormat#CONTENT_TYPE_004}.
* @return Text representation of the metrics from the catalog
* @throws IOException If creating the text representation fails
*/
public String toTextFormat() throws IOException {
Writer writer = new StringWriter();
TextFormat.write004(writer, registry.metricFamilySamples());
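
As a rough illustration only, the text produced by toTextFormat() for the cluster gauge registered earlier would look something like the following in the 0.0.4 exposition format; the cluster label name and sample values are assumptions:

    String body = catalog.toTextFormat();
    // Illustrative output (label names and values assumed):
    // # HELP opensearch_cluster_shards_number Number of shards
    // # TYPE opensearch_cluster_shards_number gauge
    // opensearch_cluster_shards_number{cluster="my-cluster",type="active"} 5.0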
@@ -56,6 +56,12 @@ public class PrometheusMetricsCollector {
private boolean isPrometheusIndices;
private PrometheusMetricsCatalog catalog;

/**
* A constructor.
* @param catalog {@link PrometheusMetricsCatalog}
* @param isPrometheusIndices boolean flag for index-level metrics
* @param isPrometheusClusterSettings boolean flag for cluster settings metrics
*/
public PrometheusMetricsCollector(PrometheusMetricsCatalog catalog,
boolean isPrometheusIndices,
boolean isPrometheusClusterSettings) {
@@ -64,6 +70,9 @@ public PrometheusMetricsCollector(PrometheusMetricsCatalog catalog,
this.catalog = catalog;
}

/**
* Call this method to register all the metrics that we want to capture.
*/
public void registerMetrics() {
catalog.registerSummaryTimer("metrics_generate_time_seconds", "Time spent while generating metrics");

@@ -951,10 +960,19 @@ public void updateMetrics(String originNodeName, String originNodeId,
timer.observeDuration();
}

/**
* Get the metric catalog.
* @return The catalog
*/
public PrometheusMetricsCatalog getCatalog() {
return catalog;
}

/**
* @see PrometheusMetricsCatalog#toTextFormat()
* @return A text representation of the catalog
* @throws IOException If creating the text representation fails
*/
public String getTextContent() throws IOException {
return this.catalog.toTextFormat();
}
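
A hedged end-to-end sketch of how the collector might be wired up; the PrometheusMetricsCatalog constructor arguments shown here are assumptions and not taken from this diff:

    // Hypothetical wiring (catalog constructor signature assumed):
    PrometheusMetricsCatalog catalog = new PrometheusMetricsCatalog("my-cluster", "opensearch_");
    PrometheusMetricsCollector collector = new PrometheusMetricsCollector(catalog, true, true);
    collector.registerMetrics();
    // ... later, after updateMetrics(...) has populated the gauges ...
    String responseBody = collector.getTextContent();   // Prometheus text format 0.0.4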
@@ -31,20 +31,43 @@
*/
public class PrometheusSettings {

static String PROMETHEUS_CLUSTER_SETTINGS_KEY = "prometheus.cluster.settings";
static String PROMETHEUS_INDICES_KEY = "prometheus.indices";
static String PROMETHEUS_NODES_FILTER_KEY = "prometheus.nodes.filter";

/**
* This setting is used to configure whether to expose cluster settings metrics or not. The default value is true.
* Can be configured in the opensearch.yml file or updated dynamically under the key {@link #PROMETHEUS_CLUSTER_SETTINGS_KEY}.
*/
public static final Setting<Boolean> PROMETHEUS_CLUSTER_SETTINGS =
Setting.boolSetting("prometheus.cluster.settings", true,
Setting.boolSetting(PROMETHEUS_CLUSTER_SETTINGS_KEY, true,
Setting.Property.Dynamic, Setting.Property.NodeScope);

/**
* This setting is used to configure whether to expose low-level index metrics or not. The default value is true.
* Can be configured in the opensearch.yml file or updated dynamically under the key {@link #PROMETHEUS_INDICES_KEY}.
*/
public static final Setting<Boolean> PROMETHEUS_INDICES =
Setting.boolSetting("prometheus.indices", true,
Setting.boolSetting(PROMETHEUS_INDICES_KEY, true,
Setting.Property.Dynamic, Setting.Property.NodeScope);

/**
* This setting is used to configure which cluster nodes to gather metrics from. The default value is _local.
* Can be configured in the opensearch.yml file or updated dynamically under the key {@link #PROMETHEUS_NODES_FILTER_KEY}.
*/
public static final Setting<String> PROMETHEUS_NODES_FILTER =
Setting.simpleString("prometheus.nodes.filter", "_local",
Setting.simpleString(PROMETHEUS_NODES_FILTER_KEY, "_local",
Setting.Property.Dynamic, Setting.Property.NodeScope);

private volatile boolean clusterSettings;
private volatile boolean indices;
private volatile String nodesFilter;

/**
* A constructor.
* @param settings Settings
* @param clusterSettings Cluster settings
*/
public PrometheusSettings(Settings settings, ClusterSettings clusterSettings) {
setPrometheusClusterSettings(PROMETHEUS_CLUSTER_SETTINGS.get(settings));
setPrometheusIndices(PROMETHEUS_INDICES.get(settings));
@@ -64,13 +87,25 @@ private void setPrometheusIndices(boolean flag) {

private void setPrometheusNodesFilter(String filter) { this.nodesFilter = filter; }

/**
* Get value of settings key {@link #PROMETHEUS_CLUSTER_SETTINGS_KEY}.
* @return boolean value of the key
*/
public boolean getPrometheusClusterSettings() {
return this.clusterSettings;
}

/**
* Get value of settings key {@link #PROMETHEUS_INDICES_KEY}.
* @return boolean value of the key
*/
public boolean getPrometheusIndices() {
return this.indices;
}

/**
* Get value of settings key {@link #PROMETHEUS_NODES_FILTER_KEY}.
* @return String value of the key
*/
public String getNodesFilter() { return this.nodesFilter; }
}
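
As a quick illustration of the three settings above, a hedged sketch that reads them from a Settings instance; the concrete values are arbitrary examples:

    // Illustrative only; the values are arbitrary examples.
    Settings settings = Settings.builder()
            .put("prometheus.cluster.settings", true)
            .put("prometheus.indices", false)
            .put("prometheus.nodes.filter", "_all")
            .build();
    boolean exposeClusterSettings = PrometheusSettings.PROMETHEUS_CLUSTER_SETTINGS.get(settings);
    boolean exposeIndices = PrometheusSettings.PROMETHEUS_INDICES.get(settings);
    String nodesFilter = PrometheusSettings.PROMETHEUS_NODES_FILTER.get(settings);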
34 changes: 34 additions & 0 deletions src/main/java/org/opensearch/action/ClusterStatsData.java
@@ -67,6 +67,11 @@ public class ClusterStatsData extends ActionResponse {
private Double[] diskHighInPctRef = new Double[]{diskHighInPct};
private Double[] floodStageInPctRef = new Double[]{floodStageInPct};

/**
* A constructor.
* @param in A {@link StreamInput} to materialize the instance from
* @throws IOException if reading from the StreamInput fails
*/
public ClusterStatsData(StreamInput in) throws IOException {
super(in);
thresholdEnabled = in.readOptionalBoolean();
@@ -87,6 +92,7 @@ public ClusterStatsData(StreamInput in) throws IOException {
// There are several layers of cluster settings in Elasticsearch each having different priority.
// We need to traverse them from the top priority down to find relevant value of each setting.
// See https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html#_order_of_precedence
// TODO[lukas-vlcek]: update to OpenSearch reference
for (Settings s : new Settings[]{
// See: RestClusterGetSettingsAction#response
// or: https://github.com/elastic/elasticsearch/pull/33247/files
@@ -152,35 +158,63 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalDouble(floodStageInPct);
}

/**
* Get value of setting controlled by {@link org.opensearch.cluster.routing.allocation.DiskThresholdSettings#CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING}.
* @return A Boolean value of the setting.
*/
public Boolean getThresholdEnabled() {
return thresholdEnabled;
}

/**
* Get value of setting controlled by {@link org.opensearch.cluster.routing.allocation.DiskThresholdSettings#CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING}.
* @return A Long value of the setting.
*/
@Nullable
public Long getDiskLowInBytes() {
return diskLowInBytes;
}

/**
* Get value of setting controlled by {@link org.opensearch.cluster.routing.allocation.DiskThresholdSettings#CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING}.
* @return A Long value of the setting.
*/
@Nullable
public Long getDiskHighInBytes() {
return diskHighInBytes;
}

/**
* Get value of setting controlled by {@link org.opensearch.cluster.routing.allocation.DiskThresholdSettings#CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING}.
* @return A Long value of the setting.
*/
@Nullable
public Long getFloodStageInBytes() {
return floodStageInBytes;
}

/**
* Get value of setting controlled by {@link org.opensearch.cluster.routing.allocation.DiskThresholdSettings#CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING}.
* @return A Double value of the setting.
*/
@Nullable
public Double getDiskLowInPct() {
return diskLowInPct;
}

/**
* Get value of setting controlled by {@link org.opensearch.cluster.routing.allocation.DiskThresholdSettings#CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING}.
* @return A Double value of the setting.
*/
@Nullable
public Double getDiskHighInPct() {
return diskHighInPct;
}

/**
* Get value of setting controlled by {@link org.opensearch.cluster.routing.allocation.DiskThresholdSettings#CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING}.
* @return A Double value of the setting.
*/
@Nullable
public Double getFloodStageInPct() {
return floodStageInPct;
@@ -21,7 +21,14 @@
*/
public class NodePrometheusMetricsAction extends ActionType<NodePrometheusMetricsResponse> {

/**
* An action singleton instance at the node level.
*/
public static final NodePrometheusMetricsAction INSTANCE = new NodePrometheusMetricsAction();

/**
* A privilege that users need to have to be allowed to request metrics from the plugin REST endpoint.
*/
public static final String NAME = "cluster:monitor/prometheus/metrics";

private NodePrometheusMetricsAction() {
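
For context, a hedged sketch of how a client could invoke this action; the client variable and the listener handling are schematic assumptions:

    // Hypothetical invocation (client assumed to be an org.opensearch.client.Client):
    client.execute(
            NodePrometheusMetricsAction.INSTANCE,
            new NodePrometheusMetricsRequest(),
            ActionListener.wrap(
                    response -> { /* consume NodePrometheusMetricsResponse */ },
                    e -> { /* handle failure */ }
            )
    );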
@@ -27,14 +27,27 @@
*/
public class NodePrometheusMetricsRequest extends ClusterManagerNodeReadRequest<NodePrometheusMetricsRequest> {

/**
* A constructor.
*/
public NodePrometheusMetricsRequest() {
super();
}

/**
* A constructor that utilizes the inputStream.
* @param in inputStream
* @throws IOException if there is an exception reading from inputStream
*/
public NodePrometheusMetricsRequest(StreamInput in) throws IOException {
super(in);
}

/**
* Validate the request.
* Currently, no validation is needed, thus this method always returns null.
* @return null
*/
@Override
public ActionRequestValidationException validate() {
return null;