Skip to content

Commit

Permalink
[Logging Improvement] Using lambda invocations instead of checking debug/trace isEnabled explicitly
Browse files Browse the repository at this point in the history

Converted debug/trace/warn/info checks to lambda-based logging APIs.

Resolves opensearch-project#8646

Signed-off-by: Abdul Muneer Kolarkunnu <[email protected]>
  • Loading branch information
akolarkunnu committed Jul 5, 2024
1 parent 74230b7 commit 825529a
Show file tree
Hide file tree
Showing 70 changed files with 690 additions and 925 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -403,14 +403,12 @@ void prepareBulkRequest(long thisBatchStartTimeNS, ScrollableHitSource.AsyncResp
* Send a bulk request, handling retries.
*/
void sendBulkRequest(BulkRequest request, Runnable onSuccess) {
if (logger.isDebugEnabled()) {
logger.debug(
"[{}]: sending [{}] entry, [{}] bulk request",
task.getId(),
request.requests().size(),
new ByteSizeValue(request.estimatedSizeInBytes())
);
}
logger.debug(
"[{}]: sending [{}] entry, [{}] bulk request",
() -> task.getId(),
() -> request.requests().size(),
() -> new ByteSizeValue(request.estimatedSizeInBytes())
);
if (task.isCancelled()) {
logger.debug("[{}]: finishing early because the task was cancelled", task.getId());
finishHim(null);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -228,18 +228,16 @@ private Bootstrap createClientBootstrap(SharedGroupFactory.SharedGroup sharedGro

private void createServerBootstrap(ProfileSettings profileSettings, SharedGroupFactory.SharedGroup sharedGroup) {
String name = profileSettings.profileName;
if (logger.isDebugEnabled()) {
logger.debug(
"using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], receive_predictor[{}->{}]",
name,
sharedGroupFactory.getTransportWorkerCount(),
profileSettings.portOrRange,
profileSettings.bindHosts,
profileSettings.publishHosts,
receivePredictorMin,
receivePredictorMax
);
}
logger.debug(
"using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], receive_predictor[{}->{}]",
() -> name,
() -> sharedGroupFactory.getTransportWorkerCount(),
() -> profileSettings.portOrRange,
() -> profileSettings.bindHosts,
() -> profileSettings.publishHosts,
() -> receivePredictorMin,
() -> receivePredictorMax
);

final ServerBootstrap serverBootstrap = new ServerBootstrap();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -215,15 +215,13 @@ public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, Sock
? inetSocketAddress.getHostName()
: inetSocketAddress.getHostString();

if (log.isDebugEnabled()) {
log.debug(
"Hostname of peer is {} ({}/{}) with hostnameVerificationResolveHostName: {}",
hostname,
inetSocketAddress.getHostName(),
inetSocketAddress.getHostString(),
hostnameVerificationResovleHostName
);
}
log.debug(
"Hostname of peer is {} ({}/{}) with hostnameVerificationResolveHostName: {}",
() -> hostname,
() -> inetSocketAddress.getHostName(),
() -> inetSocketAddress.getHostString(),
() -> hostnameVerificationResovleHostName
);

sslEngine = secureTransportSettingsProvider.buildSecureClientTransportEngine(
settings,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,16 +107,14 @@ class AwsEc2SeedHostsProvider implements SeedHostsProvider {
instanceStates.add("running");
instanceStates.add("pending");

if (logger.isDebugEnabled()) {
logger.debug(
"using host_type [{}], tags [{}], groups [{}] with any_group [{}], availability_zones [{}]",
hostType,
tags,
groups,
bindAnyGroup,
availabilityZones
);
}
logger.debug(
"using host_type [{}], tags [{}], groups [{}] with any_group [{}], availability_zones [{}]",
() -> hostType,
() -> tags,
() -> groups,
() -> bindAnyGroup,
() -> availabilityZones
);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurit
public synchronized Compute client() {
if (refreshInterval != null && refreshInterval.millis() != 0) {
if (client != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
if (logger.isTraceEnabled()) logger.trace("using cache to retrieve client");
logger.trace(() -> "using cache to retrieve client");
return client;
}
lastRefresh = System.currentTimeMillis();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,9 +107,7 @@ public GceSeedHostsProvider(
this.zones = gceInstancesService.zones();

this.tags = TAGS_SETTING.get(settings);
if (logger.isDebugEnabled()) {
logger.debug("using tags {}", this.tags);
}
logger.debug("using tags {}", () -> this.tags);
}

/**
Expand All @@ -133,7 +131,7 @@ public List<TransportAddress> getSeedAddresses(HostsResolver hostsResolver) {
if (refreshInterval.millis() != 0) {
if (cachedDynamicHosts != null
&& (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
if (logger.isTraceEnabled()) logger.trace("using cache to retrieve node list");
logger.trace(() -> "using cache to retrieve node list");
return cachedDynamicHosts;
}
lastRefresh = System.currentTimeMillis();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -486,9 +486,7 @@ private ClusterHealthResponse clusterHealth(
int numberOfInFlightFetch,
TimeValue pendingTaskTimeInQueue
) {
if (logger.isTraceEnabled()) {
logger.trace("Calculating health based on state version [{}]", clusterState.version());
}
logger.trace("Calculating health based on state version [{}]", () -> clusterState.version());

String[] concreteIndices;
if (request.level().equals(ClusterHealthRequest.Level.AWARENESS_ATTRIBUTES)) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -232,9 +232,7 @@ private AsyncShardsAction(FieldCapabilitiesIndexRequest request, ActionListener<
this.listener = listener;

ClusterState clusterState = clusterService.state();
if (logger.isTraceEnabled()) {
logger.trace("executing [{}] based on cluster state version [{}]", request, clusterState.version());
}
logger.trace("executing [{}] based on cluster state version [{}]", () -> request, () -> clusterState.version());
nodes = clusterState.nodes();
ClusterBlockException blockException = checkGlobalBlock(clusterState);
if (blockException != null) {
Expand Down Expand Up @@ -306,9 +304,7 @@ private void tryNext(@Nullable final Exception lastFailure, boolean canMatchShar
onFailure(shardRouting, new NoShardAvailableActionException(shardRouting.shardId()));
} else {
request.shardId(shardRouting.shardId());
if (logger.isTraceEnabled()) {
logger.trace("sending request [{}] on node [{}]", request, node);
}
logger.trace("sending request [{}] on node [{}]", () -> request, () -> node);
transportService.sendRequest(
node,
ACTION_SHARD_NAME,
Expand Down Expand Up @@ -354,9 +350,7 @@ private class ShardTransportHandler implements TransportRequestHandler<FieldCapa
@Override
public void messageReceived(final FieldCapabilitiesIndexRequest request, final TransportChannel channel, Task task)
throws Exception {
if (logger.isTraceEnabled()) {
logger.trace("executing [{}]", request);
}
logger.trace("executing [{}]", () -> request);
ActionListener<FieldCapabilitiesIndexResponse> listener = new ChannelActionListener<>(channel, ACTION_SHARD_NAME, request);
executor.execute(ActionRunnable.supply(listener, () -> shardOperation(request)));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -402,48 +402,36 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha
// successful retries can reset the failures to null
ShardOperationFailedException[] shardSearchFailures = buildShardFailures();
if (shardSearchFailures.length > 0) {
if (logger.isDebugEnabled()) {
int numShardFailures = shardSearchFailures.length;
shardSearchFailures = ExceptionsHelper.groupBy(shardSearchFailures);
Throwable cause = OpenSearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
logger.debug(
() -> new ParameterizedMessage("{} shards failed for phase: [{}]", numShardFailures, getName()),
cause
);
}
logger.debug(
() -> new ParameterizedMessage("{} shards failed for phase: [{}]", shardSearchFailures.length, getName()),
OpenSearchException.guessRootCauses(ExceptionsHelper.groupBy(shardSearchFailures)[0].getCause())[0]
);
onPhaseFailure(currentPhase, "Partial shards failure", null);
return;
} else {
int discrepancy = getNumShards() - successfulOps.get();
assert discrepancy > 0 : "discrepancy: " + discrepancy;
if (logger.isDebugEnabled()) {
logger.debug(
"Partial shards failure (unavailable: {}, successful: {}, skipped: {}, num-shards: {}, phase: {})",
discrepancy,
successfulOps.get(),
skippedOps.get(),
getNumShards(),
currentPhase.getName()
);
}
logger.debug(
"Partial shards failure (unavailable: {}, successful: {}, skipped: {}, num-shards: {}, phase: {})",
() -> discrepancy,
() -> successfulOps.get(),
() -> skippedOps.get(),
() -> getNumShards(),
() -> currentPhase.getName()
);
if (!request.indicesOptions().ignoreUnavailable()) {
onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null);
return;
}
}
}
if (logger.isTraceEnabled()) {
final String resultsFrom = results.getSuccessfulResults()
.map(r -> r.getSearchShardTarget().toString())
.collect(Collectors.joining(","));
logger.trace(
"[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
currentPhase.getName(),
nextPhase.getName(),
resultsFrom,
clusterState.version()
);
}
logger.trace(
"[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
() -> currentPhase.getName(),
() -> nextPhase.getName(),
() -> results.getSuccessfulResults().map(r -> r.getSearchShardTarget().toString()).collect(Collectors.joining(",")),
() -> clusterState.version()
);
onPhaseEnd(searchRequestContext);
executePhase(nextPhase);
}
Expand Down Expand Up @@ -480,9 +468,7 @@ private void executePhase(SearchPhase phase) {
onPhaseStart(phase);
phase.recordAndRun();
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e);
}
logger.debug(() -> new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e);

if (currentPhaseHasLifecycle == false) {
phaseSpan.setError(e);
Expand Down Expand Up @@ -621,9 +607,7 @@ protected void onShardResult(Result result, SearchShardIterator shardIt) {
assert result.getShardIndex() != -1 : "shard index is not set";
assert result.getSearchShardTarget() != null : "search shard target must not be null";
hasShardResponse.set(true);
if (logger.isTraceEnabled()) {
logger.trace("got first-phase result from {}", result != null ? result.getSearchShardTarget() : null);
}
logger.trace("got first-phase result from {}", () -> result != null ? result.getSearchShardTarget() : null);
this.setPhaseResourceUsages();
results.consumeResult(result, () -> onShardResultConsumed(result, shardIt));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -317,9 +317,7 @@ protected void onShardFailure(
@Nullable SearchShardTarget searchShardTarget,
Supplier<SearchPhase> nextPhaseSupplier
) {
if (logger.isDebugEnabled()) {
logger.debug(new ParameterizedMessage("[{}] Failed to execute {} phase", searchId, phaseName), failure);
}
logger.debug(() -> new ParameterizedMessage("[{}] Failed to execute {} phase", searchId, phaseName), failure);
addShardFailure(new ShardSearchFailure(failure, searchShardTarget));
int successfulOperations = successfulOps.decrementAndGet();
assert successfulOperations >= 0 : "successfulOperations must be >= 0 but was: " + successfulOperations;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -256,33 +256,29 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int

if (nextShard != null) {
if (e != null) {
if (logger.isTraceEnabled()) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.trace(
new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request
),
e
);
}
if (!TransportActions.isShardNotAvailableException(e)) {
logger.trace(
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request
),
e
);
}
}
performOperation(shardIt, nextShard, shardIndex);
} else {
if (logger.isDebugEnabled()) {
if (e != null) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.debug(
new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request
),
e
);
}
if (e != null) {
if (!TransportActions.isShardNotAvailableException(e)) {
logger.debug(
() -> new ParameterizedMessage(
"{}: failed to execute [{}]",
shard != null ? shard.shortSummary() : shardIt.shardId(),
request
),
e
);
}
}
if (expectedOps == counterOps.incrementAndGet()) {
Expand Down
Loading

0 comments on commit 825529a

Please sign in to comment.