improved logging
belliottsmith committed Oct 12, 2024
1 parent 9cc9b3a commit 35704ba
Showing 2 changed files with 12 additions and 2 deletions.
@@ -23,8 +23,11 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

import com.google.common.primitives.Ints;
import org.slf4j.Logger;
@@ -85,6 +88,7 @@ public class CoordinateDurabilityScheduling
private final Node node;
private Scheduler.Scheduled scheduled;
private final AtomicReference<Ranges> active = new AtomicReference<>(Ranges.EMPTY);
private final ConcurrentHashMap<TxnId, Ranges> coordinating = new ConcurrentHashMap<>();

/*
* In each round at each node wait this amount of time between initiating new CoordinateShardDurable
@@ -216,7 +220,8 @@ private void run()
Ranges inactiveRanges = ranges.without(active.get());
if (!inactiveRanges.equals(ranges))
{
logger.info("Not initiating new durability scheduling for {} as previous attempt still in progress", ranges.without(inactiveRanges));
String waitingOn = coordinating.entrySet().stream().filter(e -> e.getValue().intersects(ranges)).map(Objects::toString).collect(Collectors.joining(", ", "[", "]"));
logger.info("Not initiating new durability scheduling for {} as previous attempt(s) {} still in progress (scheduling {})", ranges.without(inactiveRanges), waitingOn, inactiveRanges);
if (inactiveRanges.isEmpty())
continue;
}
@@ -232,6 +237,7 @@
private void startShardSync(Ranges ranges)
{
TxnId at = node.nextTxnId(ExclusiveSyncPoint, Domain.Range);
coordinating.put(at, ranges);
node.scheduler().once(() -> node.withEpoch(at.epoch(), (ignored, withEpochFailure) -> {
FullRoute<Range> route = (FullRoute<Range>) node.computeRoute(at, ranges);
if (withEpochFailure != null)
@@ -246,6 +252,7 @@ private void startShardSync(Ranges ranges)
if (fail != null)
{
logger.trace("{}: Exception coordinating ExclusiveSyncPoint for local shard durability of {}", at, ranges, fail);
coordinating.remove(at);
active.accumulateAndGet(route.toRanges(), Ranges::without);
}
else
@@ -270,6 +277,7 @@ private void coordinateShardDurableAfterExclusiveSyncPoint(Node node, SyncPoint<
}
else
{
coordinating.remove(exclusiveSyncPoint.syncId);
active.accumulateAndGet(exclusiveSyncPoint.route.toRanges(), Ranges::without);
}
});
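
The CoordinateDurabilityScheduling half of the commit adds a ConcurrentHashMap<TxnId, Ranges> of in-flight sync-point attempts: startShardSync records each attempt, the failure and completion paths remove it, and run() can now log exactly which attempts still overlap the ranges it declined to reschedule. Below is a minimal, self-contained sketch of that bookkeeping pattern; Interval and the long ids are simplified stand-ins for Accord's Ranges and TxnId, not the real classes.

```java
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

// Minimal sketch of the bookkeeping added in this commit: remember in-flight
// sync-point attempts by id so that, when a new round overlaps them, the log
// line can name exactly which attempts it is still waiting on.
// Interval and the long ids are simplified stand-ins for Accord's Ranges/TxnId.
public class InFlightCoordination
{
    // A single numeric range, standing in for Accord's Ranges.
    record Interval(long start, long end)
    {
        boolean intersects(Interval other)
        {
            return start < other.end && other.start < end;
        }

        @Override
        public String toString()
        {
            return "(" + start + "," + end + "]";
        }
    }

    private final Map<Long, Interval> coordinating = new ConcurrentHashMap<>();

    // Record an attempt when it starts (cf. coordinating.put(at, ranges) in startShardSync).
    void start(long id, Interval ranges)
    {
        coordinating.put(id, ranges);
    }

    // Drop it again on success or failure (cf. the coordinating.remove(...) calls in the diff).
    void finish(long id)
    {
        coordinating.remove(id);
    }

    // Build the "[id=ranges, ...]" string embedded in the new log message,
    // listing only the in-flight attempts that overlap the ranges being scheduled.
    String waitingOn(Interval ranges)
    {
        return coordinating.entrySet().stream()
                           .filter(e -> e.getValue().intersects(ranges))
                           .map(Objects::toString)
                           .collect(Collectors.joining(", ", "[", "]"));
    }

    public static void main(String[] args)
    {
        InFlightCoordination tracker = new InFlightCoordination();
        tracker.start(1L, new Interval(0, 100));
        tracker.start(2L, new Interval(100, 200));
        // A new round for (50,150] overlaps both in-flight attempts:
        System.out.println("previous attempt(s) " + tracker.waitingOn(new Interval(50, 150)) + " still in progress");
        tracker.finish(1L);
        System.out.println("previous attempt(s) " + tracker.waitingOn(new Interval(50, 150)) + " still in progress");
    }
}
```

Removing the entry on both the failure and the completion path mirrors the coordinating.remove(...) calls in the diff and keeps the waiting-on report from going stale.
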
4 changes: 3 additions & 1 deletion accord-core/src/main/java/accord/local/CommandStore.java
@@ -487,6 +487,7 @@ private void fetchMajorityDeps(AsyncResults.SettableResult<Void> coordination, N
CollectCalculatedDeps.withCalculatedDeps(node, id, route, route, before, (deps, fail) -> {
if (fail != null)
{
logger.warn("Failed to fetch deps for syncing epoch {} for ranges {}", epoch, ranges, fail);
node.scheduler().once(() -> fetchMajorityDeps(coordination, node, epoch, ranges), 1L, TimeUnit.MINUTES);
node.agent().onUncaughtException(fail);
}
@@ -499,8 +500,9 @@ private void fetchMajorityDeps(AsyncResults.SettableResult<Void> coordination, N
}).begin((success, fail2) -> {
if (fail2 != null)
{
node.agent().onUncaughtException(fail2);
logger.warn("Failed to apply deps for syncing epoch {} for ranges {}", epoch, ranges, fail2);
node.scheduler().once(() -> fetchMajorityDeps(coordination, node, epoch, ranges), 1L, TimeUnit.MINUTES);
node.agent().onUncaughtException(fail2);
}
else
{

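The CommandStore side of the commit adds a logger.warn to both failure paths in fetchMajorityDeps, so the one-minute retry is visible in the logs before the failure is handed to node.agent().onUncaughtException. A hedged sketch of that retry-with-logging shape is below; the ScheduledExecutorService, CompletableFuture, and the fetchDeps/onUncaughtException helpers are stand-ins for Accord's Node, Scheduler, and agent, not the real APIs.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch of the failure handling shape in fetchMajorityDeps after this commit:
// log the failure so the retry is visible, schedule the retry, then report the
// throwable. Scheduler, agent and dep collection below are stand-ins, not Accord APIs.
public class RetryWithLogging
{
    private static final Logger logger = LoggerFactory.getLogger(RetryWithLogging.class);
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    void fetchMajorityDeps(CompletableFuture<Void> coordination, long epoch, String ranges)
    {
        fetchDeps(epoch, ranges).whenComplete((deps, fail) -> {
            if (fail != null)
            {
                // Log first, so operators can see why the work is being re-scheduled;
                // the last argument is a Throwable, so slf4j appends the stack trace.
                logger.warn("Failed to fetch deps for syncing epoch {} for ranges {}", epoch, ranges, fail);
                scheduler.schedule(() -> fetchMajorityDeps(coordination, epoch, ranges), 1L, TimeUnit.MINUTES);
                onUncaughtException(fail);
            }
            else
            {
                coordination.complete(null);
            }
        });
    }

    // Stand-in for the asynchronous dep collection (CollectCalculatedDeps in the diff).
    CompletableFuture<String> fetchDeps(long epoch, String ranges)
    {
        return CompletableFuture.completedFuture("deps");
    }

    // Stand-in for node.agent().onUncaughtException(...).
    void onUncaughtException(Throwable t)
    {
        logger.error("uncaught exception", t);
    }
}
```

Logging before rescheduling, rather than only surfacing the throwable through the agent, means repeated failures show up as a visible retry loop for the affected epoch and ranges.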