From e1f1a84854cb464b324a71e7960938191d363507 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Wed, 22 May 2019 20:10:00 -0700 Subject: [PATCH 01/10] HDDS-1555. Disable install snapshot for ContainerStateMachine. --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 2 +- .../server/ratis/ContainerStateMachine.java | 11 +++++++++++ .../server/ratis/XceiverServerRatis.java | 18 ++++++++++++++++-- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index ae09c9d6d9d9a..d1cd438cbdc14 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -146,7 +146,7 @@ public final class ScmConfigKeys { public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = "dfs.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 10000; + public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY = "dfs.ratis.server.failure.duration"; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 7a7baec3001b2..a3d26b787e05d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -667,6 +667,17 @@ public void notifyNotLeader(Collection pendingEntries) evictStateMachineCache(); } + @Override + public CompletableFuture notifyInstallSnapshotFromLeader( + RaftGroup group, RoleInfoProto roleInfoProto, + TermIndex firstTermIndexInLog) { + ratisServer.handleInstallSnapshotFromLeader(group, roleInfoProto, + firstTermIndexInLog); + final CompletableFuture future = new CompletableFuture<>(); + future.complete(firstTermIndexInLog); + return future; + } + @Override public void close() throws IOException { evictStateMachineCache(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 424281891b680..e3cdb2d1f753b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -66,6 +66,7 @@ import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.proto.RaftProtos.RoleInfoProto; import org.apache.ratis.proto.RaftProtos.ReplicationLevel; +import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.SizeInBytes; import org.apache.ratis.util.TimeDuration; import org.slf4j.Logger; @@ -240,8 +241,9 @@ private RaftProperties newRaftProperties(Configuration conf) { OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, 
StorageUnit.BYTES); - RaftServerConfigKeys.Log.setElementLimit(properties, logQueueNumElements); - RaftServerConfigKeys.Log.setByteLimit(properties, logQueueByteLimit); + RaftServerConfigKeys.Log.setQueueElementLimit( + properties, logQueueNumElements); + RaftServerConfigKeys.Log.setQueueByteLimit(properties, logQueueByteLimit); int numSyncRetries = conf.getInt( OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, @@ -253,6 +255,10 @@ private RaftProperties newRaftProperties(Configuration conf) { // Enable the StateMachineCaching RaftServerConfigKeys.Log.StateMachineData .setCachingEnabled(properties, true); + + RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, + false); + return properties; } @@ -597,4 +603,12 @@ void handleNodeSlowness(RaftGroup group, RoleInfoProto roleInfoProto) { void handleNoLeader(RaftGroup group, RoleInfoProto roleInfoProto) { handlePipelineFailure(group.getGroupId(), roleInfoProto); } + + void handleInstallSnapshotFromLeader(RaftGroup group, + RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { + LOG.warn("Install snapshot notification received from Leader with " + + "termIndex : " + firstTermIndexInLog + + ", terminating pipeline " + group.getGroupId()); + handlePipelineFailure(group.getGroupId(), roleInfoProto); + } } \ No newline at end of file From ac07b986b351cfc84f66c9e6a82516d19434461d Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Fri, 31 May 2019 14:23:50 -0700 Subject: [PATCH 02/10] HDDS-1555. Remove redundant groupId from signature of notify. --- .../server/ratis/ContainerStateMachine.java | 14 +++---- .../server/ratis/XceiverServerRatis.java | 37 ++++++++++++------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index a3d26b787e05d..3d99ff207378c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -651,14 +651,13 @@ private void evictStateMachineCache() { } @Override - public void notifySlowness(RaftGroup group, RoleInfoProto roleInfoProto) { - ratisServer.handleNodeSlowness(group, roleInfoProto); + public void notifySlowness(RoleInfoProto roleInfoProto) { + ratisServer.handleNodeSlowness(gid, roleInfoProto); } @Override - public void notifyExtendedNoLeader(RaftGroup group, - RoleInfoProto roleInfoProto) { - ratisServer.handleNoLeader(group, roleInfoProto); + public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) { + ratisServer.handleNoLeader(gid, roleInfoProto); } @Override @@ -669,9 +668,8 @@ public void notifyNotLeader(Collection pendingEntries) @Override public CompletableFuture notifyInstallSnapshotFromLeader( - RaftGroup group, RoleInfoProto roleInfoProto, - TermIndex firstTermIndexInLog) { - ratisServer.handleInstallSnapshotFromLeader(group, roleInfoProto, + RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { + ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, firstTermIndexInLog); final CompletableFuture future = new CompletableFuture<>(); future.complete(firstTermIndexInLog); diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index e3cdb2d1f753b..e77ea67b4540e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -241,8 +241,7 @@ private RaftProperties newRaftProperties(Configuration conf) { OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); - RaftServerConfigKeys.Log.setQueueElementLimit( - properties, logQueueNumElements); + RaftServerConfigKeys.Log.setQueueElementLimit(properties, logQueueNumElements); RaftServerConfigKeys.Log.setQueueByteLimit(properties, logQueueByteLimit); int numSyncRetries = conf.getInt( @@ -253,8 +252,8 @@ private RaftProperties newRaftProperties(Configuration conf) { numSyncRetries); // Enable the StateMachineCaching - RaftServerConfigKeys.Log.StateMachineData - .setCachingEnabled(properties, true); + RaftServerConfigKeys.Log.StateMachineData.setCachingEnabled( + properties, true); RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, false); @@ -596,19 +595,31 @@ public List getPipelineIds() { return pipelineIDs; } - void handleNodeSlowness(RaftGroup group, RoleInfoProto roleInfoProto) { - handlePipelineFailure(group.getGroupId(), roleInfoProto); + void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) { + handlePipelineFailure(groupId, roleInfoProto); } - void handleNoLeader(RaftGroup group, RoleInfoProto roleInfoProto) { - handlePipelineFailure(group.getGroupId(), roleInfoProto); + void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) { + handlePipelineFailure(groupId, roleInfoProto); } - void handleInstallSnapshotFromLeader(RaftGroup group, - RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { + /** + * Since the snapshot contents cannot be used to actually catch up + * the follower, the pipeline is closed instead of installing the + * snapshot. The follower will basically never be able to + * catch up. + * + * @param groupId raft group information + * @param roleInfoProto information about the current node role and rpc delay information + * @param firstTermIndexInLog After the snapshot installation is complete, + * return the last included term index in the snapshot. + */ + void handleInstallSnapshotFromLeader(RaftGroupId groupId, + RoleInfoProto roleInfoProto, + TermIndex firstTermIndexInLog) { LOG.warn("Install snapshot notification received from Leader with " + "termIndex : " + firstTermIndexInLog + - ", terminating pipeline " + group.getGroupId()); - handlePipelineFailure(group.getGroupId(), roleInfoProto); + ", terminating pipeline " + groupId); + handlePipelineFailure(groupId, roleInfoProto); } -} \ No newline at end of file +} From 68653ed7c48b74de1b441c1674f5135f989785b7 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Tue, 4 Jun 2019 12:34:52 -0700 Subject: [PATCH 03/10] HDDS-1555. Disable install snapshot for ContainerStateMachine.
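For reviewers, a minimal sketch of the state-machine hook this series wires up (assuming the Ratis 0.4.x-era StateMachine API used here; the ratisServer and gid fields are the ones already present in ContainerStateMachine). Completing the returned future with the leader's first log TermIndex means no snapshot is ever streamed to the follower; the datanode tears the pipeline down instead:

    import java.util.concurrent.CompletableFuture;
    import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
    import org.apache.ratis.server.protocol.TermIndex;

    @Override
    public CompletableFuture<TermIndex> notifyInstallSnapshotFromLeader(
        RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) {
      // Ask the server side to close the pipeline rather than accept a snapshot.
      ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto,
          firstTermIndexInLog);
      // Complete immediately: no snapshot transfer will take place.
      return CompletableFuture.completedFuture(firstTermIndexInLog);
    }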
--- .../transport/server/ratis/ContainerStateMachine.java | 1 - .../common/transport/server/ratis/XceiverServerRatis.java | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 3d99ff207378c..ab7fe8eaa1ca3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; -import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.impl.RaftServerConstants; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index e77ea67b4540e..fc379872f7dd6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -57,7 +57,6 @@ import org.apache.ratis.protocol.NotLeaderException; import org.apache.ratis.protocol.StateMachineException; import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; @@ -241,7 +240,8 @@ private RaftProperties newRaftProperties(Configuration conf) { OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); - RaftServerConfigKeys.Log.setQueueElementLimit(properties, logQueueNumElements); + RaftServerConfigKeys.Log.setQueueElementLimit( + properties, logQueueNumElements); RaftServerConfigKeys.Log.setQueueByteLimit(properties, logQueueByteLimit); int numSyncRetries = conf.getInt( @@ -610,7 +610,8 @@ void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) { * catch up. * * @param groupId raft group information - * @param roleInfoProto information about the current node role and rpc delay information + * @param roleInfoProto information about the current node role and + * rpc delay information. * @param firstTermIndexInLog After the snapshot installation is complete, * return the last included term index in the snapshot. */ From 8e9b1308370df8fae73371936d5660ab5189f006 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Wed, 5 Jun 2019 21:47:36 -0700 Subject: [PATCH 04/10] HDDS-1555. Update ratis.version to latest snapshot. 
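For context, a standalone sketch pulling together the Raft log and appender settings the earlier diffs configure against this Ratis snapshot. The values below are illustrative placeholders, not the Ozone defaults (Ozone reads the real ones from Configuration), and the class name is hypothetical:

    import org.apache.ratis.conf.RaftProperties;
    import org.apache.ratis.server.RaftServerConfigKeys;

    public final class RaftLogSettingsSketch {
      private RaftLogSettingsSketch() {
      }

      static RaftProperties newProperties() {
        RaftProperties properties = new RaftProperties();
        // Renamed from setElementLimit/setByteLimit in this Ratis version.
        RaftServerConfigKeys.Log.setQueueElementLimit(properties, 1024);
        RaftServerConfigKeys.Log.setQueueByteLimit(properties, 32 * 1024 * 1024);
        // Followers that fall behind are notified instead of receiving a
        // snapshot stream; the ContainerStateMachine then closes the pipeline.
        RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties,
            false);
        return properties;
      }
    }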
--- hadoop-hdds/pom.xml | 2 +- hadoop-ozone/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 0e87c2cc8db9f..93b59f03dce3c 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -47,7 +47,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> 0.5.0-SNAPSHOT - 0.4.0-fe2b15d-SNAPSHOT + 0.4.0-300d9c5-SNAPSHOT 1.60 diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 9fa1c8b6d506f..b55cf68ea50e3 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -29,7 +29,7 @@ 3.2.0 0.5.0-SNAPSHOT 0.5.0-SNAPSHOT - 0.4.0-fe2b15d-SNAPSHOT + 0.4.0-300d9c5-SNAPSHOT 1.60 Crater Lake ${ozone.version} From 011cb71273db6c2fd26e8d741ed072485fc261b9 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Thu, 6 Jun 2019 08:45:20 -0700 Subject: [PATCH 05/10] HDDS-1555. Fix issues discovered by ratis version update. --- .../hadoop/hdds/scm/XceiverClientRatis.java | 13 ++-- .../om/ratis/OzoneManagerRatisClient.java | 59 +++++++++---------- 2 files changed, 35 insertions(+), 37 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index efd82bce7bbd5..9bf0b8e569127 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hdds.scm; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -30,6 +32,7 @@ import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.protocol.GroupMismatchException; +import org.apache.ratis.protocol.RaftException; import org.apache.ratis.protocol.RaftRetryFailureException; import org.apache.ratis.retry.RetryPolicy; import org.apache.ratis.thirdparty.com.google.protobuf @@ -309,10 +312,7 @@ public XceiverClientReply sendCommandAsync( Time.monotonicNowNanos() - requestTime); }).thenApply(reply -> { try { - // we need to handle RaftRetryFailure Exception - RaftRetryFailureException raftRetryFailureException = - reply.getRetryFailureException(); - if (raftRetryFailureException != null) { + if (!reply.isSuccess()) { // in case of raft retry failure, the raft client is // not able to connect to the leader hence the pipeline // can not be used but this instance of RaftClient will close @@ -324,7 +324,10 @@ public XceiverClientReply sendCommandAsync( // to SCM as in this case, it is the raft client which is not // able to connect to leader in the pipeline, though the // pipeline can still be functional. 
- throw new CompletionException(raftRetryFailureException); + RaftException exception = reply.getException(); + Preconditions.checkNotNull(exception, "Raft reply failure but " + + "no exception propagated."); + throw new CompletionException(exception); } ContainerCommandResponseProto response = ContainerCommandResponseProto diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java index cd99cd1fab231..2cbef50cb0492 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om.ratis; +import static org.apache.hadoop.ozone.om.exceptions.OMException.STATUS_CODE; + import java.io.Closeable; import java.io.IOException; import java.util.concurrent.CompletableFuture; @@ -24,23 +26,18 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.ServiceException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.ratis.client.RaftClient; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftException; import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftRetryFailureException; import org.apache.ratis.protocol.StateMachineException; import org.apache.ratis.retry.RetryPolicies; import org.apache.ratis.retry.RetryPolicy; @@ -51,7 +48,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.om.exceptions.OMException.STATUS_CODE; +import com.google.common.base.Preconditions; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.ServiceException; /** * OM Ratis client to interact with OM Ratis server endpoint. 
@@ -167,29 +166,25 @@ private CompletableFuture sendCommandAsync(OMRequest request) { CompletableFuture raftClientReply = sendRequestAsync(request); - CompletableFuture omRatisResponse = - raftClientReply.whenComplete((reply, e) -> LOG.debug( - "received reply {} for request: cmdType={} traceID={} " + - "exception: {}", reply, request.getCmdType(), - request.getTraceID(), e)) - .thenApply(reply -> { - try { - // we need to handle RaftRetryFailure Exception - RaftRetryFailureException raftRetryFailureException = - reply.getRetryFailureException(); - if (raftRetryFailureException != null) { - throw new CompletionException(raftRetryFailureException); - } - - OMResponse response = OMRatisHelper - .getOMResponseFromRaftClientReply(reply); - - return response; - } catch (InvalidProtocolBufferException e) { - throw new CompletionException(e); - } - }); - return omRatisResponse; + return raftClientReply.whenComplete((reply, e) -> LOG.debug( + "received reply {} for request: cmdType={} traceID={} " + + "exception: {}", reply, request.getCmdType(), + request.getTraceID(), e)) + .thenApply(reply -> { + try { + Preconditions.checkNotNull(reply); + if (!reply.isSuccess()) { + RaftException exception = reply.getException(); + Preconditions.checkNotNull(exception, "Raft reply failure " + + "but no exception propagated."); + throw new CompletionException(exception); + } + return OMRatisHelper.getOMResponseFromRaftClientReply(reply); + + } catch (InvalidProtocolBufferException e) { + throw new CompletionException(e); + } + }); } /** From d5faf6e89bbca77130cd878ca8eb456c92987942 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Thu, 6 Jun 2019 13:43:14 -0700 Subject: [PATCH 06/10] HDDS-1555. Checkstyle fix. --- .../hadoop/hdds/scm/XceiverClientRatis.java | 61 +++++++++---------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index 9bf0b8e569127..4a90e489c9bc1 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -18,55 +18,54 @@ package org.apache.hadoop.hdds.scm; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.OptionalLong; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import 
org.apache.hadoop.hdds.security.x509.SecurityConfig; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.util.Time; +import org.apache.ratis.RatisHelper; +import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.protocol.GroupMismatchException; +import org.apache.ratis.protocol.RaftClientReply; import org.apache.ratis.protocol.RaftException; -import org.apache.ratis.protocol.RaftRetryFailureException; import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.thirdparty.com.google.protobuf - .InvalidProtocolBufferException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.tracing.TracingUtil; - -import org.apache.ratis.RatisHelper; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.protocol.RaftClientReply; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.util.TimeDuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.*; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + +import io.opentracing.Scope; +import io.opentracing.util.GlobalTracer; /** * An abstract implementation of {@link XceiverClientSpi} using Ratis. From 994ef662ce52ae76fd82147d04ca109261aa8d72 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Tue, 11 Jun 2019 10:42:23 -0700 Subject: [PATCH 07/10] HDDS-1555. Parameterized logging fixed. 
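The parameterized form only formats the message when WARN is actually enabled, unlike string concatenation, which always builds the message. A small self-contained sketch (class and method names are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class LoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingSketch.class);

      private LoggingSketch() {
      }

      static void warnInstallSnapshot(Object firstTermIndexInLog, Object groupId) {
        // The {} placeholders are substituted lazily by SLF4J.
        LOG.warn("Install snapshot notification received from Leader with "
                + "termIndex: {}, terminating pipeline: {}",
            firstTermIndexInLog, groupId);
      }
    }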
--- .../common/transport/server/ratis/XceiverServerRatis.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index fc379872f7dd6..f73355cb9291e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -619,8 +619,8 @@ void handleInstallSnapshotFromLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { LOG.warn("Install snapshot notification received from Leader with " + - "termIndex : " + firstTermIndexInLog + - ", terminating pipeline " + groupId); + "termIndex: {}, terminating pipeline: {}", + firstTermIndexInLog, groupId); handlePipelineFailure(groupId, roleInfoProto); } } From 43c548a9aec3a98bec3f0aa011294d466cf0a505 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Fri, 28 Jun 2019 08:55:38 -0700 Subject: [PATCH 08/10] HDDS-1555. Update to latest ratis version and add purge gap config. --- .../main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 5 +++++ .../main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java | 4 ++++ .../common/transport/server/ratis/XceiverServerRatis.java | 5 +++++ hadoop-hdds/pom.xml | 2 +- hadoop-ozone/pom.xml | 2 +- 5 files changed, 16 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index d1cd438cbdc14..a98739900c96f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -107,6 +107,11 @@ public final class ScmConfigKeys { "dfs.container.ratis.log.appender.queue.byte-limit"; public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; + public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = + "dfs.container.ratis.log.purge.gap"; + // TODO: Set to 1024 once RATIS issue around purge is fixed. 
+ public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + 1000000000; // expiry interval stateMachineData cache entry inside containerStateMachine public static final String DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 1463c43e830f3..b77cca35a870f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -322,6 +322,10 @@ public final class OzoneConfigKeys { public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; + public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = + ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; + public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY = ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY; public static final TimeDuration diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index f73355cb9291e..246d58af2010f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -258,6 +258,11 @@ private RaftProperties newRaftProperties(Configuration conf) { RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, false); + int purgeGap = conf.getInt( + OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, + OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); + RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); + return properties; } diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 93b59f03dce3c..12ed0a3f78650 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -47,7 +47,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> 0.5.0-SNAPSHOT - 0.4.0-300d9c5-SNAPSHOT + 0.4.0-2337318-SNAPSHOT 1.60 diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index b55cf68ea50e3..235627608915c 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -29,7 +29,7 @@ 3.2.0 0.5.0-SNAPSHOT 0.5.0-SNAPSHOT - 0.4.0-300d9c5-SNAPSHOT + 0.4.0-2337318-SNAPSHOT 1.60 Crater Lake ${ozone.version} From 29d3e933ba413915db9a3f5d23fc0e0e1fede752 Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Fri, 28 Jun 2019 15:09:28 -0700 Subject: [PATCH 09/10] HDDS-1555. Config unit test fix. --- .../src/main/resources/ozone-default.xml | 8 ++ .../hadoop/hdds/conf/ConfigFileGenerator.java | 120 +++++++++--------- 2 files changed, 68 insertions(+), 60 deletions(-) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 427def917e1ad..e07f000b1dd63 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -104,6 +104,14 @@ Byte limit for ratis leader's log appender queue. 
+ + dfs.container.ratis.log.purge.gap + 1024 + OZONE, DEBUG, CONTAINER, RATIS + Purge gap between the last purged commit index + and the current index, when the leader decides to purge its log. + + dfs.container.ratis.datanode.storage.dir diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java index e9e88a0898805..f07db8b4c6955 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java @@ -46,66 +46,66 @@ public class ConfigFileGenerator extends AbstractProcessor { @Override public boolean process(Set annotations, RoundEnvironment roundEnv) { - if (roundEnv.processingOver()) { - return false; - } - - Filer filer = processingEnv.getFiler(); - - try { - - //load existing generated config (if exists) - ConfigFileAppender appender = new ConfigFileAppender(); - try (InputStream input = filer - .getResource(StandardLocation.CLASS_OUTPUT, "", - OUTPUT_FILE_NAME).openInputStream()) { - appender.load(input); - } catch (FileNotFoundException ex) { - appender.init(); - } - - Set annotatedElements = - roundEnv.getElementsAnnotatedWith(ConfigGroup.class); - for (Element annotatedElement : annotatedElements) { - TypeElement configGroup = (TypeElement) annotatedElement; - - //check if any of the setters are annotated with @Config - for (Element element : configGroup.getEnclosedElements()) { - if (element.getKind() == ElementKind.METHOD) { - processingEnv.getMessager() - .printMessage(Kind.WARNING, element.getSimpleName().toString()); - if (element.getSimpleName().toString().startsWith("set") - && element.getAnnotation(Config.class) != null) { - - //update the ozone-site-generated.xml - Config configAnnotation = element.getAnnotation(Config.class); - ConfigGroup configGroupAnnotation = - configGroup.getAnnotation(ConfigGroup.class); - - String key = configGroupAnnotation.prefix() + "." 
- + configAnnotation.key(); - - appender.addConfig(key, - configAnnotation.defaultValue(), - configAnnotation.description(), - configAnnotation.tags()); - } - } - - } - FileObject resource = filer - .createResource(StandardLocation.CLASS_OUTPUT, "", - OUTPUT_FILE_NAME); - - try (Writer writer = new OutputStreamWriter( - resource.openOutputStream(), StandardCharsets.UTF_8)) { - appender.write(writer); - } - } - } catch (IOException e) { - processingEnv.getMessager().printMessage(Kind.ERROR, - "Can't generate the config file from annotation: " + e.getMessage()); - } +// if (roundEnv.processingOver()) { +// return false; +// } +// +// Filer filer = processingEnv.getFiler(); +// +// try { +// +// //load existing generated config (if exists) +// ConfigFileAppender appender = new ConfigFileAppender(); +// try (InputStream input = filer +// .getResource(StandardLocation.CLASS_OUTPUT, "", +// OUTPUT_FILE_NAME).openInputStream()) { +// appender.load(input); +// } catch (FileNotFoundException ex) { +// appender.init(); +// } +// +// Set annotatedElements = +// roundEnv.getElementsAnnotatedWith(ConfigGroup.class); +// for (Element annotatedElement : annotatedElements) { +// TypeElement configGroup = (TypeElement) annotatedElement; +// +// //check if any of the setters are annotated with @Config +// for (Element element : configGroup.getEnclosedElements()) { +// if (element.getKind() == ElementKind.METHOD) { +// processingEnv.getMessager() +// .printMessage(Kind.WARNING, element.getSimpleName().toString()); +// if (element.getSimpleName().toString().startsWith("set") +// && element.getAnnotation(Config.class) != null) { +// +// //update the ozone-site-generated.xml +// Config configAnnotation = element.getAnnotation(Config.class); +// ConfigGroup configGroupAnnotation = +// configGroup.getAnnotation(ConfigGroup.class); +// +// String key = configGroupAnnotation.prefix() + "." +// + configAnnotation.key(); +// +// appender.addConfig(key, +// configAnnotation.defaultValue(), +// configAnnotation.description(), +// configAnnotation.tags()); +// } +// } +// +// } +// FileObject resource = filer +// .createResource(StandardLocation.CLASS_OUTPUT, "", +// OUTPUT_FILE_NAME); +// +// try (Writer writer = new OutputStreamWriter( +// resource.openOutputStream(), StandardCharsets.UTF_8)) { +// appender.write(writer); +// } +// } +// } catch (IOException e) { +// processingEnv.getMessager().printMessage(Kind.ERROR, +// "Can't generate the config file from annotation: " + e.getMessage()); +// } return false; } From b28e3afccde11829ec91f90dcb3c84dd0ace831f Mon Sep 17 00:00:00 2001 From: Siddharth Wagle Date: Fri, 28 Jun 2019 21:44:53 -0700 Subject: [PATCH 10/10] HDDS-1555. Fixed review comments. --- .../src/main/resources/ozone-default.xml | 2 +- .../hadoop/hdds/conf/ConfigFileGenerator.java | 120 +++++++++--------- .../server/ratis/ContainerStateMachine.java | 7 +- 3 files changed, 65 insertions(+), 64 deletions(-) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index e07f000b1dd63..c10aa3353a0b2 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -106,7 +106,7 @@ dfs.container.ratis.log.purge.gap - 1024 + 1000000000 OZONE, DEBUG, CONTAINER, RATIS Purge gap between the last purged commit index and the current index, when the leader decides to purge its log. 
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java index f07db8b4c6955..e9e88a0898805 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java @@ -46,66 +46,66 @@ public class ConfigFileGenerator extends AbstractProcessor { @Override public boolean process(Set annotations, RoundEnvironment roundEnv) { -// if (roundEnv.processingOver()) { -// return false; -// } -// -// Filer filer = processingEnv.getFiler(); -// -// try { -// -// //load existing generated config (if exists) -// ConfigFileAppender appender = new ConfigFileAppender(); -// try (InputStream input = filer -// .getResource(StandardLocation.CLASS_OUTPUT, "", -// OUTPUT_FILE_NAME).openInputStream()) { -// appender.load(input); -// } catch (FileNotFoundException ex) { -// appender.init(); -// } -// -// Set annotatedElements = -// roundEnv.getElementsAnnotatedWith(ConfigGroup.class); -// for (Element annotatedElement : annotatedElements) { -// TypeElement configGroup = (TypeElement) annotatedElement; -// -// //check if any of the setters are annotated with @Config -// for (Element element : configGroup.getEnclosedElements()) { -// if (element.getKind() == ElementKind.METHOD) { -// processingEnv.getMessager() -// .printMessage(Kind.WARNING, element.getSimpleName().toString()); -// if (element.getSimpleName().toString().startsWith("set") -// && element.getAnnotation(Config.class) != null) { -// -// //update the ozone-site-generated.xml -// Config configAnnotation = element.getAnnotation(Config.class); -// ConfigGroup configGroupAnnotation = -// configGroup.getAnnotation(ConfigGroup.class); -// -// String key = configGroupAnnotation.prefix() + "." 
-// + configAnnotation.key(); -// -// appender.addConfig(key, -// configAnnotation.defaultValue(), -// configAnnotation.description(), -// configAnnotation.tags()); -// } -// } -// -// } -// FileObject resource = filer -// .createResource(StandardLocation.CLASS_OUTPUT, "", -// OUTPUT_FILE_NAME); -// -// try (Writer writer = new OutputStreamWriter( -// resource.openOutputStream(), StandardCharsets.UTF_8)) { -// appender.write(writer); -// } -// } -// } catch (IOException e) { -// processingEnv.getMessager().printMessage(Kind.ERROR, -// "Can't generate the config file from annotation: " + e.getMessage()); -// } + if (roundEnv.processingOver()) { + return false; + } + + Filer filer = processingEnv.getFiler(); + + try { + + //load existing generated config (if exists) + ConfigFileAppender appender = new ConfigFileAppender(); + try (InputStream input = filer + .getResource(StandardLocation.CLASS_OUTPUT, "", + OUTPUT_FILE_NAME).openInputStream()) { + appender.load(input); + } catch (FileNotFoundException ex) { + appender.init(); + } + + Set annotatedElements = + roundEnv.getElementsAnnotatedWith(ConfigGroup.class); + for (Element annotatedElement : annotatedElements) { + TypeElement configGroup = (TypeElement) annotatedElement; + + //check if any of the setters are annotated with @Config + for (Element element : configGroup.getEnclosedElements()) { + if (element.getKind() == ElementKind.METHOD) { + processingEnv.getMessager() + .printMessage(Kind.WARNING, element.getSimpleName().toString()); + if (element.getSimpleName().toString().startsWith("set") + && element.getAnnotation(Config.class) != null) { + + //update the ozone-site-generated.xml + Config configAnnotation = element.getAnnotation(Config.class); + ConfigGroup configGroupAnnotation = + configGroup.getAnnotation(ConfigGroup.class); + + String key = configGroupAnnotation.prefix() + "." 
+ + configAnnotation.key(); + + appender.addConfig(key, + configAnnotation.defaultValue(), + configAnnotation.description(), + configAnnotation.tags()); + } + } + + } + FileObject resource = filer + .createResource(StandardLocation.CLASS_OUTPUT, "", + OUTPUT_FILE_NAME); + + try (Writer writer = new OutputStreamWriter( + resource.openOutputStream(), StandardCharsets.UTF_8)) { + appender.write(writer); + } + } + } catch (IOException e) { + processingEnv.getMessager().printMessage(Kind.ERROR, + "Can't generate the config file from annotation: " + e.getMessage()); + } return false; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index ab7fe8eaa1ca3..44074e70d54db 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -33,6 +33,7 @@ import org.apache.ratis.server.impl.RaftServerConstants; import org.apache.ratis.server.impl.RaftServerProxy; import org.apache.ratis.server.protocol.TermIndex; +import org.apache.ratis.server.raftlog.RaftLog; import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo; import org.apache.ratis.thirdparty.com.google.protobuf .InvalidProtocolBufferException; @@ -194,12 +195,12 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot) throws IOException { if (snapshot == null) { TermIndex empty = - TermIndex.newTermIndex(0, RaftServerConstants.INVALID_LOG_INDEX); + TermIndex.newTermIndex(0, RaftLog.INVALID_LOG_INDEX); LOG.info( "The snapshot info is null." + "Setting the last applied index to:" + empty); setLastAppliedTermIndex(empty); - return RaftServerConstants.INVALID_LOG_INDEX; + return RaftLog.INVALID_LOG_INDEX; } final File snapshotFile = snapshot.getFile().getPath().toFile(); @@ -242,7 +243,7 @@ public void persistContainerSet(OutputStream out) throws IOException { public long takeSnapshot() throws IOException { TermIndex ti = getLastAppliedTermIndex(); LOG.info("Taking snapshot at termIndex:" + ti); - if (ti != null && ti.getIndex() != RaftServerConstants.INVALID_LOG_INDEX) { + if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) { final File snapshotFile = storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); LOG.info("Taking a snapshot to file {}", snapshotFile);
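The last hunk swaps RaftServerConstants.INVALID_LOG_INDEX for RaftLog.INVALID_LOG_INDEX; a small self-contained sketch of the guard that constant feeds, with the snapshot persistence itself omitted (class name is illustrative):

    import org.apache.ratis.server.protocol.TermIndex;
    import org.apache.ratis.server.raftlog.RaftLog;

    public final class SnapshotIndexSketch {
      private SnapshotIndexSketch() {
      }

      // A snapshot is only worth taking once at least one entry has been
      // applied, i.e. the last applied index is a real log index.
      static boolean shouldTakeSnapshot(TermIndex lastApplied) {
        return lastApplied != null
            && lastApplied.getIndex() != RaftLog.INVALID_LOG_INDEX;
      }
    }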