
Commit e7265ef

ktoso and yim-lee committed
review, membership cleanups, remove printlns, fix inverted cond in merge
Co-Authored-By: Yim Lee <[email protected]>
1 parent 92a6808 commit e7265ef

File tree: 8 files changed, +11 −102 lines


Sources/DistributedActors/Clocks/VersionVector.swift

Lines changed: 0 additions & 1 deletion
@@ -92,7 +92,6 @@ public struct VersionVector {
     }
 
     /// Prune any trace of the passed in replica id.
-    ///
     public func pruneReplica(_ replicaId: ReplicaId) -> Self {
         var s = self
         s.state.removeValue(forKey: replicaId)
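
For context on the pruned doc comment above: a minimal sketch of a dictionary-backed version vector with a value-semantics prune. It assumes only that the real VersionVector keeps per-replica counters in a dictionary; SimpleVersionVector and its members are illustrative stand-ins, not the library types.

// Illustrative sketch only; not the DistributedActors implementation.
struct SimpleVersionVector<ReplicaID: Hashable> {
    private var state: [ReplicaID: UInt64] = [:]

    mutating func increment(at replica: ReplicaID) {
        self.state[replica, default: 0] += 1
    }

    /// Returns a copy with any trace of `replica` removed,
    /// mirroring the value-semantics style of `pruneReplica` in the diff above.
    func pruneReplica(_ replica: ReplicaID) -> Self {
        var copy = self
        copy.state.removeValue(forKey: replica)
        return copy
    }
}

var vv = SimpleVersionVector<String>()
vv.increment(at: "nodeA")
let pruned = vv.pruneReplica("nodeA") // no trace of "nodeA" remains in the copy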

Sources/DistributedActors/Cluster/Cluster+Gossip.swift

Lines changed: 2 additions & 9 deletions
@@ -72,10 +72,10 @@ extension Cluster {
             return .init(causalRelation: causalRelation, effectiveChanges: [])
         }
         switch incomingGossipOwnerKnownLocally {
-        case .some(let locallyKnownTo) where locallyKnownTo.status.isDown:
+        case .some(let locallyKnownMember) where locallyKnownMember.status.isDown:
             // we have NOT removed it yet, but it is down, so we ignore it
             return .init(causalRelation: causalRelation, effectiveChanges: [])
-        case .none where Cluster.MemberStatus.down <= incomingOwnerMember.status:
+        case .none where incomingOwnerMember.status.isAtLeastDown:
             // we have likely removed it, and it is down anyway, so we ignore it completely
             return .init(causalRelation: causalRelation, effectiveChanges: [])
         default:
@@ -97,19 +97,12 @@ extension Cluster {
             // our local view happened strictly _after_ the incoming one, thus it is guaranteed
             // it will not provide us with new information; This is only an optimization, and would work correctly without it.
             changes = []
-            // self.seen.merge(selfOwner: self.owner, incoming: incoming)
         } else {
             // incoming is concurrent, ahead, or same
             changes = self.membership.mergeFrom(incoming: incoming.membership, myself: self.owner)
         }
 
-        // pprint("self.seen = \(self.seen)")
-        // pprint("incoming = \(incoming.seen)")
         self.seen.merge(selfOwner: self.owner, incoming: incoming.seen)
-        // pprint("self.owner = \(self.owner)")
-        // pprint("self.seen = \(self.seen)")
-        //
-        // pprint("self = \(self)")
 
         // 3) if any removals happened, we need to prune the removed nodes from the seen table
         for change in changes
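
A self-contained sketch of the pattern touched in the first hunk above: switching over an optional, locally-known member and short-circuiting when either view already considers the gossip owner at least down. The MemberStatus and Member types below are simplified stand-ins for the real Cluster types, and the full case ordering is an assumption; only the where-clause shapes mirror the diff.

// Simplified stand-ins for illustration; not the DistributedActors types.
enum MemberStatus: Int, Comparable {
    case joining, up, leaving, down, removed

    static func < (lhs: MemberStatus, rhs: MemberStatus) -> Bool { lhs.rawValue < rhs.rawValue }

    var isDown: Bool { self == .down }
    var isAtLeastDown: Bool { self >= .down }
}

struct Member {
    var status: MemberStatus
}

func shouldIgnoreIncomingGossip(locallyKnownOwner: Member?, incomingOwner: Member) -> Bool {
    switch locallyKnownOwner {
    case .some(let locallyKnownMember) where locallyKnownMember.status.isDown:
        return true // not removed locally yet, but already down: ignore
    case .none where incomingOwner.status.isAtLeastDown:
        return true // likely removed locally, and at least down anyway: ignore
    default:
        return false
    }
}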

Sources/DistributedActors/Cluster/Cluster+Member.swift

Lines changed: 5 additions & 0 deletions
@@ -246,6 +246,11 @@ extension Cluster.MemberStatus {
         self == .down
     }
 
+    /// Convenience function to check if a status is `.down` or `.removed`
+    public var isAtLeastDown: Bool {
+        self >= .down
+    }
+
     /// Convenience function to check if a status is `.removed`
     public var isRemoved: Bool {
         self == .removed
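
To make the ordering behind the new isAtLeastDown property concrete, a tiny hedged example. The full ordering joining < up < leaving < down < removed is an assumption about Cluster.MemberStatus; only "down precedes removed" is implied directly by this diff.

// Stand-in enum; the ordering shown is an assumption, not taken from the library.
enum Status: Int, Comparable {
    case joining, up, leaving, down, removed
    static func < (lhs: Status, rhs: Status) -> Bool { lhs.rawValue < rhs.rawValue }

    var isAtLeastDown: Bool { self >= .down }
}

assert(!Status.up.isAtLeastDown)     // an "up" member is not on its way out
assert(Status.down.isAtLeastDown)    // down counts
assert(Status.removed.isAtLeastDown) // removed counts as well
// equivalent to the `.down <= status` comparison replaced in Cluster+Gossip.swift above
assert((Status.down <= Status.removed) == Status.removed.isAtLeastDown)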

Sources/DistributedActors/Cluster/Cluster+Membership.swift

Lines changed: 2 additions & 3 deletions
@@ -189,7 +189,7 @@ extension Cluster {
     }
 }
 
-// Implementation nodes: Membership/Member equality
+// Implementation notes: Membership/Member equality
 // Membership equality is special, as it manually DOES take into account the Member's states (status, reachability),
 // whilst the Member equality by itself does not. This is somewhat ugly, however it allows us to perform automatic
 // seen table owner version updates whenever "the membership has changed." We may want to move away from this and make
@@ -462,7 +462,6 @@ extension Cluster.Membership {
     /// [.down] --> <none> (if some node removed) --> <none>
     /// [.down] --> <none> (iff myself node removed) --> .removed
     /// <any status> --> [.removed]** --> <none>
-    /// <any status> --> [.removed]** --> <none>
     ///
     /// * `.removed` is never stored EXCEPT if the `myself` member has been seen removed by other members of the cluster.
     /// ** `.removed` should never be gossiped/incoming within the cluster, but if it were to happen it is treated like a removal.
@@ -488,7 +487,7 @@ extension Cluster.Membership {
         guard var knownMember = self._members[incomingMember.node] else {
             // member NOT known locally ----------------------------------------------------------------------------
 
-            // only proceed if the member isn't already on it's way out
+            // only proceed if the member isn't already on its way out
             guard incomingMember.status < Cluster.MemberStatus.down else {
                 // no need to do anything if it is a removal coming in, yet we already do not know this node
                 continue
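
The guard in the last hunk above (skip incoming members that are already at least down when they are not known locally) can be sketched in isolation. SimpleMembership and NodeStatus below are illustrative stand-ins for Cluster.Membership and Cluster.MemberStatus, not the real merge implementation.

// Illustrative sketch of the "unknown member" branch of a merge like mergeFrom.
enum NodeStatus: Int, Comparable {
    case joining, up, leaving, down, removed
    static func < (lhs: NodeStatus, rhs: NodeStatus) -> Bool { lhs.rawValue < rhs.rawValue }
}

struct SimpleMembership {
    var members: [String: NodeStatus] = [:] // keyed by a node identifier

    /// Merge members not yet known locally, ignoring anyone already on their way out.
    mutating func mergeUnknownMembers(incoming: [String: NodeStatus]) -> [String] {
        var added: [String] = []
        for (node, status) in incoming where self.members[node] == nil {
            // only proceed if the member isn't already on its way out
            guard status < .down else { continue }
            self.members[node] = status
            added.append(node)
        }
        return added
    }
}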

Sources/DistributedActors/Cluster/ClusterShell+LeaderActions.swift

Lines changed: 1 addition & 1 deletion
@@ -135,7 +135,7 @@ extension ClusterShell {
     func interpretRemoveMemberLeaderAction(_ system: ActorSystem, _ state: inout ClusterShellState, memberToRemove: Cluster.Member) {
         let previousGossip = state.latestGossip
         // !!! IMPORTANT !!!
-        // We MUST perform the prune on the _latestGossip, not the wrapper,
+        // We MUST perform the prune on the _latestGossip_, not the wrapper,
         // as otherwise the wrapper enforces "vector time moves forward"
         guard let removalChange = state._latestGossip.pruneMember(memberToRemove) else {
             return
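
The warning above can be illustrated with a small model. This is a hedged sketch using hypothetical StateModel and GossipModel types, not the real ClusterShellState: it shows why pruning through a wrapper that enforces "vector time moves forward" would bump the version, while pruning the underlying storage does not.

// Hypothetical stand-ins; the wrapper behavior mirrors the setter shown later in this commit.
struct GossipModel: Equatable {
    var members: Set<String>
    var version: Int
}

struct StateModel {
    var _latestGossip = GossipModel(members: ["nodeA", "nodeB"], version: 3)

    var latestGossip: GossipModel {
        get { self._latestGossip }
        set {
            var next = newValue
            // the wrapper enforces "vector time moves forward" on membership changes
            if next.members != self._latestGossip.members, next.version == self._latestGossip.version {
                next.version += 1
            }
            self._latestGossip = next
        }
    }
}

var viaWrapper = StateModel()
var prunedThroughWrapper = viaWrapper.latestGossip
prunedThroughWrapper.members.remove("nodeB")
viaWrapper.latestGossip = prunedThroughWrapper
// viaWrapper._latestGossip.version is now 4: the wrapper bumped it as a side effect

var viaStorage = StateModel()
viaStorage._latestGossip.members.remove("nodeB")
// viaStorage._latestGossip.version is still 3: no artificial bump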

Sources/DistributedActors/Cluster/ClusterShellState.swift

Lines changed: 0 additions & 85 deletions
@@ -69,100 +69,15 @@ internal struct ClusterShellState: ReadOnlyClusterState {
         get {
             self._latestGossip
         }
-        // ---------------------
-        /*
-         Captured log [third][2020-01-23 1:59:18.8820] [ConvergentGossip.swift:88][/system/cluster/gossip] [trace] Received gossip: GossipEnvelope(payload: DistributedActors.Cluster.Gossip(owner: sact://first:671878924@localhost:9001, seen: Cluster.Gossip.SeenTable(
-             sact://first@localhost:9001 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 8
-                 uniqueNode:sact://third@localhost:9003 @ 6
-             sact://third@localhost:9003 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 7
-                 uniqueNode:sact://third@localhost:9003 @ 6
-         ), membership: Membership(count: 2, leader: [Member(sact://first@localhost:9001, status: up, reachability: reachable)], members: [Member(sact://third:2926310932@localhost:9003, status: up, reachability: reachable, upNumber: 1), Member(sact://first:671878924@localhost:9001, status: up, reachability: reachable, upNumber: 1)])))
-         // metadata:
-         // "actor/message": GossipEnvelope(payload: DistributedActors.Cluster.Gossip(owner: sact://first:671878924@localhost:9001, seen: Cluster.Gossip.SeenTable(
-         //     sact://first@localhost:9001 observed versions:
-         //         uniqueNode:sact://first@localhost:9001 @ 8
-         //         uniqueNode:sact://third@localhost:9003 @ 6
-         //     sact://third@localhost:9003 observed versions:
-         //         uniqueNode:sact://first@localhost:9001 @ 7
-         //         uniqueNode:sact://third@localhost:9003 @ 6
-         // ), membership: Membership(count: 2, leader: [Member(sact://first@localhost:9001, status: up, reachability: reachable)], members: [Member(sact://third:2926310932@localhost:9003, status: up, reachability: reachable, upNumber: 1), Member(sact://first:671878924@localhost:9001, status: up, reachability: reachable, upNumber: 1)])))
-         // "gossip/localPayload": Optional(DistributedActors.Cluster.Gossip(owner: sact://third:2926310932@localhost:9003, seen: Cluster.Gossip.SeenTable(
-         //     sact://first@localhost:9001 observed versions:
-         //         uniqueNode:sact://first@localhost:9001 @ 7
-         //         uniqueNode:sact://second@localhost:9002 @ 5
-         //         uniqueNode:sact://third@localhost:9003 @ 6
-         //     sact://second@localhost:9002 observed versions:
-         //         uniqueNode:sact://first@localhost:9001 @ 5
-         //         uniqueNode:sact://second@localhost:9002 @ 5
-         //         uniqueNode:sact://third@localhost:9003 @ 6
-         //     sact://third@localhost:9003 observed versions:
-         //         uniqueNode:sact://first@localhost:9001 @ 7
-         //         uniqueNode:sact://second@localhost:9002 @ 5
-         //         uniqueNode:sact://third@localhost:9003 @ 6
-         // ), membership: Membership(count: 3, leader: [Member(sact://first@localhost:9001, status: up, reachability: reachable)], members: [Member(sact://third:2926310932@localhost:9003, status: up, reachability: reachable), Member(sact://first:671878924@localhost:9001, status: up, reachability: reachable), Member(sact://second:1339064558@localhost:9002, status: down, reachability: reachable)])))
-         Captured log [third][2020-01-23 1:59:18.8820] [ClusterShellState.swift:74][/system/cluster] [info] KEEP VERSION >>> Optional([uniqueNode:sact://first@localhost:9001: 8, uniqueNode:sact://third@localhost:9003: 6, uniqueNode:sact://second@localhost:9002: 5])
-         NOW: Gossip(owner: sact://third:2926310932@localhost:9003, seen: Cluster.Gossip.SeenTable(
-             sact://first@localhost:9001 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 7
-                 uniqueNode:sact://second@localhost:9002 @ 5
-                 uniqueNode:sact://third@localhost:9003 @ 6
-             sact://second@localhost:9002 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 5
-                 uniqueNode:sact://second@localhost:9002 @ 5
-                 uniqueNode:sact://third@localhost:9003 @ 6
-             sact://third@localhost:9003 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 7
-                 uniqueNode:sact://second@localhost:9002 @ 5
-                 uniqueNode:sact://third@localhost:9003 @ 6
-         ), membership: Membership(count: 3, leader: [Member(sact://first@localhost:9001, status: up, reachability: reachable)], members: [Member(sact://third:2926310932@localhost:9003, status: up, reachability: reachable), Member(sact://first:671878924@localhost:9001, status: up, reachability: reachable), Member(sact://second:1339064558@localhost:9002, status: down, reachability: reachable)]))
-         NEW: Gossip(owner: sact://third:2926310932@localhost:9003, seen: Cluster.Gossip.SeenTable(
-             sact://first@localhost:9001 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 8
-                 uniqueNode:sact://second@localhost:9002 @ 5
-                 uniqueNode:sact://third@localhost:9003 @ 6
-             sact://second@localhost:9002 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 5
-                 uniqueNode:sact://second@localhost:9002 @ 5
-                 uniqueNode:sact://third@localhost:9003 @ 6
-             sact://third@localhost:9003 observed versions:
-                 uniqueNode:sact://first@localhost:9001 @ 8
-                 uniqueNode:sact://second@localhost:9002 @ 5
-                 uniqueNode:sact://third@localhost:9003 @ 6
-         ), membership: Membership(count: 3, leader: [Member(sact://first@localhost:9001, status: up, reachability: reachable)], members: [Member(sact://third:2926310932@localhost:9003, status: up, reachability: reachable), Member(sact://first:671878924@localhost:9001, status: up, reachability: reachable), Member(sact://second:1339064558@localhost:9002, status: down, reachability: reachable)]))
-
-         UPON a removed gossip we must remove as well, and not bring it back on third magically
-         */
-        // ---------------------
-
         set {
             if self._latestGossip.membership == newValue.membership {
-                // self.log.info("""
-                // KEEP VERSION >>> \(newValue.seen.version(at: self.myselfNode))
-                // NOW: \(self._latestGossip)
-                // NEW: \(newValue)
-                // """)
                 self._latestGossip = newValue
             } else {
-                precondition("\(self._latestGossip.membership)" != "\(newValue.membership)", "WHY! ARE THOSE EQUAL: \(reflecting: self._latestGossip.membership) ||||| \(reflecting: newValue.membership)")
                 let next: Cluster.Gossip
                 if self._latestGossip.version == newValue.version {
                     next = newValue.incrementingOwnerVersion()
-                    // self.log.info("""
-                    // BUMP VERSION >>> \(self._latestGossip.version) >>>> \(next.version)
-                    // NOW: \(self._latestGossip)
-                    // NEW: \(newValue)
-                    // RES: \(next)
-                    // """)
                 } else {
                     next = newValue
-                    // self.log.info("""
-                    // ACK VERSION >>> \(self._latestGossip.version) >>>> \(next.version)
-                    // NOW: \(self._latestGossip)
-                    // NEW: \(newValue)
-                    // RES: \(next)
-                    // """)
                 }
 
                 self._latestGossip = next
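
The surviving setter above implements the KEEP / BUMP / ACK behavior that the deleted log statements used to print. Below is a reduced, self-contained sketch of that shape; GossipSnapshot is a simplified stand-in for Cluster.Gossip, with incrementingOwnerVersion modeled as a plain integer bump.

// Reduced model of the setter above; not the real ClusterShellState.
struct GossipSnapshot: Equatable {
    var membership: Set<String> // stand-in for Cluster.Membership
    var version: Int            // stand-in for the owner's version vector entry

    func incrementingOwnerVersion() -> GossipSnapshot {
        var copy = self
        copy.version += 1
        return copy
    }
}

struct ShellStateModel {
    private var _latestGossip = GossipSnapshot(membership: [], version: 0)

    var latestGossip: GossipSnapshot {
        get { self._latestGossip }
        set {
            if self._latestGossip.membership == newValue.membership {
                // KEEP: membership unchanged, adopt the incoming gossip as-is
                self._latestGossip = newValue
            } else {
                let next: GossipSnapshot
                if self._latestGossip.version == newValue.version {
                    // BUMP: membership changed but version did not; move vector time forward
                    next = newValue.incrementingOwnerVersion()
                } else {
                    // ACK: the version already moved; accept the incoming gossip
                    next = newValue
                }
                self._latestGossip = next
            }
        }
    }
}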

Sources/DistributedActors/Pattern/ConvergentGossip.swift

Lines changed: 1 addition & 1 deletion
@@ -123,7 +123,7 @@ final class ConvergentGossip<Payload: Codable> {
         let envelope = GossipEnvelope(payload: payload) // TODO: carry all the vector clocks here rather in the payload
 
         // TODO: if we have seen tables, we can use them to bias the gossip towards the "more behind" nodes
-        context.log.info("Sending gossip to \(target)", metadata: [
+        context.log.trace("Sending gossip to \(target)", metadata: [
             "gossip/target": "\(target.address)",
             "gossip/peerCount": "\(self.peers.count)",
             "gossip/peers": "\(self.peers.map { $0.address })",

Tests/DistributedActorsTests/Cluster/MembershipGossipTests.swift

Lines changed: 0 additions & 2 deletions
@@ -379,8 +379,6 @@ final class MembershipGossipTests: XCTestCase {
         gossip.seen.version(at: self.nodeA).shouldEqual(expected.seen.version(at: self.nodeA))
         gossip.seen.version(at: self.nodeB).shouldEqual(expected.seen.version(at: self.nodeB))
         gossip.seen.version(at: self.nodeC).shouldEqual(expected.seen.version(at: self.nodeC))
-        // gossip.seen.shouldEqual(expected.seen)
-        // gossip.membership.shouldEqual(expected.membership)
         gossip.shouldEqual(expected)
     }
