PWX-35939 Exposing cluster domain of a node in the SDK #2415

Merged
2 commits merged on Feb 21, 2024
4 changes: 4 additions & 0 deletions SDK_CHANGELOG.md
@@ -2,6 +2,10 @@

## Releases

### v0.176.0 - (02/15/2024)

* Add cluster domain field to StorageNode

### v0.175.0 - (12/15/2023)

* Add defrag status data structure
1 change: 1 addition & 0 deletions api/api.go
@@ -1036,6 +1036,7 @@ func (s *Node) ToStorageNode() *StorageNode {
SecurityStatus: s.SecurityStatus,
SchedulerTopology: s.SchedulerTopology,
NonQuorumMember: s.NonQuorumMember,
ClusterDomain: s.DomainID,
}

node.Disks = make(map[string]*StorageResource)
8,398 changes: 4,205 additions & 4,193 deletions api/api.pb.go

Large diffs are not rendered by default.

15 changes: 7 additions & 8 deletions api/api.proto
@@ -1633,6 +1633,8 @@ message StorageNode {
// when initializing until it reaches a point where we can definitely determine whether
// it is a quorum member or not.
bool non_quorum_member = 22;
// Name of the cluster domain to which the node belongs.
string cluster_domain = 23;
}

// StorageCluster represents the state and information about the cluster
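
With the new field in place, an SDK consumer can read a node's domain from the inspect response. A minimal sketch, assuming the generated Go client from this repo, a plaintext SDK endpoint at localhost:9100, and a placeholder node ID:

package main

import (
    "context"
    "fmt"

    "github.com/libopenstorage/openstorage/api"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Placeholder endpoint; point this at your SDK server.
    conn, err := grpc.Dial("localhost:9100",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    nodes := api.NewOpenStorageNodeClient(conn)
    resp, err := nodes.Inspect(context.Background(),
        &api.SdkNodeInspectRequest{NodeId: "node-1"}) // placeholder node ID
    if err != nil {
        panic(err)
    }
    // GetClusterDomain is the getter generated for cluster_domain above.
    fmt.Println(resp.GetNode().GetClusterDomain())
}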
@@ -4111,7 +4113,7 @@ message CollectDiagsJob {

// DefragJob describes a job to run defragmentation on cluster nodes
message DefragJob {
// MaxDurationHours defines the time limit in hours
double max_duration_hours = 1;
// MaxNodesInParallel defines the maximum number of nodes running the defrag job in parallel
uint32 max_nodes_in_parallel = 2;
@@ -4123,7 +4125,7 @@ message DefragJob {
// if not provided, will run on all nodes
// cannot coexist with IncludeNodes
repeated string exclude_nodes = 4;
// NodeSelector is a list of node label `key=value` pairs separated by comma,
// which selects the nodes to be run on for the job
// can coexist with ExcludeNodes but cannot coexist with IncludeNodes
repeated string node_selector = 5;
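
As a sketch of how these constraints compose, here is a hypothetical job; the values are placeholders, and the Go struct field names are the generated counterparts of the proto fields above:

package main

import "github.com/libopenstorage/openstorage/api"

func main() {
    // Hypothetical job: capped at 2 hours, at most 3 nodes in parallel.
    job := &api.DefragJob{
        MaxDurationHours:   2,
        MaxNodesInParallel: 3,
        // ExcludeNodes may coexist with NodeSelector, but not with IncludeNodes.
        ExcludeNodes: []string{"node-2"},
        // NodeSelector filters nodes by `key=value` labels.
        NodeSelector: []string{"px/defrag-group=a"},
    }
    _ = job // hand off to whichever job-scheduling API you use
}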
@@ -4141,7 +4143,7 @@ message DefragNodeStatus {
string running_schedule = 2;
}

// DefragPoolStatus describes the defragmentation status of a pool
message DefragPoolStatus {
// NumIterations counts the number of times the pool gets defragmented
uint32 num_iterations = 1;
@@ -5659,7 +5661,7 @@ message SdkVersion {
// SDK version major value of this specification
Major = 0;
// SDK version minor value of this specification
Minor = 175;
Minor = 176;
// SDK version patch value of this specification
Patch = 0;
}
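
Callers that want to gate on the new StorageNode field can check the server's SDK version first. Continuing the inspect sketch above (same conn and imports), and assuming the OpenStorageIdentity service is served on the same endpoint:

// Servers at SDK v0.176.0 or later populate cluster_domain.
identity := api.NewOpenStorageIdentityClient(conn)
ver, err := identity.Version(context.Background(), &api.SdkIdentityVersionRequest{})
if err != nil {
    panic(err)
}
fmt.Println(ver.GetSdkVersion().GetMinor() >= 176)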
@@ -6340,7 +6342,7 @@ message SdkVerifyChecksumStartRequest {
// SdkVerifyChecksumStartResponse defines the response for a
// SdkVerifyChecksumStartRequest.
message SdkVerifyChecksumStartResponse {
// Status code representing the state of the verify checksum operation
VerifyChecksum.VerifyChecksumStatus status = 1;
// Text blob containing ASCII text providing details of the operation
string message = 2;
@@ -6375,6 +6377,3 @@ message SdkVerifyChecksumStopResponse {
// Text blob containing ASCII text providing details of the operation
string message = 1;
}



8 changes: 6 additions & 2 deletions api/server/sdk/api/api.swagger.json

Some generated files are not rendered by default.

8 changes: 7 additions & 1 deletion api/server/sdk/node_test.go
@@ -22,8 +22,8 @@ import (
"testing"
"time"

"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/golang/mock/gomock"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

@@ -156,6 +156,7 @@ func TestSdkNodeEnumerateWithFilters(t *testing.T) {
},
},
NonQuorumMember: true,
DomainID: "blue",
},
},
}
@@ -174,6 +175,7 @@ func TestSdkNodeEnumerateWithFilters(t *testing.T) {
},
},
NonQuorumMember: true,
ClusterDomain: "blue",
}

s.MockCluster().EXPECT().Enumerate().Return(cluster, nil).Times(1)
@@ -246,6 +248,7 @@ func TestSdkNodeInspect(t *testing.T) {
},
HWType: api.HardwareType_VirtualMachine,
NonQuorumMember: true,
DomainID: "blue",
}
s.MockCluster().EXPECT().Inspect(nodeid).Return(node, nil).Times(1)

@@ -271,6 +274,7 @@ func TestSdkNodeInspect(t *testing.T) {
assert.Equal(t, rn.GetStatus(), node.Status)
assert.Equal(t, rn.GetHWType(), node.HWType)
assert.Equal(t, node.NonQuorumMember, rn.NonQuorumMember)
assert.Equal(t, node.DomainID, rn.ClusterDomain)

// Check Disk
assert.Len(t, rn.GetDisks(), 2)
@@ -361,6 +365,7 @@ func TestSdkNodeInspectCurrent(t *testing.T) {
},
},
NonQuorumMember: true,
DomainID: "blue",
}

cluster := api.Cluster{
@@ -393,6 +398,7 @@ func TestSdkNodeInspectCurrent(t *testing.T) {
assert.Equal(t, rn.GetStatus(), node.Status)
assert.Equal(t, rn.GetHWType(), node.HWType)
assert.Equal(t, node.NonQuorumMember, rn.NonQuorumMember)
assert.Equal(t, node.DomainID, rn.ClusterDomain)

// Check Disk
assert.Len(t, rn.GetDisks(), 1)
11 changes: 5 additions & 6 deletions cluster/manager/manager.go
@@ -289,6 +289,7 @@ func (c *ClusterManager) getNodeEntry(nodeID string, clustDBRef *cluster.Cluster
n.HWType = v.HWType
n.SecurityStatus = v.SecurityStatus
n.NonQuorumMember = v.NonQuorumMember
n.DomainID = v.ClusterDomain
} else {
logrus.Warnf("Could not query NodeID %v", nodeID)
// Node entry won't be refreshed from DB; will use the "offline" original
@@ -1157,7 +1158,7 @@ func (c *ClusterManager) waitForQuorum(exist bool) error {
return nil
}

func (c *ClusterManager) initializeCluster(db kvdb.Kvdb, selfClusterDomain string) (
func (c *ClusterManager) initializeCluster(db kvdb.Kvdb) (
*cluster.ClusterInfo,
error,
) {
@@ -1220,10 +1221,9 @@ func (c *ClusterManager) initListeners(
db kvdb.Kvdb,
nodeExists *bool,
nodeInitialized bool,
selfClusterDomain string,
) (uint64, *cluster.ClusterInfo, error) {
// Initialize the cluster if required
clusterInfo, err := c.initializeCluster(db, selfClusterDomain)
clusterInfo, err := c.initializeCluster(db)
if err != nil {
return 0, nil, err
}
@@ -1328,13 +1328,11 @@ func (c *ClusterManager) initializeAndStartHeartbeat(
kvdb kvdb.Kvdb,
exist *bool,
nodeInitialized bool,
selfClusterDomain string,
) (uint64, *cluster.ClusterInfo, error) {
lastIndex, clusterInfo, err := c.initListeners(
kvdb,
exist,
nodeInitialized,
selfClusterDomain,
)
if err != nil {
return 0, nil, err
@@ -1475,6 +1473,7 @@ func (c *ClusterManager) StartWithConfiguration(
c.gossipPort = gossipPort
c.selfNode.GossipPort = gossipPort
c.selfClusterDomain = selfClusterDomain
c.selfNode.DomainID = selfClusterDomain
if err != nil {
logrus.Errorf("Failed to get external IP address for mgt/data interfaces: %s.",
err)
@@ -1527,7 +1526,6 @@ func (c *ClusterManager) StartWithConfiguration(
kv,
&exist,
nodeInitialized,
selfClusterDomain,
)
if err != nil {
return err
@@ -1643,6 +1641,7 @@ func (c *ClusterManager) nodes(clusterDB *cluster.ClusterInfo) []*api.Node {
node.NodeLabels = n.NodeLabels
node.SecurityStatus = n.SecurityStatus
node.NonQuorumMember = n.NonQuorumMember
node.DomainID = n.ClusterDomain
}
nodes = append(nodes, &node)
}
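
Taken together, the manager now records the configured domain on the self node at startup, rehydrates it from the cluster DB when rebuilding node entries, and ToStorageNode surfaces it to SDK callers. A stand-in sketch of that round trip (Node and StorageNode here are simplified stand-ins, not the real api types):

package main

import "fmt"

// Simplified stand-ins for the types touched in this diff.
type Node struct{ DomainID string }
type StorageNode struct{ ClusterDomain string }

// Mirrors api/api.go above: DomainID becomes the SDK-visible ClusterDomain.
func (n *Node) ToStorageNode() *StorageNode {
    return &StorageNode{ClusterDomain: n.DomainID}
}

func main() {
    // StartWithConfiguration stores the configured domain on the self node...
    self := &Node{DomainID: "blue"}
    // ...and SDK responses expose it to callers.
    fmt.Println(self.ToStorageNode().ClusterDomain) // prints "blue"
}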