diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go index 78e4f4dfcda..bb3551acf5c 100644 --- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go +++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go @@ -17,7 +17,6 @@ limitations under the License. package readtopologyinstance import ( - "fmt" "os" "testing" "time" @@ -86,8 +85,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Equal(t, "ROW", primaryInstance.BinlogFormat) assert.Equal(t, "ON", primaryInstance.GTIDMode) assert.Equal(t, "FULL", primaryInstance.BinlogRowImage) - assert.Contains(t, primaryInstance.SelfBinlogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-bin", primary.TabletUID)) - assert.Greater(t, primaryInstance.SelfBinlogCoordinates.LogPos, uint32(0)) assert.True(t, primaryInstance.SemiSyncPrimaryEnabled) assert.True(t, primaryInstance.SemiSyncReplicaEnabled) assert.True(t, primaryInstance.SemiSyncPrimaryStatus) @@ -136,8 +133,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Equal(t, "FULL", replicaInstance.BinlogRowImage) assert.Equal(t, utils.Hostname, replicaInstance.SourceHost) assert.Equal(t, primary.MySQLPort, replicaInstance.SourcePort) - assert.Contains(t, replicaInstance.SelfBinlogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-bin", replica.TabletUID)) - assert.Greater(t, replicaInstance.SelfBinlogCoordinates.LogPos, uint32(0)) assert.False(t, replicaInstance.SemiSyncPrimaryEnabled) assert.True(t, replicaInstance.SemiSyncReplicaEnabled) assert.False(t, replicaInstance.SemiSyncPrimaryStatus) @@ -154,12 +149,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Equal(t, replicaInstance.ReplicationSQLThreadState, inst.ReplicationThreadStateRunning) assert.True(t, replicaInstance.ReplicationIOThreadRuning) assert.True(t, replicaInstance.ReplicationSQLThreadRuning) - assert.Equal(t, replicaInstance.ReadBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile) - assert.Greater(t, replicaInstance.ReadBinlogCoordinates.LogPos, uint32(0)) - assert.Equal(t, replicaInstance.ExecBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile) - assert.Greater(t, replicaInstance.ExecBinlogCoordinates.LogPos, uint32(0)) - assert.Contains(t, replicaInstance.RelaylogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-relay", replica.TabletUID)) - assert.Greater(t, replicaInstance.RelaylogCoordinates.LogPos, uint32(0)) assert.Empty(t, replicaInstance.LastIOError) assert.Empty(t, replicaInstance.LastSQLError) assert.EqualValues(t, 0, replicaInstance.SQLDelay) diff --git a/go/vt/vtorc/config/config.go b/go/vt/vtorc/config/config.go index 402c67870ba..d28f0175363 100644 --- a/go/vt/vtorc/config/config.go +++ b/go/vt/vtorc/config/config.go @@ -91,20 +91,20 @@ func RegisterFlags(fs *pflag.FlagSet) { // strictly expected from user. // TODO(sougou): change this to yaml parsing, and possible merge with tabletenv. type Configuration struct { - SQLite3DataFile string // full path to sqlite3 datafile - InstancePollSeconds uint // Number of seconds between instance reads - SnapshotTopologiesIntervalHours uint // Interval in hour between snapshot-topologies invocation. Default: 0 (disabled) - ReasonableReplicationLagSeconds int // Above this value is considered a problem - AuditLogFile string // Name of log file for audit operations. Disabled when empty. 
- AuditToSyslog bool // If true, audit messages are written to syslog - AuditToBackendDB bool // If true, audit messages are written to the backend DB's `audit` table (default: true) - AuditPurgeDays uint // Days after which audit entries are purged from the database - RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping - PreventCrossDataCenterPrimaryFailover bool // When true (default: false), cross-DC primary failover are not allowed, vtorc will do all it can to only fail over within same DC, or else not fail over at all. - WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockTimeout since that is the total time we use for an ERS. - TolerableReplicationLagSeconds int // Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS. - TopoInformationRefreshSeconds int // Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topo-server. - RecoveryPollSeconds int // Timer duration on which VTOrc recovery analysis runs + SQLite3DataFile string // full path to sqlite3 datafile + InstancePollSeconds uint // Number of seconds between instance reads + SnapshotTopologiesIntervalHours uint // Interval in hours between snapshot-topologies invocations. Default: 0 (disabled) + ReasonableReplicationLagSeconds int // Above this value is considered a problem + AuditLogFile string // Name of log file for audit operations. Disabled when empty. + AuditToSyslog bool // If true, audit messages are written to syslog + AuditToBackendDB bool // If true, audit messages are written to the backend DB's `audit` table (default: true) + AuditPurgeDays uint // Days after which audit entries are purged from the database + RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on the same instance as well as flapping + PreventCrossCellPrimaryFailover bool // When true (default: false), cross-cell primary failovers are not allowed; VTOrc will do all it can to fail over only within the same cell, or else not fail over at all. + WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockTimeout since that is the total time we use for an ERS. + TolerableReplicationLagSeconds int // Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS. + TopoInformationRefreshSeconds int // Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topo-server.
+ RecoveryPollSeconds int // Timer duration on which VTOrc recovery analysis runs } // ToJSONString will marshal this configuration as JSON @@ -130,7 +130,7 @@ func UpdateConfigValuesFromFlags() { Config.AuditToSyslog = auditToSyslog Config.AuditPurgeDays = uint(auditPurgeDuration / (time.Hour * 24)) Config.RecoveryPeriodBlockSeconds = int(recoveryPeriodBlockDuration / time.Second) - Config.PreventCrossDataCenterPrimaryFailover = preventCrossCellFailover + Config.PreventCrossCellPrimaryFailover = preventCrossCellFailover Config.WaitReplicasTimeoutSeconds = int(waitReplicasTimeout / time.Second) Config.TolerableReplicationLagSeconds = int(tolerableReplicationLag / time.Second) Config.TopoInformationRefreshSeconds = int(topoInformationRefreshDuration / time.Second) @@ -165,19 +165,19 @@ func LogConfigValues() { func newConfiguration() *Configuration { return &Configuration{ - SQLite3DataFile: "file::memory:?mode=memory&cache=shared", - InstancePollSeconds: 5, - SnapshotTopologiesIntervalHours: 0, - ReasonableReplicationLagSeconds: 10, - AuditLogFile: "", - AuditToSyslog: false, - AuditToBackendDB: false, - AuditPurgeDays: 7, - RecoveryPeriodBlockSeconds: 30, - PreventCrossDataCenterPrimaryFailover: false, - WaitReplicasTimeoutSeconds: 30, - TopoInformationRefreshSeconds: 15, - RecoveryPollSeconds: 1, + SQLite3DataFile: "file::memory:?mode=memory&cache=shared", + InstancePollSeconds: 5, + SnapshotTopologiesIntervalHours: 0, + ReasonableReplicationLagSeconds: 10, + AuditLogFile: "", + AuditToSyslog: false, + AuditToBackendDB: false, + AuditPurgeDays: 7, + RecoveryPeriodBlockSeconds: 30, + PreventCrossCellPrimaryFailover: false, + WaitReplicasTimeoutSeconds: 30, + TopoInformationRefreshSeconds: 15, + RecoveryPollSeconds: 1, } } diff --git a/go/vt/vtorc/config/config_test.go b/go/vt/vtorc/config/config_test.go index 2009b476f1d..47e6893cc26 100644 --- a/go/vt/vtorc/config/config_test.go +++ b/go/vt/vtorc/config/config_test.go @@ -182,7 +182,7 @@ func TestUpdateConfigValuesFromFlags(t *testing.T) { }() testConfig := newConfiguration() - testConfig.PreventCrossDataCenterPrimaryFailover = true + testConfig.PreventCrossCellPrimaryFailover = true UpdateConfigValuesFromFlags() require.Equal(t, testConfig, Config) }) diff --git a/go/vt/vtorc/db/generate_base.go b/go/vt/vtorc/db/generate_base.go index fbb96ef75c0..39fef9bf277 100644 --- a/go/vt/vtorc/db/generate_base.go +++ b/go/vt/vtorc/db/generate_base.go @@ -31,7 +31,6 @@ var TableNames = []string{ "vtorc_db_deployments", "global_recovery_disable", "topology_recovery_steps", - "database_instance_stale_binlog_coordinates", "vitess_tablet", "vitess_keyspace", "vitess_shard", @@ -54,31 +53,19 @@ CREATE TABLE database_instance ( binlog_format varchar(16) NOT NULL, log_bin tinyint NOT NULL, log_replica_updates tinyint NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint NOT NULL, source_host varchar(128) NOT NULL, source_port smallint NOT NULL, replica_sql_running tinyint NOT NULL, replica_io_running tinyint NOT NULL, - source_log_file varchar(128) NOT NULL, - read_source_log_pos bigint NOT NULL, - relay_source_log_file varchar(128) NOT NULL, - exec_source_log_pos bigint NOT NULL, replication_lag_seconds bigint DEFAULT NULL, replica_lag_seconds bigint DEFAULT NULL, read_only TINYint not null default 0, last_sql_error TEXT not null default '', last_io_error TEXT not null default '', oracle_gtid TINYint not null default 0, - mariadb_gtid TINYint not null default 0, - relay_log_file varchar(128) not null default '', - relay_log_pos 
bigint not null default 0, - pseudo_gtid TINYint not null default 0, replication_depth TINYint not null default 0, has_replication_filters TINYint not null default 0, - data_center varchar(32) not null default '', - physical_environment varchar(32) not null default '', - is_co_primary TINYint not null default 0, + cell varchar(32) not null default '', sql_delay int not null default 0, binlog_server TINYint not null default 0, supports_oracle_gtid TINYint not null default 0, @@ -88,7 +75,6 @@ CREATE TABLE database_instance ( gtid_purged text not null default '', has_replication_credentials TINYint not null default 0, allow_tls TINYint not null default 0, - semi_sync_enforced TINYint not null default 0, version_comment varchar(128) NOT NULL DEFAULT '', major_version varchar(16) not null default '', binlog_row_image varchar(16) not null default '', @@ -102,7 +88,6 @@ CREATE TABLE database_instance ( ancestry_uuid text not null default '', replication_sql_thread_state tinyint signed not null default 0, replication_io_thread_state tinyint signed not null default 0, - region varchar(32) not null default '', semi_sync_primary_timeout int NOT NULL DEFAULT 0, semi_sync_primary_wait_for_replica_count int NOT NULL DEFAULT 0, semi_sync_primary_status TINYint NOT NULL DEFAULT 0, @@ -307,20 +292,6 @@ CREATE TABLE topology_recovery_steps ( PRIMARY KEY (recovery_step_id) )`, ` -DROP TABLE IF EXISTS database_instance_stale_binlog_coordinates -`, - ` -CREATE TABLE database_instance_stale_binlog_coordinates ( - alias varchar(256) NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint NOT NULL, - first_seen timestamp not null default (''), - PRIMARY KEY (alias) -)`, - ` -CREATE INDEX first_seen_idx_database_instance_stale_binlog_coordinates ON database_instance_stale_binlog_coordinates (first_seen) - `, - ` DROP TABLE IF EXISTS vitess_tablet `, ` diff --git a/go/vt/vtorc/inst/analysis.go b/go/vt/vtorc/inst/analysis.go index 328b43df0c5..5342cc0a09d 100644 --- a/go/vt/vtorc/inst/analysis.go +++ b/go/vt/vtorc/inst/analysis.go @@ -83,34 +83,19 @@ type ReplicationAnalysisHints struct { AuditAnalysis bool } -type AnalysisInstanceType string - -const ( - AnalysisInstanceTypePrimary AnalysisInstanceType = "primary" - AnalysisInstanceTypeCoPrimary AnalysisInstanceType = "co-primary" - AnalysisInstanceTypeIntermediatePrimary AnalysisInstanceType = "intermediate-primary" -) - // ReplicationAnalysis notes analysis on replication chain status, per instance type ReplicationAnalysis struct { - AnalyzedInstanceHostname string - AnalyzedInstancePort int AnalyzedInstanceAlias string AnalyzedInstancePrimaryAlias string TabletType topodatapb.TabletType PrimaryTimeStamp time.Time ClusterDetails ClusterInfo - AnalyzedInstanceDataCenter string - AnalyzedInstanceRegion string AnalyzedKeyspace string AnalyzedShard string // ShardPrimaryTermTimestamp is the primary term start time stored in the shard record. 
ShardPrimaryTermTimestamp string - AnalyzedInstancePhysicalEnvironment string - AnalyzedInstanceBinlogCoordinates BinlogCoordinates IsPrimary bool IsClusterPrimary bool - IsCoPrimary bool LastCheckValid bool LastCheckPartialSuccess bool CountReplicas uint @@ -126,7 +111,6 @@ type ReplicationAnalysis struct { StructureAnalysis []StructureAnalysisCode IsBinlogServer bool OracleGTIDImmediateTopology bool - MariaDBGTIDImmediateTopology bool BinlogServerImmediateTopology bool SemiSyncPrimaryEnabled bool SemiSyncPrimaryStatus bool @@ -159,18 +143,6 @@ func (replicationAnalysis *ReplicationAnalysis) MarshalJSON() ([]byte, error) { return json.Marshal(i) } -// Get a string description of the analyzed instance type (primary? co-primary? intermediate-primary?) -func (replicationAnalysis *ReplicationAnalysis) GetAnalysisInstanceType() AnalysisInstanceType { - if replicationAnalysis.IsCoPrimary { - return AnalysisInstanceTypeCoPrimary - } - - if replicationAnalysis.IsPrimary { - return AnalysisInstanceTypePrimary - } - return AnalysisInstanceTypeIntermediatePrimary -} - // ValidSecondsFromSeenToLastAttemptedCheck returns the maximum allowed elapsed time // between last_attempted_check to last_checked before we consider the instance as invalid. func ValidSecondsFromSeenToLastAttemptedCheck() uint { diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go index b348d17d45f..d82967e6a5e 100644 --- a/go/vt/vtorc/inst/analysis_dao.go +++ b/go/vt/vtorc/inst/analysis_dao.go @@ -71,7 +71,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna } // TODO(sougou); deprecate ReduceReplicationAnalysisCount - args := sqlutils.Args(config.Config.ReasonableReplicationLagSeconds, ValidSecondsFromSeenToLastAttemptedCheck(), config.Config.ReasonableReplicationLagSeconds, keyspace, shard) + args := sqlutils.Args(ValidSecondsFromSeenToLastAttemptedCheck(), config.Config.ReasonableReplicationLagSeconds, keyspace, shard) query := ` SELECT vitess_tablet.info AS tablet_info, @@ -87,20 +87,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna primary_instance.read_only AS read_only, MIN(primary_instance.gtid_errant) AS gtid_errant, MIN(primary_instance.alias) IS NULL AS is_invalid, - MIN(primary_instance.data_center) AS data_center, - MIN(primary_instance.region) AS region, - MIN(primary_instance.physical_environment) AS physical_environment, - MIN(primary_instance.binary_log_file) AS binary_log_file, - MIN(primary_instance.binary_log_pos) AS binary_log_pos, + MIN(primary_instance.cell) AS cell, MIN(primary_tablet.info) AS primary_tablet_info, - MIN( - IFNULL( - primary_instance.binary_log_file = database_instance_stale_binlog_coordinates.binary_log_file - AND primary_instance.binary_log_pos = database_instance_stale_binlog_coordinates.binary_log_pos - AND database_instance_stale_binlog_coordinates.first_seen < NOW() - interval ? second, - 0 - ) - ) AS is_stale_binlog_coordinates, MIN( primary_instance.last_checked <= primary_instance.last_seen and primary_instance.last_attempted_check <= primary_instance.last_seen + interval ? 
second @@ -115,7 +103,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna OR substr(primary_instance.source_host, 1, 2) = '//' ) ) AS is_primary, - MIN(primary_instance.is_co_primary) AS is_co_primary, MIN(primary_instance.gtid_mode) AS gtid_mode, COUNT(replica_instance.server_id) AS count_replicas, IFNULL( @@ -172,7 +159,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna MIN( primary_instance.semi_sync_replica_enabled ) AS semi_sync_replica_enabled, - SUM(replica_instance.is_co_primary) AS count_co_primary_replicas, SUM(replica_instance.oracle_gtid) AS count_oracle_gtid_replicas, IFNULL( SUM( @@ -181,9 +167,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna ), 0 ) AS count_valid_oracle_gtid_replicas, - SUM( - replica_instance.binlog_server - ) AS count_binlog_server_replicas, IFNULL( SUM( replica_instance.last_checked <= replica_instance.last_seen @@ -201,17 +184,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna ), 0 ) AS count_valid_semi_sync_replicas, - MIN( - primary_instance.mariadb_gtid - ) AS is_mariadb_gtid, - SUM(replica_instance.mariadb_gtid) AS count_mariadb_gtid_replicas, - IFNULL( - SUM( - replica_instance.last_checked <= replica_instance.last_seen - AND replica_instance.mariadb_gtid != 0 - ), - 0 - ) AS count_valid_mariadb_gtid_replicas, IFNULL( SUM( replica_instance.log_bin @@ -285,9 +257,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna primary_instance.hostname = replica_instance.source_host AND primary_instance.port = replica_instance.source_port ) - LEFT JOIN database_instance_stale_binlog_coordinates ON ( - vitess_tablet.alias = database_instance_stale_binlog_coordinates.alias - ) WHERE ? IN ('', vitess_keyspace.keyspace) AND ? 
IN ('', vitess_tablet.shard) @@ -331,21 +300,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.ShardPrimaryTermTimestamp = m.GetString("shard_primary_term_timestamp") a.IsPrimary = m.GetBool("is_primary") - countCoPrimaryReplicas := m.GetUint("count_co_primary_replicas") - a.IsCoPrimary = m.GetBool("is_co_primary") || (countCoPrimaryReplicas > 0) - a.AnalyzedInstanceHostname = m.GetString("hostname") - a.AnalyzedInstancePort = m.GetInt("port") a.AnalyzedInstanceAlias = topoproto.TabletAliasString(tablet.Alias) a.AnalyzedInstancePrimaryAlias = topoproto.TabletAliasString(primaryTablet.Alias) - a.AnalyzedInstanceDataCenter = m.GetString("data_center") - a.AnalyzedInstanceRegion = m.GetString("region") - a.AnalyzedInstancePhysicalEnvironment = m.GetString("physical_environment") - a.AnalyzedInstanceBinlogCoordinates = BinlogCoordinates{ - LogFile: m.GetString("binary_log_file"), - LogPos: m.GetUint32("binary_log_pos"), - Type: BinaryLog, - } - isStaleBinlogCoordinates := m.GetBool("is_stale_binlog_coordinates") a.ClusterDetails.Keyspace = m.GetString("keyspace") a.ClusterDetails.Shard = m.GetString("shard") a.GTIDMode = m.GetString("gtid_mode") @@ -359,13 +315,10 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.IsFailingToConnectToPrimary = m.GetBool("is_failing_to_connect_to_primary") a.ReplicationStopped = m.GetBool("replication_stopped") a.IsBinlogServer = m.GetBool("is_binlog_server") - a.ClusterDetails.ReadRecoveryInfo() a.ErrantGTID = m.GetString("gtid_errant") countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 - countValidMariaDBGTIDReplicas := m.GetUint("count_valid_mariadb_gtid_replicas") - a.MariaDBGTIDImmediateTopology = countValidMariaDBGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 countValidBinlogServerReplicas := m.GetUint("count_valid_binlog_server_replicas") a.BinlogServerImmediateTopology = countValidBinlogServerReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 a.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_primary_enabled") @@ -528,14 +481,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Description = "Primary cannot be reached by vtorc but it has replicating replicas; possibly a network/host issue" // } else if a.IsPrimary && a.SemiSyncPrimaryEnabled && a.SemiSyncPrimaryStatus && a.SemiSyncPrimaryWaitForReplicaCount > 0 && a.SemiSyncPrimaryClients < a.SemiSyncPrimaryWaitForReplicaCount { - if isStaleBinlogCoordinates { - a.Analysis = LockedSemiSyncPrimary - a.Description = "Semi sync primary is locked since it doesn't get enough replica acknowledgements" - } else { - a.Analysis = LockedSemiSyncPrimaryHypothesis - a.Description = "Semi sync primary seems to be locked, more samplings needed to validate" - } - // + a.Analysis = LockedSemiSyncPrimaryHypothesis + a.Description = "Semi sync primary seems to be locked, more samplings needed to validate" } else if a.IsPrimary && a.LastCheckValid && a.CountReplicas == 1 && a.CountValidReplicas == a.CountReplicas && a.CountValidReplicatingReplicas == 0 { a.Analysis = PrimarySingleReplicaNotReplicating a.Description = "Primary is reachable but its single replica is not replicating" @@ -569,7 +516,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna } if a.IsPrimary && a.CountReplicas > 1 && !a.OracleGTIDImmediateTopology 
&& - !a.MariaDBGTIDImmediateTopology && !a.BinlogServerImmediateTopology { a.StructureAnalysis = append(a.StructureAnalysis, NoFailoverSupportStructureWarning) } diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go index c1926fca089..b759c897837 100644 --- a/go/vt/vtorc/inst/analysis_dao_test.go +++ b/go/vt/vtorc/inst/analysis_dao_test.go @@ -34,10 +34,10 @@ var ( // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here. // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica. initialSQL = []string{ - `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`, - `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`, - `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`, - `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'localhost',6714,1,1,0,0,1,'','',1,1,0,'zone1',0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 
07:26:04','',1,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,1000000000000000000,1,0,0,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'localhost',6714,1,1,0,0,1,'','',1,1,0,'zone1',0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'',0,0,0,NULL,NULL,0,'','',0,0,0,'zone1',0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,1000000000000000000,1,1,0,2);`, + `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'localhost',6714,1,1,0,0,1,'','',1,1,0,'zone2',0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,1000000000000000000,1,0,1,0);`, `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, `INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 
00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, @@ -917,21 +917,19 @@ func TestAuditInstanceAnalysisInChangelog(t *testing.T) { // TestPostProcessAnalyses tests the functionality of the postProcessAnalyses function. func TestPostProcessAnalyses(t *testing.T) { ks0 := ClusterInfo{ - Keyspace: "ks", - Shard: "0", - CountInstances: 4, + Keyspace: "ks", + Shard: "0", } ks80 := ClusterInfo{ - Keyspace: "ks", - Shard: "80-", - CountInstances: 3, + Keyspace: "ks", + Shard: "80-", } clusters := map[string]*clusterAnalysis{ getKeyspaceShardName(ks0.Keyspace, ks0.Shard): { - totalTablets: int(ks0.CountInstances), + totalTablets: 4, }, getKeyspaceShardName(ks80.Keyspace, ks80.Shard): { - totalTablets: int(ks80.CountInstances), + totalTablets: 3, }, } diff --git a/go/vt/vtorc/inst/analysis_test.go b/go/vt/vtorc/inst/analysis_test.go deleted file mode 100644 index 70849379a5e..00000000000 --- a/go/vt/vtorc/inst/analysis_test.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "testing" - - "vitess.io/vitess/go/vt/vtorc/config" - - "github.com/stretchr/testify/require" -) - -func init() { - config.MarkConfigurationLoaded() -} - -func TestGetAnalysisInstanceType(t *testing.T) { - { - analysis := &ReplicationAnalysis{} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "intermediate-primary") - } - { - analysis := &ReplicationAnalysis{IsPrimary: true} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "primary") - } - { - analysis := &ReplicationAnalysis{IsCoPrimary: true} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "co-primary") - } - { - analysis := &ReplicationAnalysis{IsPrimary: true, IsCoPrimary: true} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "co-primary") - } -} diff --git a/go/vt/vtorc/inst/binlog.go b/go/vt/vtorc/inst/binlog.go deleted file mode 100644 index 9c115e4e457..00000000000 --- a/go/vt/vtorc/inst/binlog.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var detachPattern *regexp.Regexp - -func init() { - detachPattern, _ = regexp.Compile(`//([^/:]+):([\d]+)`) // e.g. `//binlog.01234:567890` -} - -type BinlogType int32 - -const ( - BinaryLog BinlogType = iota - RelayLog -) - -// BinlogCoordinates described binary log coordinates in the form of log file & log position. -type BinlogCoordinates struct { - LogFile string - LogPos uint32 - Type BinlogType -} - -// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306 -func ParseBinlogCoordinates(logFileLogPos string) (*BinlogCoordinates, error) { - tokens := strings.SplitN(logFileLogPos, ":", 2) - if len(tokens) != 2 { - return nil, fmt.Errorf("ParseBinlogCoordinates: Cannot parse BinlogCoordinates from %s. Expected format is file:pos", logFileLogPos) - } - - logPos, err := strconv.ParseUint(tokens[1], 10, 32) - if err != nil { - return nil, fmt.Errorf("ParseBinlogCoordinates: invalid pos: %s", tokens[1]) - } - return &BinlogCoordinates{LogFile: tokens[0], LogPos: uint32(logPos)}, nil -} - -// DisplayString returns a user-friendly string representation of these coordinates -func (binlogCoordinates *BinlogCoordinates) DisplayString() string { - return fmt.Sprintf("%s:%d", binlogCoordinates.LogFile, binlogCoordinates.LogPos) -} - -// String returns a user-friendly string representation of these coordinates -func (binlogCoordinates BinlogCoordinates) String() string { - return binlogCoordinates.DisplayString() -} - -// Equals tests equality of this coordinate and another one. -func (binlogCoordinates *BinlogCoordinates) Equals(other *BinlogCoordinates) bool { - if other == nil { - return false - } - return binlogCoordinates.LogFile == other.LogFile && binlogCoordinates.LogPos == other.LogPos && binlogCoordinates.Type == other.Type -} - -// IsEmpty returns true if the log file is empty, unnamed -func (binlogCoordinates *BinlogCoordinates) IsEmpty() bool { - return binlogCoordinates.LogFile == "" -} - -// SmallerThan returns true if this coordinate is strictly smaller than the other. -func (binlogCoordinates *BinlogCoordinates) SmallerThan(other *BinlogCoordinates) bool { - if binlogCoordinates.LogFile < other.LogFile { - return true - } - if binlogCoordinates.LogFile == other.LogFile && binlogCoordinates.LogPos < other.LogPos { - return true - } - return false -} - -// SmallerThanOrEquals returns true if this coordinate is the same or equal to the other one. -// We do NOT compare the type so we can not use this.Equals() -func (binlogCoordinates *BinlogCoordinates) SmallerThanOrEquals(other *BinlogCoordinates) bool { - if binlogCoordinates.SmallerThan(other) { - return true - } - return binlogCoordinates.LogFile == other.LogFile && binlogCoordinates.LogPos == other.LogPos // No Type comparison -} - -// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's. -func (binlogCoordinates *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool { - return binlogCoordinates.LogFile < other.LogFile -} - -// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's. 
-// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's" -func (binlogCoordinates *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int { - thisNumber, _ := binlogCoordinates.FileNumber() - otherNumber, _ := other.FileNumber() - return otherNumber - thisNumber -} - -// FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename. -// Example: FileNumber() of mysqld.log.000789 is (789, 6) -func (binlogCoordinates *BinlogCoordinates) FileNumber() (int, int) { - tokens := strings.Split(binlogCoordinates.LogFile, ".") - numPart := tokens[len(tokens)-1] - numLen := len(numPart) - fileNum, err := strconv.Atoi(numPart) - if err != nil { - return 0, 0 - } - return fileNum, numLen -} - -// PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back) -func (binlogCoordinates *BinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) { - result := BinlogCoordinates{LogPos: 0, Type: binlogCoordinates.Type} - - fileNum, numLen := binlogCoordinates.FileNumber() - if fileNum == 0 { - return result, errors.New("Log file number is zero, cannot detect previous file") - } - newNumStr := fmt.Sprintf("%d", (fileNum - offset)) - newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr - - tokens := strings.Split(binlogCoordinates.LogFile, ".") - tokens[len(tokens)-1] = newNumStr - result.LogFile = strings.Join(tokens, ".") - return result, nil -} - -// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog -func (binlogCoordinates *BinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) { - return binlogCoordinates.PreviousFileCoordinatesBy(1) -} - -// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog -func (binlogCoordinates *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) { - result := BinlogCoordinates{LogPos: 0, Type: binlogCoordinates.Type} - - fileNum, numLen := binlogCoordinates.FileNumber() - newNumStr := fmt.Sprintf("%d", (fileNum + 1)) - newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr - - tokens := strings.Split(binlogCoordinates.LogFile, ".") - tokens[len(tokens)-1] = newNumStr - result.LogFile = strings.Join(tokens, ".") - return result, nil -} - -// Detach returns a detached form of coordinates -func (binlogCoordinates *BinlogCoordinates) Detach() (detachedCoordinates BinlogCoordinates) { - detachedCoordinates = BinlogCoordinates{LogFile: fmt.Sprintf("//%s:%d", binlogCoordinates.LogFile, binlogCoordinates.LogPos), LogPos: binlogCoordinates.LogPos} - return detachedCoordinates -} - -// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's. 
-func (binlogCoordinates *BinlogCoordinates) ExtractDetachedCoordinates() (isDetached bool, detachedCoordinates BinlogCoordinates) { - detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(binlogCoordinates.LogFile) - if len(detachedCoordinatesSubmatch) == 0 { - return false, *binlogCoordinates - } - detachedCoordinates.LogFile = detachedCoordinatesSubmatch[1] - logPos, _ := strconv.ParseUint(detachedCoordinatesSubmatch[2], 10, 32) - detachedCoordinates.LogPos = uint32(logPos) - return true, detachedCoordinates -} diff --git a/go/vt/vtorc/inst/binlog_test.go b/go/vt/vtorc/inst/binlog_test.go deleted file mode 100644 index bc0110e981c..00000000000 --- a/go/vt/vtorc/inst/binlog_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package inst - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/vtorc/config" -) - -var testCoordinates = BinlogCoordinates{LogFile: "mysql-bin.000010", LogPos: 108} - -func init() { - config.MarkConfigurationLoaded() -} - -func TestDetach(t *testing.T) { - detachedCoordinates := testCoordinates.Detach() - require.Equal(t, detachedCoordinates.LogFile, "//mysql-bin.000010:108") - require.Equal(t, detachedCoordinates.LogPos, testCoordinates.LogPos) -} - -func TestDetachedCoordinates(t *testing.T) { - isDetached, detachedCoordinates := testCoordinates.ExtractDetachedCoordinates() - require.False(t, isDetached) - require.Equal(t, detachedCoordinates.LogFile, testCoordinates.LogFile) - require.Equal(t, detachedCoordinates.LogPos, testCoordinates.LogPos) -} - -func TestDetachedCoordinates2(t *testing.T) { - detached := testCoordinates.Detach() - isDetached, coordinates := detached.ExtractDetachedCoordinates() - - require.True(t, isDetached) - require.Equal(t, coordinates.LogFile, testCoordinates.LogFile) - require.Equal(t, coordinates.LogPos, testCoordinates.LogPos) -} - -func TestPreviousFileCoordinates(t *testing.T) { - previous, err := testCoordinates.PreviousFileCoordinates() - - require.NoError(t, err) - require.Equal(t, previous.LogFile, "mysql-bin.000009") - require.Equal(t, previous.LogPos, uint32(0)) -} - -func TestNextFileCoordinates(t *testing.T) { - next, err := testCoordinates.NextFileCoordinates() - - require.NoError(t, err) - require.Equal(t, next.LogFile, "mysql-bin.000011") - require.Equal(t, next.LogPos, uint32(0)) -} - -func TestBinlogCoordinates(t *testing.T) { - c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104} - c2 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104} - c3 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 5000} - c4 := BinlogCoordinates{LogFile: "mysql-bin.00112", LogPos: 104} - - require.True(t, c1.Equals(&c2)) - require.False(t, c1.Equals(&c3)) - require.False(t, c1.Equals(&c4)) - require.False(t, c1.SmallerThan(&c2)) - require.True(t, c1.SmallerThan(&c3)) - require.True(t, c1.SmallerThan(&c4)) - require.True(t, c3.SmallerThan(&c4)) - require.False(t, c3.SmallerThan(&c2)) - require.False(t, c4.SmallerThan(&c2)) - require.False(t, c4.SmallerThan(&c3)) - - require.True(t, c1.SmallerThanOrEquals(&c2)) - require.True(t, c1.SmallerThanOrEquals(&c3)) -} - -func TestBinlogPrevious(t *testing.T) { - c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104} - cres, err := c1.PreviousFileCoordinates() - - require.NoError(t, err) - require.Equal(t, c1.Type, cres.Type) - require.Equal(t, cres.LogFile, "mysql-bin.00016") - - c2 := BinlogCoordinates{LogFile: "mysql-bin.00100", LogPos: 104} - cres, err = c2.PreviousFileCoordinates() - - require.NoError(t, err) - 
require.Equal(t, c1.Type, cres.Type) - require.Equal(t, cres.LogFile, "mysql-bin.00099") - - c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00100", LogPos: 104} - cres, err = c3.PreviousFileCoordinates() - - require.NoError(t, err) - require.Equal(t, c1.Type, cres.Type) - require.Equal(t, cres.LogFile, "mysql.00.prod.com.00099") - - c4 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00000", LogPos: 104} - _, err = c4.PreviousFileCoordinates() - - require.Error(t, err) -} - -func TestBinlogCoordinatesAsKey(t *testing.T) { - m := make(map[BinlogCoordinates]bool) - - c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104} - c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104} - c3 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104} - c4 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 222} - - m[c1] = true - m[c2] = true - m[c3] = true - m[c4] = true - - require.Equal(t, len(m), 3) -} - -func TestFileNumberDistance(t *testing.T) { - c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104} - c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104} - - require.Equal(t, c1.FileNumberDistance(&c1), 0) - require.Equal(t, c1.FileNumberDistance(&c2), 5) - require.Equal(t, c2.FileNumberDistance(&c1), -5) -} - -func TestFileNumber(t *testing.T) { - c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104} - fileNum, numLen := c1.FileNumber() - - require.Equal(t, fileNum, 17) - require.Equal(t, numLen, 5) -} diff --git a/go/vt/vtorc/inst/cluster.go b/go/vt/vtorc/inst/cluster.go index c3a77485e74..f163885a283 100644 --- a/go/vt/vtorc/inst/cluster.go +++ b/go/vt/vtorc/inst/cluster.go @@ -18,16 +18,6 @@ package inst // ClusterInfo makes for a cluster status/info summary type ClusterInfo struct { - Keyspace string - Shard string - CountInstances uint - HeuristicLag int64 - HasAutomatedPrimaryRecovery bool - HasAutomatedIntermediatePrimaryRecovery bool -} - -// ReadRecoveryInfo -func (clusterInfo *ClusterInfo) ReadRecoveryInfo() { - clusterInfo.HasAutomatedPrimaryRecovery = true - clusterInfo.HasAutomatedIntermediatePrimaryRecovery = true + Keyspace string + Shard string } diff --git a/go/vt/vtorc/inst/instance.go b/go/vt/vtorc/inst/instance.go index 7b1ef6abc6b..e055f043440 100644 --- a/go/vt/vtorc/inst/instance.go +++ b/go/vt/vtorc/inst/instance.go @@ -33,13 +33,11 @@ type Instance struct { ServerUUID string Version string VersionComment string - FlavorName string ReadOnly bool BinlogFormat string BinlogRowImage string LogBinEnabled bool LogReplicationUpdatesEnabled bool - SelfBinlogCoordinates BinlogCoordinates SourceHost string SourcePort int SourceUUID string @@ -54,12 +52,6 @@ type Instance struct { GTIDMode string SupportsOracleGTID bool UsingOracleGTID bool - UsingMariaDBGTID bool - UsingPseudoGTID bool // Legacy. 
Always 'false' - ReadBinlogCoordinates BinlogCoordinates - ExecBinlogCoordinates BinlogCoordinates - IsDetached bool - RelaylogCoordinates BinlogCoordinates LastSQLError string LastIOError string SecondsBehindPrimary sql.NullInt64 @@ -71,13 +63,9 @@ type Instance struct { primaryExecutedGtidSet string // Not exported ReplicationLagSeconds sql.NullInt64 - DataCenter string - Region string - PhysicalEnvironment string + Cell string ReplicationDepth uint - IsCoPrimary bool HasReplicationCredentials bool - SemiSyncEnforced bool SemiSyncPrimaryEnabled bool SemiSyncReplicaEnabled bool SemiSyncPrimaryTimeout uint64 @@ -132,64 +120,14 @@ func (instance *Instance) MajorVersionString() string { return strings.Join(instance.MajorVersion(), ".") } -// IsMariaDB checks whether this is any version of MariaDB -func (instance *Instance) IsMariaDB() bool { - return strings.Contains(instance.Version, "MariaDB") -} - -// IsPercona checks whether this is any version of Percona Server -func (instance *Instance) IsPercona() bool { - return strings.Contains(instance.VersionComment, "Percona") -} - // IsBinlogServer checks whether this is any type of a binlog server func (instance *Instance) IsBinlogServer() bool { return false } -// IsOracleMySQL checks whether this is an Oracle MySQL distribution -func (instance *Instance) IsOracleMySQL() bool { - if instance.IsMariaDB() { - return false - } - if instance.IsPercona() { - return false - } - if instance.IsBinlogServer() { - return false - } - return true -} - -// applyFlavorName -func (instance *Instance) applyFlavorName() { - if instance == nil { - return - } - if instance.IsOracleMySQL() { - instance.FlavorName = "MySQL" - } else if instance.IsMariaDB() { - instance.FlavorName = "MariaDB" - } else if instance.IsPercona() { - instance.FlavorName = "Percona" - } else { - instance.FlavorName = "unknown" - } -} - -// FlavorNameAndMajorVersion returns a string of the combined -// flavor and major version which is useful in some checks. -func (instance *Instance) FlavorNameAndMajorVersion() string { - if instance.FlavorName == "" { - instance.applyFlavorName() - } - - return instance.FlavorName + "-" + instance.MajorVersionString() -} - // IsReplica makes simple heuristics to decide whether this instance is a replica of another instance func (instance *Instance) IsReplica() bool { - return instance.SourceHost != "" && instance.SourceHost != "_" && instance.SourcePort != 0 && (instance.ReadBinlogCoordinates.LogFile != "" || instance.UsingGTID()) + return instance.SourceHost != "" && instance.SourceHost != "_" && instance.SourcePort != 0 && instance.UsingGTID() } // IsPrimary makes simple heuristics to decide whether this instance is a primary (not replicating from any other server), @@ -213,12 +151,7 @@ func (instance *Instance) ReplicationThreadsExist() bool { return instance.ReplicationSQLThreadState.Exists() && instance.ReplicationIOThreadState.Exists() } -// SQLThreadUpToDate returns true when the instance had consumed all relay logs. 
-func (instance *Instance) SQLThreadUpToDate() bool { - return instance.ReadBinlogCoordinates.Equals(&instance.ExecBinlogCoordinates) -} - -// UsingGTID returns true when this replica is currently replicating via GTID (either Oracle or MariaDB) +// UsingGTID returns true when this replica is currently replicating via GTID func (instance *Instance) UsingGTID() bool { - return instance.UsingOracleGTID || instance.UsingMariaDBGTID + return instance.UsingOracleGTID } diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go index cd6406e2599..3896b3173bf 100644 --- a/go/vt/vtorc/inst/instance_dao.go +++ b/go/vt/vtorc/inst/instance_dao.go @@ -222,12 +222,6 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named instance.LogReplicationUpdatesEnabled = fs.LogReplicaUpdates instance.VersionComment = fs.VersionComment - if instance.LogBinEnabled && fs.PrimaryStatus != nil { - binlogPos, err := getBinlogCoordinatesFromPositionString(fs.PrimaryStatus.FilePosition) - instance.SelfBinlogCoordinates = binlogPos - errorChan <- err - } - instance.SemiSyncPrimaryEnabled = fs.SemiSyncPrimaryEnabled instance.SemiSyncReplicaEnabled = fs.SemiSyncReplicaEnabled instance.SemiSyncPrimaryWaitForReplicaCount = uint(fs.SemiSyncWaitForReplicaCount) @@ -237,29 +231,27 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named instance.SemiSyncPrimaryStatus = fs.SemiSyncPrimaryStatus instance.SemiSyncReplicaStatus = fs.SemiSyncReplicaStatus - if instance.IsOracleMySQL() || instance.IsPercona() { - // Stuff only supported on Oracle / Percona MySQL - // ... - // @@gtid_mode only available in Oracle / Percona MySQL >= 5.6 - instance.GTIDMode = fs.GtidMode - instance.ServerUUID = fs.ServerUuid - if fs.PrimaryStatus != nil { - GtidExecutedPos, err := replication.DecodePosition(fs.PrimaryStatus.Position) - errorChan <- err - if err == nil && GtidExecutedPos.GTIDSet != nil { - instance.ExecutedGtidSet = GtidExecutedPos.GTIDSet.String() - } - } - GtidPurgedPos, err := replication.DecodePosition(fs.GtidPurged) + // Stuff only supported on Oracle / Percona MySQL + // ... 
+ // @@gtid_mode only available in Oracle / Percona MySQL >= 5.6 + instance.GTIDMode = fs.GtidMode + instance.ServerUUID = fs.ServerUuid + if fs.PrimaryStatus != nil { + GtidExecutedPos, err := replication.DecodePosition(fs.PrimaryStatus.Position) errorChan <- err - if err == nil && GtidPurgedPos.GTIDSet != nil { - instance.GtidPurged = GtidPurgedPos.GTIDSet.String() + if err == nil && GtidExecutedPos.GTIDSet != nil { + instance.ExecutedGtidSet = GtidExecutedPos.GTIDSet.String() } - instance.BinlogRowImage = fs.BinlogRowImage + } + GtidPurgedPos, err := replication.DecodePosition(fs.GtidPurged) + errorChan <- err + if err == nil && GtidPurgedPos.GTIDSet != nil { + instance.GtidPurged = GtidPurgedPos.GTIDSet.String() + } + instance.BinlogRowImage = fs.BinlogRowImage - if instance.GTIDMode != "" && instance.GTIDMode != "OFF" { - instance.SupportsOracleGTID = true - } + if instance.GTIDMode != "" && instance.GTIDMode != "OFF" { + instance.SupportsOracleGTID = true } } @@ -273,26 +265,11 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadState.IsRunning() instance.ReplicationSQLThreadRuning = instance.ReplicationSQLThreadState.IsRunning() - binlogPos, err := getBinlogCoordinatesFromPositionString(fs.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition) - instance.ReadBinlogCoordinates = binlogPos - errorChan <- err - - binlogPos, err = getBinlogCoordinatesFromPositionString(fs.ReplicationStatus.FilePosition) - instance.ExecBinlogCoordinates = binlogPos - errorChan <- err - instance.IsDetached, _ = instance.ExecBinlogCoordinates.ExtractDetachedCoordinates() - - binlogPos, err = getBinlogCoordinatesFromPositionString(fs.ReplicationStatus.RelayLogFilePosition) - instance.RelaylogCoordinates = binlogPos - instance.RelaylogCoordinates.Type = RelayLog - errorChan <- err - instance.LastSQLError = emptyQuotesRegexp.ReplaceAllString(strconv.QuoteToASCII(fs.ReplicationStatus.LastSqlError), "") instance.LastIOError = emptyQuotesRegexp.ReplaceAllString(strconv.QuoteToASCII(fs.ReplicationStatus.LastIoError), "") instance.SQLDelay = fs.ReplicationStatus.SqlDelay instance.UsingOracleGTID = fs.ReplicationStatus.AutoPosition - instance.UsingMariaDBGTID = fs.ReplicationStatus.UsingGtid instance.SourceUUID = fs.ReplicationStatus.SourceUuid instance.HasReplicationFilters = fs.ReplicationStatus.HasReplicationFilters @@ -322,7 +299,7 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named // No `goto Cleanup` after this point. // ------------------------------------------------------------------------- - instance.DataCenter = tablet.Alias.Cell + instance.Cell = tablet.Alias.Cell instance.InstanceAlias = topoproto.TabletAliasString(tablet.Alias) { @@ -349,12 +326,7 @@ Cleanup: }() if instanceFound { - if instance.IsCoPrimary { - // Take co-primary into account, and avoid infinite loop - instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.SourceUUID, instance.ServerUUID) - } else { - instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.AncestryUUID, instance.ServerUUID) - } + instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.AncestryUUID, instance.ServerUUID) // Add replication group ancestry UUID as well. Otherwise, VTOrc thinks there are errant GTIDs in group // members and its replicas, even though they are not. 
instance.AncestryUUID = strings.Trim(instance.AncestryUUID, ",") @@ -368,11 +340,6 @@ Cleanup: if uuid != instance.ServerUUID { redactedExecutedGtidSet.RemoveUUID(uuid) } - if instance.IsCoPrimary && uuid == instance.ServerUUID { - // If this is a co-primary, then this server is likely to show its own generated GTIDs as errant, - // because its co-primary has not applied them yet - redactedExecutedGtidSet.RemoveUUID(uuid) - } } // Avoid querying the database if there's no point: if !redactedExecutedGtidSet.IsEmpty() { @@ -413,18 +380,6 @@ func getKeyspaceShardName(keyspace, shard string) string { return fmt.Sprintf("%v:%v", keyspace, shard) } -func getBinlogCoordinatesFromPositionString(position string) (BinlogCoordinates, error) { - pos, err := replication.DecodePosition(position) - if err != nil || pos.GTIDSet == nil { - return BinlogCoordinates{}, err - } - binLogCoordinates, err := ParseBinlogCoordinates(pos.String()) - if err != nil { - return BinlogCoordinates{}, err - } - return *binLogCoordinates, nil -} - // ReadInstanceClusterAttributes will return the cluster name for a given instance by looking at its primary // and getting it from there. // It is a non-recursive function and so-called-recursion is performed upon periodic reading of @@ -466,13 +421,7 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { if primaryDataFound { replicationDepth = primaryReplicationDepth + 1 } - isCoPrimary := false - if primaryHostname == instance.Hostname && primaryPort == instance.Port { - // co-primary calls for special case, in fear of the infinite loop - isCoPrimary = true - } instance.ReplicationDepth = replicationDepth - instance.IsCoPrimary = isCoPrimary instance.AncestryUUID = ancestryUUID instance.primaryExecutedGtidSet = primaryExecutedGtidSet return nil @@ -508,26 +457,12 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.GTIDMode = m.GetString("gtid_mode") instance.GtidPurged = m.GetString("gtid_purged") instance.GtidErrant = m.GetString("gtid_errant") - instance.UsingMariaDBGTID = m.GetBool("mariadb_gtid") - instance.SelfBinlogCoordinates.LogFile = m.GetString("binary_log_file") - instance.SelfBinlogCoordinates.LogPos = m.GetUint32("binary_log_pos") - instance.ReadBinlogCoordinates.LogFile = m.GetString("source_log_file") - instance.ReadBinlogCoordinates.LogPos = m.GetUint32("read_source_log_pos") - instance.ExecBinlogCoordinates.LogFile = m.GetString("relay_source_log_file") - instance.ExecBinlogCoordinates.LogPos = m.GetUint32("exec_source_log_pos") - instance.IsDetached, _ = instance.ExecBinlogCoordinates.ExtractDetachedCoordinates() - instance.RelaylogCoordinates.LogFile = m.GetString("relay_log_file") - instance.RelaylogCoordinates.LogPos = m.GetUint32("relay_log_pos") - instance.RelaylogCoordinates.Type = RelayLog instance.LastSQLError = m.GetString("last_sql_error") instance.LastIOError = m.GetString("last_io_error") instance.SecondsBehindPrimary = m.GetNullInt64("replication_lag_seconds") instance.ReplicationLagSeconds = m.GetNullInt64("replica_lag_seconds") instance.SQLDelay = m.GetUint32("sql_delay") - instance.DataCenter = m.GetString("data_center") - instance.Region = m.GetString("region") - instance.PhysicalEnvironment = m.GetString("physical_environment") - instance.SemiSyncEnforced = m.GetBool("semi_sync_enforced") + instance.Cell = m.GetString("cell") instance.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_primary_enabled") instance.SemiSyncPrimaryTimeout = m.GetUint64("semi_sync_primary_timeout") 
 	instance.SemiSyncPrimaryWaitForReplicaCount = m.GetUint("semi_sync_primary_wait_for_replica_count")
@@ -536,7 +471,6 @@ func readInstanceRow(m sqlutils.RowMap) *Instance {
 	instance.SemiSyncPrimaryClients = m.GetUint("semi_sync_primary_clients")
 	instance.SemiSyncReplicaStatus = m.GetBool("semi_sync_replica_status")
 	instance.ReplicationDepth = m.GetUint("replication_depth")
-	instance.IsCoPrimary = m.GetBool("is_co_primary")
 	instance.HasReplicationCredentials = m.GetBool("has_replication_credentials")
 	instance.IsUpToDate = (m.GetUint("seconds_since_last_checked") <= config.Config.InstancePollSeconds)
 	instance.IsRecentlyChecked = (m.GetUint("seconds_since_last_checked") <= config.Config.InstancePollSeconds*5)
@@ -547,8 +481,6 @@ func readInstanceRow(m sqlutils.RowMap) *Instance {
 	instance.InstanceAlias = m.GetString("alias")
 	instance.LastDiscoveryLatency = time.Duration(m.GetInt64("last_discovery_latency")) * time.Nanosecond
 
-	instance.applyFlavorName()
-
 	// problems
 	if !instance.IsLastCheckValid {
 		instance.Problems = append(instance.Problems, "last_check_invalid")
@@ -798,8 +730,6 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo
 		"binlog_row_image",
 		"log_bin",
 		"log_replica_updates",
-		"binary_log_file",
-		"binary_log_pos",
 		"source_host",
 		"source_port",
 		"replica_sql_running",
@@ -815,27 +745,15 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo
 		"gtid_mode",
 		"gtid_purged",
 		"gtid_errant",
-		"mariadb_gtid",
-		"pseudo_gtid",
-		"source_log_file",
-		"read_source_log_pos",
-		"relay_source_log_file",
-		"exec_source_log_pos",
-		"relay_log_file",
-		"relay_log_pos",
 		"last_sql_error",
 		"last_io_error",
 		"replication_lag_seconds",
 		"replica_lag_seconds",
 		"sql_delay",
-		"data_center",
-		"region",
-		"physical_environment",
+		"cell",
 		"replication_depth",
-		"is_co_primary",
 		"has_replication_credentials",
 		"allow_tls",
-		"semi_sync_enforced",
 		"semi_sync_primary_enabled",
 		"semi_sync_primary_timeout",
 		"semi_sync_primary_wait_for_replica_count",
@@ -877,8 +795,6 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo
 		args = append(args, instance.BinlogRowImage)
 		args = append(args, instance.LogBinEnabled)
 		args = append(args, instance.LogReplicationUpdatesEnabled)
-		args = append(args, instance.SelfBinlogCoordinates.LogFile)
-		args = append(args, instance.SelfBinlogCoordinates.LogPos)
 		args = append(args, instance.SourceHost)
 		args = append(args, instance.SourcePort)
 		args = append(args, instance.ReplicationSQLThreadRuning)
@@ -894,27 +810,15 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo
 		args = append(args, instance.GTIDMode)
 		args = append(args, instance.GtidPurged)
 		args = append(args, instance.GtidErrant)
-		args = append(args, instance.UsingMariaDBGTID)
-		args = append(args, instance.UsingPseudoGTID)
-		args = append(args, instance.ReadBinlogCoordinates.LogFile)
-		args = append(args, instance.ReadBinlogCoordinates.LogPos)
-		args = append(args, instance.ExecBinlogCoordinates.LogFile)
-		args = append(args, instance.ExecBinlogCoordinates.LogPos)
-		args = append(args, instance.RelaylogCoordinates.LogFile)
-		args = append(args, instance.RelaylogCoordinates.LogPos)
 		args = append(args, instance.LastSQLError)
 		args = append(args, instance.LastIOError)
 		args = append(args, instance.SecondsBehindPrimary)
 		args = append(args, instance.ReplicationLagSeconds)
 		args = append(args, instance.SQLDelay)
-		args = append(args, instance.DataCenter)
-		args = append(args, instance.Region)
-		args = append(args, instance.PhysicalEnvironment)
+		args = append(args, instance.Cell)
 		args = append(args, instance.ReplicationDepth)
-		args = append(args, instance.IsCoPrimary)
 		args = append(args, instance.HasReplicationCredentials)
 		args = append(args, instance.AllowTLS)
-		args = append(args, instance.SemiSyncEnforced)
 		args = append(args, instance.SemiSyncPrimaryEnabled)
 		args = append(args, instance.SemiSyncPrimaryTimeout)
 		args = append(args, instance.SemiSyncPrimaryWaitForReplicaCount)
@@ -1122,25 +1026,6 @@ func SnapshotTopologies() error {
 	return ExecDBWriteFunc(writeFunc)
 }
 
-func ExpireStaleInstanceBinlogCoordinates() error {
-	expireSeconds := config.Config.ReasonableReplicationLagSeconds * 2
-	if expireSeconds < config.StaleInstanceCoordinatesExpireSeconds {
-		expireSeconds = config.StaleInstanceCoordinatesExpireSeconds
-	}
-	writeFunc := func() error {
-		_, err := db.ExecVTOrc(`
-			delete from database_instance_stale_binlog_coordinates
-			where first_seen < NOW() - INTERVAL ? SECOND
-			`, expireSeconds,
-		)
-		if err != nil {
-			log.Error(err)
-		}
-		return err
-	}
-	return ExecDBWriteFunc(writeFunc)
-}
-
 // GetDatabaseState takes the snapshot of the database and returns it.
 func GetDatabaseState() (string, error) {
 	type tableState struct {
diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go
index d6c53db11e4..d5bca5d980a 100644
--- a/go/vt/vtorc/inst/instance_dao_test.go
+++ b/go/vt/vtorc/inst/instance_dao_test.go
@@ -36,9 +36,9 @@ func stripSpaces(s string) string {
 }
 
 func mkTestInstances() []*Instance {
-	i710 := Instance{InstanceAlias: "zone1-i710", Hostname: "i710", Port: 3306, ServerID: 710, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 10}}
-	i720 := Instance{InstanceAlias: "zone1-i720", Hostname: "i720", Port: 3306, ServerID: 720, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 20}}
-	i730 := Instance{InstanceAlias: "zone1-i730", Hostname: "i730", Port: 3306, ServerID: 730, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 30}}
+	i710 := Instance{InstanceAlias: "zone1-i710", Hostname: "i710", Port: 3306, ServerID: 710}
+	i720 := Instance{InstanceAlias: "zone1-i720", Hostname: "i720", Port: 3306, ServerID: 720}
+	i730 := Instance{InstanceAlias: "zone1-i730", Hostname: "i730", Port: 3306, ServerID: 730}
 	instances := []*Instance{&i710, &i720, &i730}
 	for _, instance := range instances {
 		instance.Version = "5.6.7"
@@ -61,19 +61,19 @@ func TestMkInsertOdkuSingle(t *testing.T) {
 	s1 := `INSERT ignore INTO database_instance
 		(alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format,
-		binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port,
-		replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid,
-		source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
+		binlog_row_image, log_bin, log_replica_updates, source_host, source_port,
+		replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant,
+		last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, cell, replication_depth, has_replication_credentials, allow_tls, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
 	VALUES
-		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
+		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
 	ON DUPLICATE KEY UPDATE
-		alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls),
-		semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status),
+		alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), cell=VALUES(cell), replication_depth=VALUES(replication_depth), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls),
+		semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status),
 		last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen)
 	`
 	a1 := `zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT,
-	FULL, false, false, , 0, , 0,
-	false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,`
+	FULL, false, false, , 0,
+	false, false, 0, 0, false, false, false, , , , , , , , , {0 false}, {0 false}, 0, , 0, false, false, false, 0, 0, false, false, 0, false, 0,`
 
 	sql1, args1, err := mkInsertOdkuForInstances(instances[:1], false, true)
 	require.NoError(t, err)
@@ -88,23 +88,23 @@ func TestMkInsertOdkuThree(t *testing.T) {
 	s3 := `INSERT INTO database_instance
 		(alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format,
-		binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port,
-		replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid,
-		source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
+		binlog_row_image, log_bin, log_replica_updates, source_host, source_port,
+		replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant,
+		last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, cell, replication_depth, has_replication_credentials, allow_tls, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
 	VALUES
-		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
-		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
-		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
+		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
+		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
+		(?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
 	ON DUPLICATE KEY UPDATE
-		alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region),
-		physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced),
+		alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), cell=VALUES(cell),
+		replication_depth=VALUES(replication_depth), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls),
 		semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status),
 		last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen)
 	`
 	a3 := `
-	zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
-	zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
-	zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
+	zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, false, false, 0, 0, false, false, false, , , , , , , , , {0 false}, {0 false}, 0, , 0, false, false, false, 0, 0, false, false, 0, false, 0,
+	zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, false, false, 0, 0, false, false, false, , , , , , , , , {0 false}, {0 false}, 0, , 0, false, false, false, 0, 0, false, false, 0, false, 0,
+	zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, false, false, 0, 0, false, false, false, , , , , , , , , {0 false}, {0 false}, 0, , 0, false, false, false, 0, 0, false, false, 0, false, 0,
 	`
 
 	sql3, args3, err := mkInsertOdkuForInstances(instances[:3], true, true)
@@ -377,9 +377,9 @@ func TestReadInstancesByCondition(t *testing.T) {
 			condition:         "1=1",
 			instancesRequired: []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"},
 		}, {
-			name:              "All instances sort by data_center descending and then alias ascending",
+			name:              "All instances sort by cell descending and then alias ascending",
 			condition:         "1=1",
-			sort:              "data_center desc, alias asc",
+			sort:              "cell desc, alias asc",
 			instancesRequired: []string{"zone2-0000000200", "zone1-0000000100", "zone1-0000000101", "zone1-0000000112"},
 		}, {
 			name: "Filtering by replication_depth",
diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go
index 4c1e4264b5d..c877a1f25d9 100644
--- a/go/vt/vtorc/logic/topology_recovery.go
+++ b/go/vt/vtorc/logic/topology_recovery.go
@@ -198,10 +198,6 @@ func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry *inst.Replicati
 // runEmergencyReparentOp runs a recovery for which we have to run ERS. Here waitForAllTablets is a boolean telling ERS whether it should wait for all the tablets
 // or is it okay to skip 1.
 func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.ReplicationAnalysis, recoveryName string, waitForAllTablets bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) {
-	if !analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery {
-		return false, nil, nil
-	}
-
 	// Read the tablet information from the database to find the shard and keyspace of the tablet
 	tablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias)
 	if err != nil {
@@ -240,7 +236,7 @@ func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.Replication
 		reparentutil.EmergencyReparentOptions{
 			IgnoreReplicas:            nil,
 			WaitReplicasTimeout:       time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second,
-			PreventCrossCellPromotion: config.Config.PreventCrossDataCenterPrimaryFailover,
+			PreventCrossCellPromotion: config.Config.PreventCrossCellPrimaryFailover,
 			WaitAllTablets:            waitForAllTablets,
 		},
 	)
diff --git a/go/vt/vtorc/logic/topology_recovery_dao.go b/go/vt/vtorc/logic/topology_recovery_dao.go
index dd5f8a96430..0440bc4b393 100644
--- a/go/vt/vtorc/logic/topology_recovery_dao.go
+++ b/go/vt/vtorc/logic/topology_recovery_dao.go
@@ -193,9 +193,6 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog
 		topologyRecovery.AnalysisEntry.ClusterDetails.Shard = m.GetString("shard")
 
 		topologyRecovery.SuccessorAlias = m.GetString("successor_alias")
-
-		topologyRecovery.AnalysisEntry.ClusterDetails.ReadRecoveryInfo()
-
 		topologyRecovery.AllErrors = strings.Split(m.GetString("all_errors"), "\n")
 		topologyRecovery.DetectionID = m.GetInt64("detection_id")
diff --git a/go/vt/vtorc/logic/vtorc.go b/go/vt/vtorc/logic/vtorc.go
index b9e5795a31f..608ea546e26 100644
--- a/go/vt/vtorc/logic/vtorc.go
+++ b/go/vt/vtorc/logic/vtorc.go
@@ -371,7 +371,6 @@ func ContinuousDiscovery() {
 				go inst.ForgetLongUnseenInstances()
 				go inst.ExpireAudit()
-				go inst.ExpireStaleInstanceBinlogCoordinates()
 				go process.ExpireNodesHistory()
 				go process.ExpireAvailableNodes()
 				go ExpireRecoveryDetectionHistory()
diff --git a/go/vt/vtorc/test/recovery_analysis.go b/go/vt/vtorc/test/recovery_analysis.go
index b2ae4ce9520..e635c68ba99 100644
--- a/go/vt/vtorc/test/recovery_analysis.go
+++ b/go/vt/vtorc/test/recovery_analysis.go
@@ -38,15 +38,9 @@ type InfoForRecoveryAnalysis struct {
 	DurabilityPolicy string
 	IsInvalid        int
 	IsPrimary        int
-	IsCoPrimary      int
 	Hostname         string
 	Port             int
-	DataCenter       string
-	Region           string
-	PhysicalEnvironment string
-	LogFile          string
-	LogPos           uint32
-	IsStaleBinlogCoordinates int
+	Cell             string
 	GTIDMode         string
 	ErrantGTID       string
 	LastCheckValid   int
@@ -87,16 +81,11 @@ type InfoForRecoveryAnalysis struct {
 func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
 	rowMap := make(sqlutils.RowMap)
-	rowMap["binary_log_file"] = sqlutils.CellData{String: info.LogFile, Valid: true}
-	rowMap["binary_log_pos"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LogPos), Valid: true}
-	rowMap["count_binlog_server_replicas"] = sqlutils.CellData{Valid: false}
-	rowMap["count_co_primary_replicas"] = sqlutils.CellData{Valid: false}
 	rowMap["count_delayed_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountDelayedReplicas), Valid: true}
 	rowMap["count_distinct_logging_major_versions"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountDistinctMajorVersionsLoggingReplicas), Valid: true}
 	rowMap["count_downtimed_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountDowntimedReplicas), Valid: true}
 	rowMap["count_lagging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountLaggingReplicas), Valid: true}
 	rowMap["count_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountLoggingReplicas), Valid: true}
-	rowMap["count_mariadb_gtid_replicas"] = sqlutils.CellData{Valid: false}
 	rowMap["count_mixed_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountMixedBasedLoggingReplicas), Valid: true}
 	rowMap["count_oracle_gtid_replicas"] = sqlutils.CellData{Valid: false}
 	rowMap["count_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountReplicas), Valid: true}
@@ -105,11 +94,10 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
 	rowMap["count_semi_sync_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountSemiSyncReplicasEnabled), Valid: true}
 	rowMap["count_statement_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountStatementBasedLoggingReplicas), Valid: true}
 	rowMap["count_valid_binlog_server_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidBinlogServerReplicas), Valid: true}
-	rowMap["count_valid_mariadb_gtid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidMariaDBGTIDReplicas), Valid: true}
 	rowMap["count_valid_oracle_gtid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidOracleGTIDReplicas), Valid: true}
 	rowMap["count_valid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidReplicas), Valid: true}
 	rowMap["count_valid_replicating_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidReplicatingReplicas), Valid: true}
-	rowMap["data_center"] = sqlutils.CellData{String: info.DataCenter, Valid: true}
+	rowMap["cell"] = sqlutils.CellData{String: info.Cell, Valid: true}
 	rowMap["downtime_end_timestamp"] = sqlutils.CellData{String: info.DowntimeEndTimestamp, Valid: true}
 	rowMap["downtime_remaining_seconds"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.DowntimeRemainingSeconds), Valid: true}
 	rowMap["durability_policy"] = sqlutils.CellData{String: info.DurabilityPolicy, Valid: true}
@@ -117,13 +105,11 @@
 	rowMap["gtid_mode"] = sqlutils.CellData{String: info.GTIDMode, Valid: true}
 	rowMap["hostname"] = sqlutils.CellData{String: info.Hostname, Valid: true}
 	rowMap["is_binlog_server"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsBinlogServer), Valid: true}
-	rowMap["is_co_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsCoPrimary), Valid: true}
 	rowMap["is_downtimed"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsDowntimed), Valid: true}
 	rowMap["is_failing_to_connect_to_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsFailingToConnectToPrimary), Valid: true}
 	rowMap["is_invalid"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsInvalid), Valid: true}
 	rowMap["is_last_check_valid"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LastCheckValid), Valid: true}
 	rowMap["is_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsPrimary), Valid: true}
-	rowMap["is_stale_binlog_coordinates"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsStaleBinlogCoordinates), Valid: true}
 	rowMap["keyspace_type"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.KeyspaceType), Valid: true}
 	rowMap["keyspace"] = sqlutils.CellData{String: info.Keyspace, Valid: true}
 	rowMap["shard"] = sqlutils.CellData{String: info.Shard, Valid: true}
@@ -132,7 +118,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
 	rowMap["max_replica_gtid_errant"] = sqlutils.CellData{String: info.MaxReplicaGTIDErrant, Valid: true}
 	rowMap["max_replica_gtid_mode"] = sqlutils.CellData{String: info.MaxReplicaGTIDMode, Valid: true}
 	rowMap["min_replica_gtid_mode"] = sqlutils.CellData{String: info.MinReplicaGTIDMode, Valid: true}
-	rowMap["physical_environment"] = sqlutils.CellData{String: info.PhysicalEnvironment, Valid: true}
 	rowMap["port"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.Port), Valid: true}
 	if info.PrimaryTabletInfo == nil {
 		rowMap["primary_tablet_info"] = sqlutils.CellData{Valid: false}
@@ -142,7 +127,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
 	}
 	rowMap["primary_timestamp"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.PrimaryTimestamp), Valid: true}
 	rowMap["read_only"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.ReadOnly), Valid: true}
-	rowMap["region"] = sqlutils.CellData{String: info.Region, Valid: true}
 	rowMap["replication_depth"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.ReplicationDepth), Valid: true}
 	rowMap["replication_stopped"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.ReplicationStopped), Valid: true}
 	rowMap["semi_sync_primary_clients"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncPrimaryClients), Valid: true}
@@ -158,7 +142,7 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
 func (info *InfoForRecoveryAnalysis) SetValuesFromTabletInfo() {
 	info.Hostname = info.TabletInfo.MysqlHostname
 	info.Port = int(info.TabletInfo.MysqlPort)
-	info.DataCenter = info.TabletInfo.Alias.Cell
+	info.Cell = info.TabletInfo.Alias.Cell
 	info.Keyspace = info.TabletInfo.Keyspace
 	info.Shard = info.TabletInfo.Shard
 }