From 809c74cfaf4431738be5ce18d67fd57361fd89f3 Mon Sep 17 00:00:00 2001
From: Ondra Machacek
Date: Sun, 4 Jul 2021 14:55:05 +0200
Subject: [PATCH 01/43] OCPBUGSM-30066: Retry wait for operator in case it failed (#319)

This commit adds a retry mechanism while waiting for the operator to be
ready. If we apply the CR of the operator, it may happen (bug 1968606) that
OLM reports the Failed state even though the operator is actually
progressing. So we decided to ignore the failed state a few times.
---
 .../assisted_installer_controller.go      | 22 +++++++++++++---
 .../assisted_installer_controller_test.go | 26 +++++++++++++++++++
 src/utils/utils.go                        |  4 +++
 3 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index 4b5d132ca..57ecb2282 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -34,6 +34,9 @@ import (
 )
 
 const (
+	// We retry 10 times at a 30-second interval, meaning that we tolerate the operator
+	// being in the failed state for up to 5 minutes.
+	failedOperatorRetry = 10
 	generalWaitTimeoutInt = 30
 	controllerLogsSecondsAgo = 120 * 60
 	consoleOperatorName = "console"
@@ -88,10 +91,11 @@ type ControllerStatus struct {
 
 type controller struct {
 	ControllerConfig
-	log *logrus.Logger
-	ops ops.Ops
-	ic  inventory_client.InventoryClient
-	kc  k8s_client.K8SClient
+	log      *logrus.Logger
+	ops      ops.Ops
+	ic       inventory_client.InventoryClient
+	kc       k8s_client.K8SClient
+	retryMap map[string]int
 }
 
 func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inventory_client.InventoryClient, kc k8s_client.K8SClient) *controller {
@@ -101,6 +105,7 @@ func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inv
 		ops: ops,
 		ic:  ic,
 		kc:  kc,
+		retryMap: make(map[string]int),
 	}
 }
 
@@ -785,6 +790,15 @@ func (c controller) waitForOLMOperators() bool {
 		}
 
 		operatorStatus := utils.CsvStatusToOperatorStatus(string(csv.Status.Phase))
+
+		// FIXME: We retry the check of the operator status in case it's in failed state to WA bug 1968606
+		// Remove this code when bug 1968606 is fixed
+		if utils.IsStatusFailed(operatorStatus) && c.retryMap[operator.Name] < failedOperatorRetry {
+			c.retryMap[operator.Name]++
+			c.log.Warnf("Operator %s has failed state retry(%d/%d) the check.", operator.Name, c.retryMap[operator.Name], failedOperatorRetry)
+			continue
+		}
+
 		err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, operatorStatus, csv.Status.Message)
 		if err != nil {
 			c.log.WithError(err).Warnf("Failed to update olm %s status", operator.Name)
 			continue
diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go
index 2bcb925b9..2559e273b 100644
--- a/src/assisted_installer_controller/assisted_installer_controller_test.go
+++ b/src/assisted_installer_controller/assisted_installer_controller_test.go
@@ -992,6 +992,32 @@ var _ = Describe("installer HostRoleMaster role", func() {
 			mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", gomock.Any(), gomock.Any()).Return(nil).Times(1)
 			Expect(assistedController.waitForOLMOperators()).To(Equal(false))
 		})
+		It("check that we tolerate the failed state reported by CSV", func() {
+			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
+				[]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 1}}, nil,
+			).Times(1)
+			mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1)
+			mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseFailed}}, nil).Times(1)
+
+			Expect(assistedController.waitForOLMOperators()).To(Equal(false))
+			Expect(assistedController.retryMap["lso"]).To(Equal(1))
+
+			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
+				[]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 1}}, nil,
+			).Times(1)
+			mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1)
+			mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseSucceeded}}, nil).Times(1)
+			mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1)
+
+			Expect(assistedController.waitForOLMOperators()).To(Equal(false))
+			Expect(assistedController.retryMap["lso"]).To(Equal(1))
+
+			mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(
+				[]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusAvailable, TimeoutSeconds: 1}}, nil,
+			).Times(1)
+
+			Expect(assistedController.waitForOLMOperators()).To(Equal(true))
+		})
 	})
 
 	Context("waitingForClusterVersion", func() {
diff --git a/src/utils/utils.go b/src/utils/utils.go
index f19ef79bc..f00c5a6ab 100644
--- a/src/utils/utils.go
+++ b/src/utils/utils.go
@@ -283,6 +283,10 @@ func CsvStatusToOperatorStatus(csvStatus string) models.OperatorStatus {
 	}
 }
 
+func IsStatusFailed(operatorStatus models.OperatorStatus) bool {
+	return operatorStatus == models.OperatorStatusFailed
+}
+
 func ClusterOperatorConditionsToMonitoredOperatorStatus(conditions []configv1.ClusterOperatorStatusCondition) (models.OperatorStatus, string) {
 	for _, condition := range conditions {
 		if condition.Type == configv1.OperatorAvailable && condition.Status == configv1.ConditionTrue {

From 56359ed5b6f109c898682a80dc22941b490673d1 Mon Sep 17 00:00:00 2001
From: Yuval Goldberg
Date: Mon, 5 Jul 2021 14:51:11 +0300
Subject: [PATCH 02/43] MGMT-6663: Update progress of operators only when there's new progress (#298)

* MGMT-6663: Update progress of operators only when there's new progress
* MGMT-6663: Enhance the operator status update log
---
 .../assisted_installer_controller.go      | 49 ++++++++++++-------
 .../assisted_installer_controller_test.go | 30 ++++++++++--
 2 files changed, 58 insertions(+), 21 deletions(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index 57ecb2282..90f1330ca 100644
---
a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -799,13 +799,15 @@ func (c controller) waitForOLMOperators() bool { continue } - err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, operatorStatus, csv.Status.Message) - if err != nil { - c.log.WithError(err).Warnf("Failed to update olm %s status", operator.Name) - continue - } + if operator.Status != operatorStatus || (operator.StatusInfo != csv.Status.Message && csv.Status.Message != "") { + c.log.Infof("CSV %s updated, status: %s -> %s, message: %s -> %s.", operator.Name, operator.Status, operatorStatus, operator.StatusInfo, csv.Status.Message) - c.log.Infof("CSV %s is in status %s, message %s.", operator.Name, csv.Status.Phase, csv.Status.Message) + err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, operatorStatus, csv.Status.Message) + if err != nil { + c.log.WithError(err).Warnf("Failed to update olm %s status", operator.Name) + continue + } + } } return false } @@ -818,21 +820,34 @@ func (c controller) isOperatorAvailableInCluster(operatorName string) bool { return false } - operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions) - err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operatorName, operatorStatus, operatorMessage) + operatorStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, operatorName) if err != nil { - c.log.WithError(err).Warnf("Failed to update %s operator status %s with message %s", operatorName, operatorStatus, operatorMessage) + c.log.WithError(err).Errorf("Failed to get cluster %s operator %s status", c.ClusterID, operatorName) return false } - if !c.checkOperatorStatusCondition(co, configv1.OperatorAvailable, configv1.ConditionTrue) || - !c.checkOperatorStatusCondition(co, configv1.OperatorDegraded, configv1.ConditionFalse) { - return false - } + operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions) + if operatorStatusInService.Status != operatorStatus || (operatorStatusInService.StatusInfo != operatorMessage && operatorMessage != "") { + c.log.Infof("Operator %s updated, status: %s -> %s, message: %s -> %s.", operatorName, operatorStatusInService.Status, operatorStatus, operatorStatusInService.StatusInfo, operatorMessage) - c.log.Infof("%s operator is available in cluster", operatorName) + err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operatorName, operatorStatus, operatorMessage) + if err != nil { + c.log.WithError(err).Warnf("Failed to update %s operator status %s with message %s", operatorName, operatorStatus, operatorMessage) + return false + } - return true + if !c.checkOperatorStatusCondition(co, configv1.OperatorAvailable, configv1.ConditionTrue) || + !c.checkOperatorStatusCondition(co, configv1.OperatorDegraded, configv1.ConditionFalse) { + return false + } + + c.log.Infof("%s operator is available in cluster", operatorName) + + return true + + } + + return false } func (c controller) isOperatorAvailableInService(operatorName string) bool { @@ -883,7 +898,6 @@ func (c controller) waitingForClusterVersion(ctx context.Context) error { } operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions) - if cvoStatusInService.Status != operatorStatus || (cvoStatusInService.StatusInfo != operatorMessage && operatorMessage 
!= "") { // This is a common pattern to ensure the channel is empty after a stop has been called // More info on time/sleep.go documentation @@ -892,8 +906,7 @@ func (c controller) waitingForClusterVersion(ctx context.Context) error { } timer.Reset(WaitTimeout) - status := fmt.Sprintf("Cluster version status: %s message: %s", operatorStatus, operatorMessage) - c.log.Infof(status) + c.log.Infof("CVO updated, status: %s -> %s, message: %s -> %s.", cvoStatusInService.Status, operatorStatus, cvoStatusInService.StatusInfo, operatorMessage) // Update built-in monitored operator cluster version status if err := c.ic.UpdateClusterOperator(utils.GenerateRequestContext(), c.ClusterID, cvoOperatorName, operatorStatus, operatorMessage); err != nil { diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 2559e273b..29ac2e610 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -156,6 +156,7 @@ var _ = Describe("installer HostRoleMaster role", func() { setConsoleAsAvailable := func(clusterID string) { mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(validConsoleOperator, nil).Times(1) + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), clusterID, consoleOperatorName).Return(&models.MonitoredOperator{Status: models.OperatorStatusProgressing}, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), clusterID, consoleOperatorName, models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), clusterID, consoleOperatorName).Return(&models.MonitoredOperator{Status: models.OperatorStatusAvailable}, nil).Times(1) } @@ -983,12 +984,35 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().GetCSVFromSubscription(gomock.Any(), gomock.Any()).Return("", fmt.Errorf("Error")).Times(1) Expect(assistedController.waitForOLMOperators()).To(Equal(false)) }) - It("Wait if OLM operator progressing", func() { + It("Wait if OLM operator progressing - no update (empty message)", func() { mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}}, nil, + []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", + Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}}, nil, ).Times(1) mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{}, nil).Times(1) + mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, nil).Times(1) + Expect(assistedController.waitForOLMOperators()).To(Equal(false)) + }) + It("Wait if OLM operator progressing - no update (same message)", func() { + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( + []models.MonitoredOperator{{SubscriptionName: 
"local-storage-operator", Namespace: "openshift-local-storage", + Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + StatusInfo: "same"}}, nil, + ).Times(1) + mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) + mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{ + Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "same"}}, nil).Times(1) + Expect(assistedController.waitForOLMOperators()).To(Equal(false)) + }) + It("Wait if OLM operator progressing - update (new message)", func() { + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( + []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", + Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + StatusInfo: "old"}}, nil, + ).Times(1) + mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) + mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{ + Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "new"}}, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", gomock.Any(), gomock.Any()).Return(nil).Times(1) Expect(assistedController.waitForOLMOperators()).To(Equal(false)) }) From df41ab506fca705f62197b508869f3d6e998a903 Mon Sep 17 00:00:00 2001 From: Sagi Dayan Date: Sun, 11 Jul 2021 10:40:52 +0300 Subject: [PATCH 03/43] NO-ISSUE: Adding sagidayan as a code reviewer (#322) --- OWNERS_ALIASES | 1 + 1 file changed, 1 insertion(+) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index af14e79dc..59820b870 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -32,3 +32,4 @@ aliases: - masayag - jordigilh - machacekondra + - sagidayan From a9051890d8a89d859c22038115c50569e7bee10f Mon Sep 17 00:00:00 2001 From: Sagi Dayan Date: Sun, 11 Jul 2021 13:00:50 +0300 Subject: [PATCH 04/43] Bug 1979009: Change log message about EFI support (#323) - Minor log changes on `setting EFI boot order unsupported` - In `SetBootOrder` function - passing `liveLogger=nil` in order to not have an uneeded log at that stage (Failed executing nsenter...) --- src/ops/ops.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ops/ops.go b/src/ops/ops.go index e02098c64..f92ede238 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -220,9 +220,9 @@ func (o *ops) Reboot() error { } func (o *ops) SetBootOrder(device string) error { - _, err := o.ExecPrivilegeCommand(o.logWriter, "test", "-d", "/sys/firmware/efi") + _, err := o.ExecPrivilegeCommand(nil, "test", "-d", "/sys/firmware/efi") if err != nil { - o.log.Info("efi not supported") + o.log.Info("Setting boot order on efi is not supported. 
Skipping...") return nil } From ebf29ac5f07e40603f447410979314f3bdbbe85d Mon Sep 17 00:00:00 2001 From: Yuval Goldberg Date: Sun, 11 Jul 2021 14:49:50 +0300 Subject: [PATCH 05/43] MGMT-7201: Wrap PostInstallConfigs logs and pass the entire failure message to the service (#324) --- .../assisted_installer_controller.go | 34 +++++++------------ .../assisted_installer_controller_test.go | 2 +- 2 files changed, 14 insertions(+), 22 deletions(-) diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index 90f1330ca..eb8431521 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -372,6 +372,7 @@ func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup, return } if err != nil { + c.log.Error(err) errMessage = err.Error() status.Error() } @@ -385,9 +386,8 @@ func (c controller) postInstallConfigs(ctx context.Context) error { c.log.Infof("Waiting for cluster version operator: %t", c.WaitForClusterVersion) if c.WaitForClusterVersion { - err = c.waitingForClusterVersion(ctx) - if err != nil { - return err + if err = c.waitingForClusterVersion(ctx); err != nil { + return errors.Wrapf(err, "Timeout while waiting for cluster version to be available") } } @@ -406,32 +406,29 @@ func (c controller) postInstallConfigs(ctx context.Context) error { err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.addRouterCAToClusterCA) if err != nil { - return errors.Errorf("Timeout while waiting router ca data") + return errors.Wrapf(err, "Timeout while waiting router ca data") } unpatch, err := utils.EtcdPatchRequired(c.ControllerConfig.OpenshiftVersion) if err != nil { - return err + return errors.Wrapf(err, "Failed to patch etcd") } if unpatch && c.HighAvailabilityMode != models.ClusterHighAvailabilityModeNone { - err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.unpatchEtcd) - if err != nil { - return errors.Errorf("Timeout while trying to unpatch etcd") + if err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.unpatchEtcd); err != nil { + return errors.Wrapf(err, "Timeout while trying to unpatch etcd") } } else { c.log.Infof("Skipping etcd unpatch for cluster version %s", c.ControllerConfig.OpenshiftVersion) } - err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.validateConsoleAvailability) - if err != nil { - return errors.Errorf("Timeout while waiting for console to become available") + if err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.validateConsoleAvailability); err != nil { + return errors.Wrapf(err, "Timeout while waiting for console to become available") } // Apply post install manifests err = utils.WaitForPredicateWithContext(ctx, retryPostManifestTimeout, GeneralWaitInterval, c.applyPostInstallManifests) if err != nil { - c.log.WithError(err).Warnf("Failed to apply post manifests.") - return err + return errors.Wrapf(err, "Failed to apply post manifests") } waitTimeout := c.getMaximumOLMTimeout() @@ -442,8 +439,7 @@ func (c controller) postInstallConfigs(ctx context.Context) error { if err = c.updatePendingOLMOperators(); err != nil { return errors.Errorf("Timeout while waiting for some of the operators and not able to update its state") } - c.log.WithError(err).Warnf("Timeout while waiting for OLM operators be installed") - return err + 
return errors.Wrapf(err, "Timeout while waiting for OLM operators be installed") } return nil @@ -917,15 +913,11 @@ func (c controller) waitingForClusterVersion(ctx context.Context) error { return false } - err := utils.WaitForPredicateWithTimer(ctx, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable) - if err != nil { - return errors.Wrapf(err, "Timeout while waiting for cluster version to be available") - } - return nil + return utils.WaitForPredicateWithTimer(ctx, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable) } func (c controller) sendCompleteInstallation(ctx context.Context, isSuccess bool, errorInfo string) { - c.log.Infof("Start complete installation step, with params success:%t, error info %s", isSuccess, errorInfo) + c.log.Infof("Start complete installation step, with params success: %t, error info: %s", isSuccess, errorInfo) _ = utils.WaitForPredicateWithContext(ctx, CompleteTimeout, GeneralProgressUpdateInt, func() bool { ctxReq := utils.GenerateRequestContext() if err := c.ic.CompleteInstallation(ctxReq, c.ClusterID, isSuccess, errorInfo); err != nil { diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 29ac2e610..1da15cbc2 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -610,7 +610,7 @@ var _ = Describe("installer HostRoleMaster role", func() { setClusterAsFinalizing() mockk8sclient.EXPECT().GetConfigMap(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("aaa")).MinTimes(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", false, - "Timeout while waiting router ca data").Return(nil).Times(1) + "Timeout while waiting router ca data: timed out").Return(nil).Times(1) // Patching NS mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) From 706bbf2b41afdf76cc4d0200cc63241b8b89456b Mon Sep 17 00:00:00 2001 From: Sagi Dayan Date: Tue, 13 Jul 2021 11:08:21 +0300 Subject: [PATCH 06/43] Bug 1979009: Change log message about EFI support (#326) - Minor change to log regarding boot order setup on BIOS systems --- src/ops/ops.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ops/ops.go b/src/ops/ops.go index f92ede238..066f74dd1 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -222,7 +222,7 @@ func (o *ops) Reboot() error { func (o *ops) SetBootOrder(device string) error { _, err := o.ExecPrivilegeCommand(nil, "test", "-d", "/sys/firmware/efi") if err != nil { - o.log.Info("Setting boot order on efi is not supported. Skipping...") + o.log.Info("setting the boot order on BIOS systems is not supported. Skipping...") return nil } From ec000b28cc7500505680891509b9f918bee30e35 Mon Sep 17 00:00:00 2001 From: Omer Tuchfeld Date: Wed, 14 Jul 2021 11:22:39 +0200 Subject: [PATCH 07/43] MGMT-7210: Upgrade Go version to 1.16 (#325) Older versions of go are out of support, so for security compliance, we were trying to get all components on the latest version. 1.14 is already out of support, i.e. 
https://endoflife.date/go
---
 Dockerfile.assisted-installer            | 2 +-
 Dockerfile.assisted-installer-build      | 2 +-
 Dockerfile.assisted-installer-controller | 2 +-
 go.mod                                   | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/Dockerfile.assisted-installer b/Dockerfile.assisted-installer
index 281f8eba5..740938ee1 100644
--- a/Dockerfile.assisted-installer
+++ b/Dockerfile.assisted-installer
@@ -1,4 +1,4 @@
-FROM registry.ci.openshift.org/openshift/release:golang-1.15 AS builder
+FROM registry.ci.openshift.org/openshift/release:golang-1.16 AS builder
 ENV GOFLAGS=-mod=mod
 
 WORKDIR /go/src/github.com/openshift/assisted-installer
diff --git a/Dockerfile.assisted-installer-build b/Dockerfile.assisted-installer-build
index 173efc9e2..b82b49f41 100644
--- a/Dockerfile.assisted-installer-build
+++ b/Dockerfile.assisted-installer-build
@@ -1,4 +1,4 @@
-FROM registry.ci.openshift.org/openshift/release:golang-1.15
+FROM registry.ci.openshift.org/openshift/release:golang-1.16
 ENV GO111MODULE=on
 ENV GOFLAGS=""
diff --git a/Dockerfile.assisted-installer-controller b/Dockerfile.assisted-installer-controller
index 3a97db430..be411fead 100644
--- a/Dockerfile.assisted-installer-controller
+++ b/Dockerfile.assisted-installer-controller
@@ -1,6 +1,6 @@
 FROM quay.io/openshift/origin-cli:4.9.0 AS cli
 
-FROM registry.ci.openshift.org/openshift/release:golang-1.15 AS builder
+FROM registry.ci.openshift.org/openshift/release:golang-1.16 AS builder
 ENV GOFLAGS=-mod=mod
 
 WORKDIR /go/src/github.com/openshift/assisted-installer
diff --git a/go.mod b/go.mod
index 3e4255297..2ae12111d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/openshift/assisted-installer
 
-go 1.14
+go 1.16
 
 require (
 	github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 // indirect

From e87d07c3dd341a337c7a36ef3e388598a5f32dd8 Mon Sep 17 00:00:00 2001
From: Flavio Percoco
Date: Fri, 16 Jul 2021 16:11:46 +0200
Subject: [PATCH 08/43] NO-ISSUE: Log message before uploading logs to service (#330)

Signed-off-by: Flavio Percoco
Co-authored-by: Flavio Percoco
---
 src/installer/installer.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/installer/installer.go b/src/installer/installer.go
index b759364a8..ab43e0a83 100644
--- a/src/installer/installer.go
+++ b/src/installer/installer.go
@@ -136,6 +136,7 @@ func (i *installer) InstallNode() error {
 	}
 
 	//upload host logs and report log status before reboot
+	i.log.Infof("Uploading logs and reporting status before rebooting the node %s for cluster %s", i.Config.HostID, i.Config.ClusterID)
 	i.inventoryClient.HostLogProgressReport(ctx, i.Config.ClusterID, i.Config.HostID, models.LogsStateRequested)
 	_, err = i.ops.UploadInstallationLogs(isBootstrap || i.HighAvailabilityMode == models.ClusterHighAvailabilityModeNone)
 	if err != nil {

From 1f26de355ad0b0a97680af4deec8d703a4c309c6 Mon Sep 17 00:00:00 2001
From: Yuval Goldberg
Date: Mon, 19 Jul 2021 10:10:53 +0300
Subject: [PATCH 09/43] MGMT-7149: Merge wait for operators (#331)

Switch to a generic `isOperatorAvailable` function that works with any type
implementing the `OperatorHandler` interface. This function gets the
operator status from the service; if the status is available, it stops
running. Otherwise, it gets the operator status locally, checks whether it
differs from the status at the service, and if so sends an update to the
service.
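To make the shape of that flow concrete, here is a minimal, self-contained
sketch in Go. This is illustrative only: the type and method names below are
assumptions made for the example, not the verbatim contents of the new
operator_handler.go added by this patch.

    package main

    import "fmt"

    // Status mirrors the service-side operator status strings.
    type Status string

    const (
        StatusProgressing Status = "progressing"
        StatusAvailable   Status = "available"
    )

    // OperatorHandler is what the generic loop works against: anything that
    // can name itself and report its current status from the cluster.
    type OperatorHandler interface {
        Name() string
        Status() (Status, string, error)
    }

    // csvHandler stands in for ClusterServiceVersionHandler; the real one
    // would read the CSV phase via the Kubernetes client.
    type csvHandler struct{ name string }

    func (h csvHandler) Name() string { return h.name }

    func (h csvHandler) Status() (Status, string, error) {
        return StatusAvailable, "install strategy completed", nil
    }

    // isOperatorAvailable sketches the flow described above: trust the
    // service once it has acknowledged availability; otherwise read the
    // status locally and push an update only when something changed.
    func isOperatorAvailable(h OperatorHandler, inService map[string]Status) bool {
        if inService[h.Name()] == StatusAvailable {
            return true
        }
        local, msg, err := h.Status()
        if err != nil {
            return false
        }
        if inService[h.Name()] != local {
            fmt.Printf("operator %s: %s -> %s (%s)\n", h.Name(), inService[h.Name()], local, msg)
            inService[h.Name()] = local // stands in for UpdateClusterOperator
        }
        return false // wait for the service to acknowledge on the next poll
    }

    func main() {
        inService := map[string]Status{"lso": StatusProgressing}
        fmt.Println(isOperatorAvailable(csvHandler{name: "lso"}, inService)) // false: update pushed
        fmt.Println(isOperatorAvailable(csvHandler{name: "lso"}, inService)) // true: service acknowledged
    }

Each concrete handler then only has to encapsulate where its status comes
from.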
There are 3 implementations of the `OperatorHandler` interface:
- ClusterOperatorHandler (such as console)
- ClusterVersionHandler (only CVO)
- ClusterServiceVersionHandler (such as OCS, LSO)
---
 .../assisted_installer_controller.go      | 198 ++-----
 .../assisted_installer_controller_test.go | 497 +++++++++++-------
 .../operator_handler.go                   | 162 ++++++
 3 files changed, 513 insertions(+), 344 deletions(-)
 create mode 100644 src/assisted_installer_controller/operator_handler.go

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index eb8431521..e084235d8 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -23,7 +23,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	configv1 "github.com/openshift/api/config/v1"
 	"github.com/openshift/assisted-installer/src/common"
 	"github.com/openshift/assisted-installer/src/inventory_client"
 	"github.com/openshift/assisted-installer/src/k8s_client"
@@ -40,7 +39,6 @@ const (
 	generalWaitTimeoutInt = 30
 	controllerLogsSecondsAgo = 120 * 60
 	consoleOperatorName = "console"
-	cvoOperatorName = "cvo"
 	ingressConfigMapName = "default-ingress-cert"
 	ingressConfigMapNamespace = "openshift-config-managed"
 	dnsServiceName = "dns-default"
@@ -51,6 +49,7 @@ const (
 	KeepWaiting = false
 	ExitWaiting = true
 	customManifestsFile = "custom_manifests.yaml"
+	kubeconfigFileName = "kubeconfig-noingress"
 )
 
 var (
@@ -91,11 +90,10 @@ type ControllerStatus struct {
 
 type controller struct {
 	ControllerConfig
-	log      *logrus.Logger
-	ops      ops.Ops
-	ic       inventory_client.InventoryClient
-	kc       k8s_client.K8SClient
-	retryMap map[string]int
+	log *logrus.Logger
+	ops ops.Ops
+	ic  inventory_client.InventoryClient
+	kc  k8s_client.K8SClient
 }
 
 func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inventory_client.InventoryClient, kc k8s_client.K8SClient) *controller {
@@ -105,7 +103,6 @@ func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inv
 		ops: ops,
 		ic:  ic,
 		kc:  kc,
-		retryMap: make(map[string]int),
 	}
 }
 
@@ -431,9 +428,7 @@ func (c controller) postInstallConfigs(ctx context.Context) error {
 		return errors.Wrapf(err, "Failed to apply post manifests")
 	}
 
-	waitTimeout := c.getMaximumOLMTimeout()
-	err = utils.WaitForPredicateWithContext(ctx, waitTimeout, GeneralWaitInterval, c.waitForOLMOperators)
-	if err != nil {
+	if err = c.waitForOLMOperators(ctx); err != nil {
 		// In case the timeout occurs, we have to update the pending OLM operators to failed state,
 		// so the assisted-service can update the cluster state to completed.
if err = c.updatePendingOLMOperators(); err != nil { @@ -721,14 +716,7 @@ func (c controller) addRouterCAToClusterCA() bool { } -func (c controller) getMaximumOLMTimeout() time.Duration { - - operators, err := c.ic.GetClusterMonitoredOLMOperators(context.TODO(), c.ClusterID) - if err != nil { - c.log.WithError(err).Warningf("Failed to connect to assisted service") - return WaitTimeout - } - +func (c controller) getMaximumOLMTimeout(operators []*models.MonitoredOperator) time.Duration { timeout := WaitTimeout.Seconds() for _, operator := range operators { timeout = math.Max(float64(operator.TimeoutSeconds), timeout) @@ -737,16 +725,16 @@ func (c controller) getMaximumOLMTimeout() time.Duration { return time.Duration(timeout * float64(time.Second)) } -func (c controller) getProgressingOLMOperators() ([]models.MonitoredOperator, error) { - ret := make([]models.MonitoredOperator, 0) +func (c controller) getProgressingOLMOperators() ([]*models.MonitoredOperator, error) { + ret := make([]*models.MonitoredOperator, 0) operators, err := c.ic.GetClusterMonitoredOLMOperators(context.TODO(), c.ClusterID) if err != nil { c.log.WithError(err).Warningf("Failed to connect to assisted service") return ret, err } - for _, operator := range operators { - if operator.Status != models.OperatorStatusAvailable && operator.Status != models.OperatorStatusFailed { - ret = append(ret, operator) + for index := range operators { + if operators[index].Status != models.OperatorStatusAvailable && operators[index].Status != models.OperatorStatusFailed { + ret = append(ret, &operators[index]) } } return ret, nil @@ -766,151 +754,50 @@ func (c controller) updatePendingOLMOperators() error { } // waitForOLMOperators wait until all OLM monitored operators are available or failed. -func (c controller) waitForOLMOperators() bool { - c.log.Infof("Checking OLM operators") - operators, _ := c.getProgressingOLMOperators() - if len(operators) == 0 { - return true +func (c controller) waitForOLMOperators(ctx context.Context) error { + operators, err := c.getProgressingOLMOperators() + if err != nil { + return err } - for _, operator := range operators { - csvName, err := c.kc.GetCSVFromSubscription(operator.Namespace, operator.SubscriptionName) - if err != nil { - c.log.WithError(err).Warnf("Failed to get subscription of operator %s", operator.Name) - continue - } - - csv, err := c.kc.GetCSV(operator.Namespace, csvName) - if err != nil { - c.log.WithError(err).Warnf("Failed to get %s", operator.Name) - continue - } - - operatorStatus := utils.CsvStatusToOperatorStatus(string(csv.Status.Phase)) - - // FIXME: We retry the check of the operator status in case it's in failed state to WA bug 1968606 - // Remove this code when bug 1968606 is fixed - if utils.IsStatusFailed(operatorStatus) && c.retryMap[operator.Name] < failedOperatorRetry { - c.retryMap[operator.Name]++ - c.log.Warnf("Operator %s has failed state retry(%d/%d) the check.", operator.Name, c.retryMap[operator.Name], failedOperatorRetry) - continue - } - - if operator.Status != operatorStatus || (operator.StatusInfo != csv.Status.Message && csv.Status.Message != "") { - c.log.Infof("CSV %s updated, status: %s -> %s, message: %s -> %s.", operator.Name, operator.Status, operatorStatus, operator.StatusInfo, csv.Status.Message) - - err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, operatorStatus, csv.Status.Message) - if err != nil { - c.log.WithError(err).Warnf("Failed to update olm %s status", operator.Name) - continue - } - } + if len(operators) == 0 
{ + return nil } - return false -} -func (c controller) isOperatorAvailableInCluster(operatorName string) bool { - c.log.Infof("Checking %s operator availability status", operatorName) - co, err := c.kc.GetClusterOperator(operatorName) - if err != nil { - c.log.WithError(err).Warnf("Failed to get %s operator", operatorName) - return false - } + handlers := make(map[string]*ClusterServiceVersionHandler) - operatorStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, operatorName) - if err != nil { - c.log.WithError(err).Errorf("Failed to get cluster %s operator %s status", c.ClusterID, operatorName) - return false + for index := range operators { + handlers[operators[index].Name] = NewClusterServiceVersionHandler(c.kc, operators[index]) } - operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions) - if operatorStatusInService.Status != operatorStatus || (operatorStatusInService.StatusInfo != operatorMessage && operatorMessage != "") { - c.log.Infof("Operator %s updated, status: %s -> %s, message: %s -> %s.", operatorName, operatorStatusInService.Status, operatorStatus, operatorStatusInService.StatusInfo, operatorMessage) - - err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operatorName, operatorStatus, operatorMessage) - if err != nil { - c.log.WithError(err).Warnf("Failed to update %s operator status %s with message %s", operatorName, operatorStatus, operatorMessage) - return false + areOLMOperatorsAvailable := func() bool { + if len(handlers) == 0 { + return true } - if !c.checkOperatorStatusCondition(co, configv1.OperatorAvailable, configv1.ConditionTrue) || - !c.checkOperatorStatusCondition(co, configv1.OperatorDegraded, configv1.ConditionFalse) { - return false + for index := range handlers { + if c.isOperatorAvailable(handlers[index]) { + delete(handlers, index) + } } - - c.log.Infof("%s operator is available in cluster", operatorName) - - return true - - } - - return false -} - -func (c controller) isOperatorAvailableInService(operatorName string) bool { - operatorStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, operatorName) - if err != nil { - c.log.WithError(err).Errorf("Failed to get cluster %s %s operator status", c.ClusterID, operatorName) return false } - if operatorStatusInService.Status == models.OperatorStatusAvailable { - c.log.Infof("Service acknowledged %s operator is available for cluster %s", operatorName, c.ClusterID) - return true - } - - return false + waitTimeout := c.getMaximumOLMTimeout(operators) + c.log.Infof("Waiting for OLM operators for %v", waitTimeout) + return utils.WaitForPredicateWithContext(ctx, waitTimeout, GeneralWaitInterval, areOLMOperatorsAvailable) } // validateConsoleAvailability checks if the console operator is available func (c controller) validateConsoleAvailability() bool { - return c.isOperatorAvailableInCluster(consoleOperatorName) && - c.isOperatorAvailableInService(consoleOperatorName) + return c.isOperatorAvailable(NewClusterOperatorHandler(c.kc, consoleOperatorName)) } // waitingForClusterVersion checks the Cluster Version Operator availability in the // new OCP cluster. A success would be announced only when the service acknowledges // the CVO availability, in order to avoid unsycned scenarios. -// -// This function would be aligned with the console operator reporting workflow -// as part of the deprecation of the old API in MGMT-5188. 
func (c controller) waitingForClusterVersion(ctx context.Context) error { isClusterVersionAvailable := func(timer *time.Timer) bool { - c.log.Infof("Checking cluster version operator availability status") - co, err := c.kc.GetClusterVersion("version") - if err != nil { - c.log.WithError(err).Warn("Failed to get cluster version operator") - return false - } - - cvoStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, cvoOperatorName) - if err != nil { - c.log.WithError(err).Errorf("Failed to get cluster %s cvo status", c.ClusterID) - return false - } - - if cvoStatusInService.Status == models.OperatorStatusAvailable { - c.log.Infof("Service acknowledged CVO is available for cluster %s", c.ClusterID) - return true - } - - operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions) - if cvoStatusInService.Status != operatorStatus || (cvoStatusInService.StatusInfo != operatorMessage && operatorMessage != "") { - // This is a common pattern to ensure the channel is empty after a stop has been called - // More info on time/sleep.go documentation - if !timer.Stop() { - <-timer.C - } - timer.Reset(WaitTimeout) - - c.log.Infof("CVO updated, status: %s -> %s, message: %s -> %s.", cvoStatusInService.Status, operatorStatus, cvoStatusInService.StatusInfo, operatorMessage) - - // Update built-in monitored operator cluster version status - if err := c.ic.UpdateClusterOperator(utils.GenerateRequestContext(), c.ClusterID, cvoOperatorName, operatorStatus, operatorMessage); err != nil { - c.log.WithError(err).Errorf("Failed to update cluster %s cvo status", c.ClusterID) - } - } - - return false + return c.isOperatorAvailable(NewClusterVersionHandler(c.kc, timer)) } return utils.WaitForPredicateWithTimer(ctx, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable) @@ -1009,7 +896,6 @@ func (c controller) uploadSummaryLogs(podName string, namespace string, sinceSec func (c controller) downloadKubeconfigNoingress(ctx context.Context, dir string) (string, error) { // Download kubeconfig file - kubeconfigFileName := "kubeconfig-noingress" kubeconfigPath := path.Join(dir, kubeconfigFileName) err := c.ic.DownloadFile(ctx, kubeconfigFileName, kubeconfigPath) if err != nil { @@ -1126,21 +1012,3 @@ func (c controller) SetReadyState() { return true }) } - -// checkOperatorStatusCondition checks if given operator has a condition with an expected status. 
-func (c controller) checkOperatorStatusCondition(co *configv1.ClusterOperator, - conditionType configv1.ClusterStatusConditionType, - status configv1.ConditionStatus) bool { - for _, condition := range co.Status.Conditions { - if condition.Type == conditionType { - if condition.Status == status { - return true - } - c.log.Warnf("Operator %s condition '%s' is not met due to '%s': %s", - co.Name, conditionType, condition.Reason, condition.Message) - return false - } - } - c.log.Warnf("Operator %s condition '%s' does not exist", co.Name, conditionType) - return false -} diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 1da15cbc2..151c55772 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/google/uuid" metal3v1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" "github.com/openshift/assisted-installer/src/common" machinev1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" @@ -90,7 +91,6 @@ var _ = Describe("installer HostRoleMaster role", func() { kubeNamesIds = map[string]string{"node0": "6d6f00e8-70dd-48a5-859a-0f1459485ad9", "node1": "2834ff2e-8965-48a5-859a-0f1459485a77", "node2": "57df89ee-3546-48a5-859a-0f1459485a66"} - l.SetOutput(ioutil.Discard) BeforeEach(func() { ctrl = gomock.NewController(GinkgoT()) @@ -154,11 +154,24 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().ClusterLogProgressReport(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() } + mockGetServiceOperators := func(operators []models.MonitoredOperator) { + for index := range operators { + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), operators[index].Name).Return(&operators[index], nil).Times(1) + } + } + + mockGetCSV := func(operator models.MonitoredOperator, csv *olmv1alpha1.ClusterServiceVersion) { + randomCSV := uuid.New().String() + mockk8sclient.EXPECT().GetCSVFromSubscription(operator.Namespace, operator.SubscriptionName).Return(randomCSV, nil).Times(1) + mockk8sclient.EXPECT().GetCSV(operator.Namespace, randomCSV).Return(csv, nil).Times(1) + } + setConsoleAsAvailable := func(clusterID string) { + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(validConsoleOperator, nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), clusterID, consoleOperatorName).Return(&models.MonitoredOperator{Status: models.OperatorStatusProgressing}, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), clusterID, consoleOperatorName, models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), clusterID, consoleOperatorName).Return(&models.MonitoredOperator{Status: models.OperatorStatusAvailable}, nil).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusAvailable}}) } setClusterAsFinalizing := func() { @@ -200,6 +213,16 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().GetServiceNetworks().Return([]string{"10.56.20.0/24"}, nil) } + mockGetOLMOperators := func(operators 
[]models.MonitoredOperator) { + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(operators, nil).Times(1) + } + + mockApplyPostInstallManifests := func() { + mockbmclient.EXPECT().DownloadFile(gomock.Any(), customManifestsFile, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().DownloadFile(gomock.Any(), kubeconfigFileName, gomock.Any()).Return(nil).Times(1) + mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + Context("Waiting for 3 nodes", func() { It("Set ready event", func() { // fail to connect to assisted and then succeed @@ -488,73 +511,95 @@ var _ = Describe("installer HostRoleMaster role", func() { It("success", func() { installing := models.ClusterStatusInstalling - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) setClusterAsFinalizing() uploadIngressCert(assistedController.ClusterID) // Console - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(nil, fmt.Errorf("no-operator")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - &configv1.ClusterOperator{ - Status: configv1.ClusterOperatorStatus{ - Conditions: []configv1.ClusterOperatorStatusCondition{}, - }, - }, fmt.Errorf("no-conditions")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorDegraded, configv1.ConditionFalse), - fmt.Errorf("false-degraded-condition")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), - fmt.Errorf("missing-degraded-condition")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionFalse), - fmt.Errorf("false-available-condition")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), - fmt.Errorf("true-degraded-condition")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - &configv1.ClusterOperator{ - Status: configv1.ClusterOperatorStatus{ - Conditions: []configv1.ClusterOperatorStatusCondition{ - {Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse}, + By("console errors", func() { + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(nil, fmt.Errorf("no-operator")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + &configv1.ClusterOperator{ + Status: configv1.ClusterOperatorStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{}, }, - }, - }, fmt.Errorf("missing-conditions")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithConditionsStatus(configv1.ConditionTrue, configv1.ConditionTrue), - fmt.Errorf("bad-conditions-status")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, 
configv1.ConditionTrue), - fmt.Errorf("bad-conditions-status")).Times(1) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionFalse), - fmt.Errorf("bad-conditions-status")).Times(1) + }, fmt.Errorf("no-conditions")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorDegraded, configv1.ConditionFalse), + fmt.Errorf("false-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), + fmt.Errorf("missing-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionFalse), + fmt.Errorf("false-available-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), + fmt.Errorf("true-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + &configv1.ClusterOperator{ + Status: configv1.ClusterOperatorStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{ + {Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse}, + }, + }, + }, fmt.Errorf("missing-conditions")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithConditionsStatus(configv1.ConditionTrue, configv1.ConditionTrue), + fmt.Errorf("bad-conditions-status")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionTrue), + fmt.Errorf("bad-conditions-status")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionFalse), + fmt.Errorf("bad-conditions-status")).Times(1) + }) + setConsoleAsAvailable("cluster-id") // Patching NS mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) // CVO - mockk8sclient.EXPECT().GetClusterVersion("version").Return(nil, fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). 
+ Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).Times(1) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(nil, fmt.Errorf("dummy")).Times(1) - mockk8sclient.EXPECT().GetClusterVersion("version").Return(progressClusterVersionCondition, nil).Times(1) mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).Times(1) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(progressClusterVersionCondition, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, models.OperatorStatusProgressing, progressClusterVersionCondition.Status.Conditions[0].Message).Times(1) - mockk8sclient.EXPECT().GetClusterVersion("version").Return(availableClusterVersionCondition, nil).Times(2) mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). Return(&models.MonitoredOperator{Status: models.OperatorStatusProgressing, StatusInfo: progressClusterVersionCondition.Status.Conditions[0].Message}, nil).Times(1) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(availableClusterVersionCondition, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, models.OperatorStatusAvailable, availableClusterVersionCondition.Status.Conditions[0].Message).Times(1) + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). Return(&models.MonitoredOperator{Status: models.OperatorStatusAvailable, StatusInfo: availableClusterVersionCondition.Status.Conditions[0].Message}, nil).Times(1) // Completion - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(2) + mockApplyPostInstallManifests() + mockGetOLMOperators([]models.MonitoredOperator{}) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) @@ -585,15 +630,10 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("success", func() { installing := models.ClusterStatusInstalling - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) - setClusterAsFinalizing() - uploadIngressCert(assistedController.ClusterID) - setConsoleAsAvailable("cluster-id") - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).AnyTimes() + setControllerWaitForOLMOperators(assistedController.ClusterID) + mockGetOLMOperators([]models.MonitoredOperator{}) + mockApplyPostInstallManifests() 
mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) @@ -629,23 +669,42 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("waiting for single OLM operator", func() { - setControllerWaitForOLMOperators(assistedController.ClusterID) + By("setup", func() { + setControllerWaitForOLMOperators(assistedController.ClusterID) + mockApplyPostInstallManifests() + mockGetOLMOperators([]models.MonitoredOperator{ + {SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: "", TimeoutSeconds: 120 * 60}, + }) + }) + + By("empty status", func() { + mockGetServiceOperators([]models.MonitoredOperator{{Name: "lso", Status: ""}}) + mockGetCSV( + models.MonitoredOperator{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso"}, + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + }) + + By("in progress", func() { + mockGetServiceOperators([]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusProgressing}}) + mockGetCSV( + models.MonitoredOperator{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso"}, + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusProgressing, gomock.Any()).Return(nil).Times(1) + }) + + By("available", func() { + mockGetServiceOperators([]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusProgressing}}) + mockGetCSV( + models.MonitoredOperator{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso"}, + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseSucceeded}}, + ) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", Name: "lso", Status: models.OperatorStatusAvailable}}) + }) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: "", TimeoutSeconds: 120 * 60}}, nil, - ).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, 
TimeoutSeconds: 120 * 60}}, nil, - ).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusAvailable, TimeoutSeconds: 120 * 60}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{}, nil).Times(1) - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", gomock.Any(), gomock.Any()).Return(nil).AnyTimes() mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) @@ -657,18 +716,23 @@ var _ = Describe("installer HostRoleMaster role", func() { wg.Wait() Expect(status.HasError()).Should(Equal(false)) }) + It("waiting for single OLM operator which timeouts", func() { - setControllerWaitForOLMOperators(assistedController.ClusterID) + By("setup", func() { + setControllerWaitForOLMOperators(assistedController.ClusterID) + mockApplyPostInstallManifests() + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( + []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 0}}, nil, + ).AnyTimes() + }) + + By("endless empty status", func() { + mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), "lso").Return(&models.MonitoredOperator{Name: "lso", Status: ""}, nil).AnyTimes() + mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).AnyTimes() + mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, nil).AnyTimes() + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusProgressing, gomock.Any()).Return(nil).AnyTimes() + }) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "custom_manifests.yaml", gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), "kubeconfig-noingress", gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 1}}, nil, - ).AnyTimes() - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).AnyTimes() - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, nil).AnyTimes() - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", 
models.OperatorStatusProgressing, gomock.Any()).Return(nil).AnyTimes() mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusFailed, "Waiting for operator timed out").Return(nil).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) @@ -933,114 +997,191 @@ var _ = Describe("installer HostRoleMaster role", func() { Context("getMaximumOLMTimeout", func() { It("Return general timeout if no OLM's present", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(1) - Expect(assistedController.getMaximumOLMTimeout()).To(Equal(WaitTimeout)) - }) - - It("Return general timeout if assisted service is not reacheble", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error")).Times(1) - Expect(assistedController.getMaximumOLMTimeout()).To(Equal(WaitTimeout)) + opertors := []*models.MonitoredOperator{} + Expect(assistedController.getMaximumOLMTimeout(opertors)).To(Equal(WaitTimeout)) }) It("Return general timeout if OLM's timeout is lower", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(1) - Expect(assistedController.getMaximumOLMTimeout()).To(Equal(WaitTimeout)) + opertors := []*models.MonitoredOperator{ + { + TimeoutSeconds: 0, + }, + } + + Expect(assistedController.getMaximumOLMTimeout(opertors)).To(Equal(WaitTimeout)) }) It("Return maximum from multiple OLM's", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{ - {OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 120 * 60}, - {OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 130 * 60}, - }, nil, - ).Times(1) - Expect(assistedController.getMaximumOLMTimeout()).To(Equal(130 * 60 * time.Second)) + opertors := []*models.MonitoredOperator{ + {OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 120 * 60}, + {OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 130 * 60}, + } + Expect(assistedController.getMaximumOLMTimeout(opertors)).To(Equal(130 * 60 * time.Second)) }) }) Context("waitForOLMOperators", func() { - It("Don't wait if OLM operators list is empty", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{}, nil, - ).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(true)) - }) - It("Don't wait if OLM operator available", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{Status: models.OperatorStatusAvailable, OperatorType: models.OperatorTypeOlm}}, nil, - ).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(true)) + var ( + operatorName = "lso" + subscriptionName = "local-storage-operator" + namespaceName = "openshift-local-storage" + ) + + BeforeEach(func() { + GeneralWaitInterval = 100 * time.Millisecond + WaitTimeout = 150 * time.Millisecond }) - It("Don't wait if OLM operator failed", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{Status: models.OperatorStatusFailed, OperatorType: models.OperatorTypeOlm}}, nil, - ).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(true)) + + It("List is empty", func() { + 
mockGetOLMOperators([]models.MonitoredOperator{}) + Expect(assistedController.waitForOLMOperators(context.TODO())).To(BeNil()) }) - It("Wait if OLM operator progressing and k8s unavailable", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}}, nil, - ).Times(1) + It("k8s unavailable", func() { + operators := []models.MonitoredOperator{{Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}} + mockGetOLMOperators(operators) + mockGetServiceOperators(operators) mockk8sclient.EXPECT().GetCSVFromSubscription(gomock.Any(), gomock.Any()).Return("", fmt.Errorf("Error")).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(false)) + Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) }) - It("Wait if OLM operator progressing - no update (empty message)", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", - Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, nil).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(false)) + It("progressing - no update (empty message)", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockGetOLMOperators(operators) + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) }) - It("Wait if OLM operator progressing - no update (same message)", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", - Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, - StatusInfo: "same"}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{ - Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "same"}}, nil).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(false)) + It("progressing - no update (same message)", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + StatusInfo: "same", + }, + } + + mockGetOLMOperators(operators) + 
mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{ + Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "same"}, + }, + ) + Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) }) - It("Wait if OLM operator progressing - update (new message)", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", - Name: "lso", Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, - StatusInfo: "old"}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{ - Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "new"}}, nil).Times(1) - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", gomock.Any(), gomock.Any()).Return(nil).Times(1) - Expect(assistedController.waitForOLMOperators()).To(Equal(false)) + It("progressing - update (new message)", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + StatusInfo: "old", + }, + } + + mockGetOLMOperators(operators) + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{ + Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "new"}, + }, + ) + + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", operatorName, gomock.Any(), gomock.Any()).Return(nil).Times(1) + Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) }) It("check that we tolerate the failed state reported by CSV", func() { - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 1}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseFailed}}, nil).Times(1) - - Expect(assistedController.waitForOLMOperators()).To(Equal(false)) - Expect(assistedController.retryMap["lso"]).To(Equal(1)) - - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 1}}, nil, - ).Times(1) - mockk8sclient.EXPECT().GetCSVFromSubscription("openshift-local-storage", "local-storage-operator").Return("lso-1.1", nil).Times(1) - mockk8sclient.EXPECT().GetCSV("openshift-local-storage", "lso-1.1").Return(&olmv1alpha1.ClusterServiceVersion{Status: 
olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseSucceeded}}, nil).Times(1) - mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) - - Expect(assistedController.waitForOLMOperators()).To(Equal(false)) - Expect(assistedController.retryMap["lso"]).To(Equal(1)) - - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusAvailable, TimeoutSeconds: 1}}, nil, - ).Times(1) - - Expect(assistedController.waitForOLMOperators()).To(Equal(true)) + WaitTimeout = WaitTimeout * 10 + + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + OperatorType: models.OperatorTypeOlm, Name: operatorName, Status: models.OperatorStatusProgressing, TimeoutSeconds: 1, + }, + } + + mockGetOLMOperators(operators) + + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseFailed}}, + ) + + mockGetServiceOperators(operators) + mockGetCSV( + operators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseSucceeded}}, + ) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), operatorName, models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) + + newOperators := make([]models.MonitoredOperator, 0) + newOperators = append(newOperators, operators...) + newOperators[0].Status = models.OperatorStatusAvailable + mockGetServiceOperators(newOperators) + Expect(assistedController.waitForOLMOperators(context.TODO())).To(BeNil()) + }) + + It("multiple OLMs", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: "subscription-1", Namespace: "namespace-1", + OperatorType: models.OperatorTypeOlm, Name: "operator-1", Status: models.OperatorStatusProgressing, TimeoutSeconds: 120 * 60, + }, + { + SubscriptionName: "subscription-2", Namespace: "namespace-2", + OperatorType: models.OperatorTypeOlm, Name: "operator-2", Status: models.OperatorStatusProgressing, TimeoutSeconds: 120 * 60, + }, + { + SubscriptionName: "subscription-3", Namespace: "namespace-3", + OperatorType: models.OperatorTypeOlm, Name: "operator-3", Status: models.OperatorStatusProgressing, TimeoutSeconds: 120 * 60, + }, + } + + mockGetOLMOperators(operators) + + By("first is available", func() { + newOperators := make([]models.MonitoredOperator, 0) + newOperators = append(newOperators, operators...) 
+ newOperators[0].Status = models.OperatorStatusAvailable + mockGetServiceOperators(newOperators) + + mockGetCSV( + newOperators[1], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + mockGetCSV( + newOperators[2], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + }) + + By("last is available", func() { + newerOperators := make([]models.MonitoredOperator, 0) + newerOperators = append(newerOperators, operators[1], operators[2]) + newerOperators[1].Status = models.OperatorStatusAvailable + mockGetServiceOperators(newerOperators) + + mockGetCSV( + newerOperators[0], + &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, + ) + }) + + lastOne := []models.MonitoredOperator{operators[1]} + lastOne[0].Status = models.OperatorStatusAvailable + mockGetServiceOperators(lastOne) + + Expect(assistedController.waitForOLMOperators(context.TODO())).To(BeNil()) }) }) @@ -1131,23 +1272,21 @@ var _ = Describe("installer HostRoleMaster role", func() { StatusInfo: t.newCVOCondition.Message, } - amountOfSamples := 1 - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(t.currentServiceCVOStatus, nil).Times(1) if t.shouldSendUpdate { if t.currentServiceCVOStatus.Status != models.OperatorStatusAvailable { - // If a change occured and it is still false - we expect the timer to be resetted, - // hence another round would happen. - amountOfSamples += 1 - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(newServiceCVOStatus, nil).Times(1) } mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).Times(1) } - mockk8sclient.EXPECT().GetClusterVersion("version").Return(clusterVersionReport, nil).Times(amountOfSamples) + amountOfSamples := 0 + if t.currentServiceCVOStatus.Status != models.OperatorStatusAvailable { + amountOfSamples++ + } + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).MinTimes(amountOfSamples) if newServiceCVOStatus.Status == models.OperatorStatusAvailable { Expect(assistedController.waitingForClusterVersion(ctx)).ShouldNot(HaveOccurred()) @@ -1167,7 +1306,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }, } - mockk8sclient.EXPECT().GetClusterVersion("version").Return(clusterVersionReport, nil).AnyTimes() + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).AnyTimes() mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(currentServiceCVOStatus, nil).AnyTimes() mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).AnyTimes() @@ -1192,7 +1331,7 @@ var _ = Describe("installer HostRoleMaster role", func() { } // Fail twice - mockk8sclient.EXPECT().GetClusterVersion("version").Return(clusterVersionReport, nil).Times(3) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).Times(2) mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(currentServiceCVOStatus, nil).Times(2) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).Times(2) diff --git 
a/src/assisted_installer_controller/operator_handler.go b/src/assisted_installer_controller/operator_handler.go new file mode 100644 index 000000000..89320a34d --- /dev/null +++ b/src/assisted_installer_controller/operator_handler.go @@ -0,0 +1,162 @@ +package assisted_installer_controller + +import ( + "context" + "time" + + "github.com/openshift/assisted-installer/src/k8s_client" + "github.com/openshift/assisted-installer/src/utils" + "github.com/openshift/assisted-service/models" +) + +const ( + cvoOperatorName = "cvo" + clusterVersionName = "version" +) + +type OperatorHandler interface { + GetName() string + GetStatus() (models.OperatorStatus, string, error) + OnChange(newStatus models.OperatorStatus) bool +} + +func (c controller) isOperatorAvailable(handler OperatorHandler) bool { + operatorName := handler.GetName() + c.log.Infof("Checking <%s> operator availability status", operatorName) + + operatorStatusInService, isAvailable := c.isOperatorAvailableInService(operatorName) + if isAvailable { + return true + } + + operatorStatus, operatorMessage, err := handler.GetStatus() + if err != nil { + c.log.WithError(err).Warnf("Failed to get <%s> operator", operatorName) + return false + } + + if operatorStatusInService.Status != operatorStatus || (operatorStatusInService.StatusInfo != operatorMessage && operatorMessage != "") { + c.log.Infof("Operator <%s> updated, status: %s -> %s, message: %s -> %s.", operatorName, operatorStatusInService.Status, operatorStatus, operatorStatusInService.StatusInfo, operatorMessage) + if !handler.OnChange(operatorStatus) { + c.log.WithError(err).Warnf("%s operator on change failed", operatorName) + return false + } + + err = c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operatorName, operatorStatus, operatorMessage) + if err != nil { + c.log.WithError(err).Warnf("Failed to update %s operator status %s with message %s", operatorName, operatorStatus, operatorMessage) + return false + } + } + + return false +} + +func (c controller) isOperatorAvailableInService(operatorName string) (*models.MonitoredOperator, bool) { + operatorStatusInService, err := c.ic.GetClusterMonitoredOperator(utils.GenerateRequestContext(), c.ClusterID, operatorName) + if err != nil { + c.log.WithError(err).Errorf("Failed to get cluster %s %s operator status", c.ClusterID, operatorName) + return nil, false + } + + if operatorStatusInService.Status == models.OperatorStatusAvailable { + c.log.Infof("Service acknowledged <%s> operator is available for cluster %s", operatorName, c.ClusterID) + return operatorStatusInService, true + } + + return operatorStatusInService, false +} + +type ClusterOperatorHandler struct { + kc k8s_client.K8SClient + operatorName string +} + +func NewClusterOperatorHandler(kc k8s_client.K8SClient, operatorName string) *ClusterOperatorHandler { + return &ClusterOperatorHandler{kc: kc, operatorName: operatorName} +} + +func (handler ClusterOperatorHandler) GetName() string { return handler.operatorName } + +func (handler ClusterOperatorHandler) GetStatus() (models.OperatorStatus, string, error) { + co, err := handler.kc.GetClusterOperator(handler.operatorName) + if err != nil { + return "", "", err + } + + operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions) + return operatorStatus, operatorMessage, nil +} + +func (handler ClusterOperatorHandler) OnChange(_ models.OperatorStatus) bool { return true } + +type ClusterVersionHandler struct { + kc k8s_client.K8SClient + timer *time.Timer +} + 
+func NewClusterVersionHandler(kc k8s_client.K8SClient, timer *time.Timer) *ClusterVersionHandler {
+	return &ClusterVersionHandler{kc: kc, timer: timer}
+}
+
+func (handler ClusterVersionHandler) GetName() string { return cvoOperatorName }
+
+func (handler ClusterVersionHandler) GetStatus() (models.OperatorStatus, string, error) {
+	co, err := handler.kc.GetClusterVersion(clusterVersionName)
+	if err != nil {
+		return "", "", err
+	}
+
+	operatorStatus, operatorMessage := utils.ClusterOperatorConditionsToMonitoredOperatorStatus(co.Status.Conditions)
+	return operatorStatus, operatorMessage, nil
+}
+
+func (handler ClusterVersionHandler) OnChange(_ models.OperatorStatus) bool {
+	// This is a common pattern to ensure the channel is empty after a stop has been called
+	// More info on time/sleep.go documentation
+	if !handler.timer.Stop() {
+		<-handler.timer.C
+	}
+	handler.timer.Reset(WaitTimeout)
+
+	return true
+}
+
+type ClusterServiceVersionHandler struct {
+	kc       k8s_client.K8SClient
+	operator *models.MonitoredOperator
+	retries  int
+}
+
+func NewClusterServiceVersionHandler(kc k8s_client.K8SClient, operator *models.MonitoredOperator) *ClusterServiceVersionHandler {
+	return &ClusterServiceVersionHandler{kc: kc, operator: operator, retries: 0}
+}
+
+func (handler ClusterServiceVersionHandler) GetName() string { return handler.operator.Name }
+
+func (handler ClusterServiceVersionHandler) GetStatus() (models.OperatorStatus, string, error) {
+	csvName, err := handler.kc.GetCSVFromSubscription(handler.operator.Namespace, handler.operator.SubscriptionName)
+	if err != nil {
+		return "", "", err
+	}
+
+	csv, err := handler.kc.GetCSV(handler.operator.Namespace, csvName)
+	if err != nil {
+		return "", "", err
+	}
+
+	operatorStatus := utils.CsvStatusToOperatorStatus(string(csv.Status.Phase))
+
+	return operatorStatus, csv.Status.Message, nil
+}
+
+func (handler ClusterServiceVersionHandler) OnChange(newStatus models.OperatorStatus) bool {
+	// FIXME: We retry the check of the operator status in case it's in failed state to WA bug 1968606
+	// Remove this code when bug 1968606 is fixed
+	if utils.IsStatusFailed(newStatus) && handler.retries < failedOperatorRetry {
+		handler.retries++
+		return false
+	}
+
+	return true
+}

From 90c1e8da2c5cb49bfb96a9f9c3c9d8d5b875cbc5 Mon Sep 17 00:00:00 2001
From: Ondra Machacek
Date: Tue, 20 Jul 2021 16:28:55 +0200
Subject: [PATCH 10/43] OCPBUGSM-31942: Return error on failed status operator
 update (#332)

The method call getProgressingOLMOperators could return an error in case
the assisted service API is unavailable. In that case we wouldn't update
the status of the pending operators, and the cluster might hang in the
finalizing state.
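To make the failure mode concrete, here is a minimal, self-contained
sketch of the hazard (hypothetical names, not the controller's actual
types): when the error is discarded, an API outage is indistinguishable
from a genuinely empty operator list, so the failed-status updates are
silently skipped and the cluster can sit in finalizing forever.

```go
package main

import (
	"errors"
	"fmt"
)

var errAPIUnavailable = errors.New("assisted-service API unavailable")

// fetchProgressing stands in for getProgressingOLMOperators (simplified).
func fetchProgressing(apiUp bool) ([]string, error) {
	if !apiUp {
		return nil, errAPIUnavailable
	}
	return []string{"lso", "cnv"}, nil
}

// Before the fix: the error is dropped, so an outage yields an empty
// slice and the loop below never runs.
func markFailedIgnoringError(apiUp bool) {
	operators, _ := fetchProgressing(apiUp)
	for _, op := range operators {
		fmt.Println("marking failed:", op)
	}
}

// After the fix: the error is propagated so the caller can retry.
func markFailed(apiUp bool) error {
	operators, err := fetchProgressing(apiUp)
	if err != nil {
		return err
	}
	for _, op := range operators {
		fmt.Println("marking failed:", op)
	}
	return nil
}

func main() {
	markFailedIgnoringError(false) // prints nothing; the outage is invisible
	fmt.Println(markFailed(false)) // surfaces: assisted-service API unavailable
}
```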
---
 .../assisted_installer_controller.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index e084235d8..9dcaf359f 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -742,7 +742,11 @@ func (c controller) getProgressingOLMOperators() ([]*models.MonitoredOperator, e
 
 func (c controller) updatePendingOLMOperators() error {
 	c.log.Infof("Updating pending OLM operators")
-	operators, _ := c.getProgressingOLMOperators()
+	operators, err := c.getProgressingOLMOperators()
+	if err != nil {
+		return err
+	}
+
 	for _, operator := range operators {
 		err := c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, models.OperatorStatusFailed, "Waiting for operator timed out")
 		if err != nil {

From a1057b1ef1c39c60285d4277435d32068230613c Mon Sep 17 00:00:00 2001
From: slavie
Date: Wed, 21 Jul 2021 18:08:11 +0300
Subject: [PATCH 11/43] MGMT-4893: Add Must-Gather reports when olm controllers
 fail (#329)

* MGMT-4893: Add Must-Gather reports when olm controllers fail

- Add support for a JSON-formatted MUST_GATHER_IMAGE variable
- Backward compatibility with other formats of MUST_GATHER_IMAGE
- When one of the OLM operators fails or times out, it is marked on the
  controller status
- At the end of the installation process (either normal or aborted) we
  check whether a must-gather report should be collected, and with what
  scope

* NO-ISSUE: correcting typo in log

Co-authored-by: Yuval Goldberg
Co-authored-by: Yuval Goldberg
---
 .../assisted_installer_controller.go          | 102 +++++++++++++++---
 .../assisted_installer_controller_test.go     |  72 +++++++++----
 .../operator_handler.go                       |  20 ++--
 .../assisted_installer_main.go                |   7 +-
 .../assisted_installer_main_test.go           |   2 +-
 src/ops/mock_ops.go                           |  17 +--
 src/ops/ops.go                                |  15 ++-
 7 files changed, 167 insertions(+), 68 deletions(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index 9dcaf359f..47de57ccb 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -86,14 +86,17 @@ type Controller interface {
 
 type ControllerStatus struct {
 	errCounter uint32
+	components map[string]bool
+	lock       sync.Mutex
 }
 
 type controller struct {
 	ControllerConfig
-	log *logrus.Logger
-	ops ops.Ops
-	ic  inventory_client.InventoryClient
-	kc  k8s_client.K8SClient
+	Status *ControllerStatus
+	log    *logrus.Logger
+	ops    ops.Ops
+	ic     inventory_client.InventoryClient
+	kc     k8s_client.K8SClient
 }
 
 func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inventory_client.InventoryClient, kc k8s_client.K8SClient) *controller {
@@ -103,6 +106,13 @@ func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inv
 		ops:      ops,
 		ic:       ic,
 		kc:       kc,
+		Status:   NewControllerStatus(),
+	}
+}
+
+func NewControllerStatus() *ControllerStatus {
+	return &ControllerStatus{
+		components: make(map[string]bool),
 	}
 }
 
@@ -114,6 +124,28 @@ func (status *ControllerStatus) HasError() bool {
 	return atomic.LoadUint32(&status.errCounter) > 0
 }
 
+func (status *ControllerStatus) OperatorError(component string) {
+	status.lock.Lock()
+	defer status.lock.Unlock()
+	status.components[component] = true
+}
+
+func (status *ControllerStatus) HasOperatorError() bool 
{ + status.lock.Lock() + defer status.lock.Unlock() + return len(status.components) > 0 +} + +func (status *ControllerStatus) GetOperatorsInError() []string { + result := make([]string, 0) + status.lock.Lock() + defer status.lock.Unlock() + for op := range status.components { + result = append(result, op) + } + return result +} + func logHostsStatus(log logrus.FieldLogger, hosts map[string]inventory_client.HostData) { hostsStatus := make(map[string][]string) for hostname, hostData := range hosts { @@ -343,7 +375,7 @@ func isCsrApproved(csr *certificatesv1.CertificateSigningRequest) bool { return false } -func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup, status *ControllerStatus) { +func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup) { defer func() { c.log.Infof("Finished PostInstallConfigs") wg.Done() @@ -371,7 +403,7 @@ func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup, if err != nil { c.log.Error(err) errMessage = err.Error() - status.Error() + c.Status.Error() } success := err == nil c.sendCompleteInstallation(ctx, success, errMessage) @@ -742,13 +774,14 @@ func (c controller) getProgressingOLMOperators() ([]*models.MonitoredOperator, e func (c controller) updatePendingOLMOperators() error { c.log.Infof("Updating pending OLM operators") + ctx := utils.GenerateRequestContext() operators, err := c.getProgressingOLMOperators() if err != nil { return err } - for _, operator := range operators { - err := c.ic.UpdateClusterOperator(context.TODO(), c.ClusterID, operator.Name, models.OperatorStatusFailed, "Waiting for operator timed out") + c.Status.OperatorError(operator.Name) + err := c.ic.UpdateClusterOperator(ctx, c.ClusterID, operator.Name, models.OperatorStatusFailed, "Waiting for operator timed out") if err != nil { c.log.WithError(err).Warnf("Failed to update olm %s status", operator.Name) return err @@ -770,7 +803,7 @@ func (c controller) waitForOLMOperators(ctx context.Context) error { handlers := make(map[string]*ClusterServiceVersionHandler) for index := range operators { - handlers[operators[index].Name] = NewClusterServiceVersionHandler(c.kc, operators[index]) + handlers[operators[index].Name] = NewClusterServiceVersionHandler(c.kc, operators[index], c.Status) } areOLMOperatorsAvailable := func() bool { @@ -835,20 +868,21 @@ func (c controller) logClusterOperatorsStatus() { /** * This function upload the following logs at once to the service at the end of the installation process - * It takes a linient approach so if some logs are not available it ignores them and moves on + * It takes a lenient approach so if some logs are not available it ignores them and moves on * currently the bundled logs are: * - controller logs * - oc must-gather logs **/ -func (c controller) uploadSummaryLogs(podName string, namespace string, sinceSeconds int64, isMustGatherEnabled bool, mustGatherImg string) error { +func (c controller) uploadSummaryLogs(podName string, namespace string, sinceSeconds int64) error { var tarentries = make([]utils.TarEntry, 0) var ok bool = true ctx := utils.GenerateRequestContext() c.logClusterOperatorsStatus() - if isMustGatherEnabled { + if c.Status.HasError() || c.Status.HasOperatorError() { c.log.Infof("Uploading oc must-gather logs") - if tarfile, err := c.collectMustGatherLogs(ctx, mustGatherImg); err == nil { + images := c.parseMustGatherImages() + if tarfile, err := c.collectMustGatherLogs(ctx, images...); err == nil { if entry, tarerr := utils.NewTarEntryFromFile(tarfile); 
tarerr == nil { tarentries = append(tarentries, *entry) } @@ -898,6 +932,40 @@ func (c controller) uploadSummaryLogs(podName string, namespace string, sinceSec return nil } +func (c controller) parseMustGatherImages() []string { + images := make([]string, 0) + if c.MustGatherImage == "" { + c.log.Infof("collecting must-gather logs into using image from release") + return images + } + + c.log.Infof("collecting must-gather logs using this image configuration %s", c.MustGatherImage) + var imageMap map[string]string + err := json.Unmarshal([]byte(c.MustGatherImage), &imageMap) + if err != nil { + //MustGatherImage is not a JSON. Pass it as is + images = append(images, c.MustGatherImage) + return images + } + + //Use the parsed MustGatherImage to find the images needed for collecting + //the information + if c.Status.HasError() { + //general error - collect all data from the cluster using the standard image + images = append(images, imageMap["ocp"]) + } + + for _, op := range c.Status.GetOperatorsInError() { + if imageMap[op] != "" { + //per failed operator - add feature image for collecting more + //information about failed olm operators + images = append(images, imageMap[op]) + } + } + c.log.Infof("collecting must-gather logs with images: %v", images) + return images +} + func (c controller) downloadKubeconfigNoingress(ctx context.Context, dir string) (string, error) { // Download kubeconfig file kubeconfigPath := path.Join(dir, kubeconfigFileName) @@ -911,7 +979,7 @@ func (c controller) downloadKubeconfigNoingress(ctx context.Context, dir string) return kubeconfigPath, nil } -func (c controller) collectMustGatherLogs(ctx context.Context, mustGatherImg string) (string, error) { +func (c controller) collectMustGatherLogs(ctx context.Context, images ...string) (string, error) { tempDir, ferr := ioutil.TempDir("", "controller-must-gather-logs-") if ferr != nil { c.log.Errorf("Failed to create temp directory for must-gather-logs %v\n", ferr) @@ -924,7 +992,7 @@ func (c controller) collectMustGatherLogs(ctx context.Context, mustGatherImg str } //collect must gather logs - logtar, err := c.ops.GetMustGatherLogs(tempDir, kubeconfigPath, mustGatherImg) + logtar, err := c.ops.GetMustGatherLogs(tempDir, kubeconfigPath, images...) if err != nil { c.log.Errorf("Failed to collect must-gather logs %v\n", err) return "", err @@ -936,7 +1004,7 @@ func (c controller) collectMustGatherLogs(ctx context.Context, mustGatherImg str // Uploading logs every 5 minutes // We will take logs of assisted controller and upload them to assisted-service // by creating tar gz of them. 
-func (c *controller) UploadLogs(ctx context.Context, wg *sync.WaitGroup, status *ControllerStatus) { +func (c *controller) UploadLogs(ctx context.Context, wg *sync.WaitGroup) { podName := "" ticker := time.NewTicker(LogsUploadPeriod) progressCtx := utils.GenerateRequestContext() @@ -953,7 +1021,7 @@ func (c *controller) UploadLogs(ctx context.Context, wg *sync.WaitGroup, status c.log.Infof("Upload final controller and cluster logs before exit") c.ic.ClusterLogProgressReport(progressCtx, c.ClusterID, models.LogsStateRequested) _ = utils.WaitForPredicate(WaitTimeout, LogsUploadPeriod, func() bool { - err := c.uploadSummaryLogs(podName, c.Namespace, controllerLogsSecondsAgo, status.HasError(), c.MustGatherImage) + err := c.uploadSummaryLogs(podName, c.Namespace, controllerLogsSecondsAgo) if err != nil { c.log.Infof("retry uploading logs in 5 minutes...") } diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 151c55772..307077d8d 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -85,7 +85,6 @@ var _ = Describe("installer HostRoleMaster role", func() { inventoryNamesIds map[string]inventory_client.HostData kubeNamesIds map[string]string wg sync.WaitGroup - status *ControllerStatus defaultStages []models.HostStage ) kubeNamesIds = map[string]string{"node0": "6d6f00e8-70dd-48a5-859a-0f1459485ad9", @@ -117,7 +116,6 @@ var _ = Describe("installer HostRoleMaster role", func() { models.HostStageDone} assistedController = NewController(l, defaultTestControllerConf, mockops, mockbmclient, mockk8sclient) - status = &ControllerStatus{} }) AfterEach(func() { ctrl.Finish() @@ -238,7 +236,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().CreateEvent(assistedController.Namespace, common.AssistedControllerIsReadyEvent, gomock.Any(), common.AssistedControllerPrefix).Return(nil, nil).Times(1) assistedController.SetReadyState() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) }) It("waitAndUpdateNodesStatus happy flow - all nodes installing", func() { @@ -604,10 +602,10 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) wg.Add(1) - go assistedController.PostInstallConfigs(context.TODO(), &wg, status) + go assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) }) It("failure", func() { WaitTimeout = 20 * time.Millisecond @@ -617,9 +615,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", false, gomock.Any()).Return(nil).Times(1) wg.Add(1) - go assistedController.PostInstallConfigs(context.TODO(), &wg, status) + go assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(true)) + Expect(assistedController.Status.HasError()).Should(Equal(true)) }) }) @@ -641,9 +639,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) wg.Add(1) - assistedController.PostInstallConfigs(context.TODO(), &wg, status) 
+ assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) }) It("failure", func() { WaitTimeout = 20 * time.Millisecond @@ -656,9 +654,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) wg.Add(1) - go assistedController.PostInstallConfigs(context.TODO(), &wg, status) + go assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(true)) + Expect(assistedController.Status.HasError()).Should(Equal(true)) }) }) @@ -712,9 +710,10 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) wg.Add(1) - assistedController.PostInstallConfigs(context.TODO(), &wg, status) + assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasOperatorError()).Should(Equal(false)) }) It("waiting for single OLM operator which timeouts", func() { @@ -740,9 +739,10 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) wg.Add(1) - assistedController.PostInstallConfigs(context.TODO(), &wg, status) + assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() - Expect(status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.HasError()).Should(Equal(false)) + Expect(assistedController.Status.GetOperatorsInError()).To(ContainElement("lso")) }) }) }) @@ -878,7 +878,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().GetPods(assistedController.Namespace, gomock.Any(), fmt.Sprintf("status.phase=%s", v1.PodRunning)).Return(nil, fmt.Errorf("dummy")).MinTimes(2).MaxTimes(10) ctx, cancel := context.WithCancel(context.Background()) wg.Add(1) - go assistedController.UploadLogs(ctx, &wg, status) + go assistedController.UploadLogs(ctx, &wg) time.Sleep(1 * time.Second) cancel() wg.Wait() @@ -890,7 +890,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().GetPodLogsAsBuffer(assistedController.Namespace, "test", gomock.Any()).Return(nil, fmt.Errorf("dummy")).MinTimes(1) ctx, cancel := context.WithCancel(context.Background()) wg.Add(1) - go assistedController.UploadLogs(ctx, &wg, status) + go assistedController.UploadLogs(ctx, &wg) time.Sleep(500 * time.Millisecond) cancel() wg.Wait() @@ -902,7 +902,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) logClusterOperatorsSuccess() reportLogProgressSuccess() - err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo, false, "") + err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo) Expect(err).To(HaveOccurred()) }) It("Validate upload logs happy flow (controllers logs only)", func() { @@ -911,7 +911,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().UploadLogs(gomock.Any(), 
assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(nil).Times(1) logClusterOperatorsSuccess() reportLogProgressSuccess() - err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo, false, "") + err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo) Expect(err).NotTo(HaveOccurred()) }) @@ -921,7 +921,7 @@ var _ = Describe("installer HostRoleMaster role", func() { r := bytes.NewBuffer([]byte("test")) mockk8sclient.EXPECT().GetPodLogsAsBuffer(assistedController.Namespace, "test", gomock.Any()).Return(r, nil).Times(1) mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(nil).Times(1) - err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo, false, "") + err := assistedController.uploadSummaryLogs("test", assistedController.Namespace, controllerLogsSecondsAgo) Expect(err).NotTo(HaveOccurred()) }) @@ -934,7 +934,7 @@ var _ = Describe("installer HostRoleMaster role", func() { callUploadLogs := func(waitTime time.Duration) { wg.Add(1) - go assistedController.UploadLogs(ctx, &wg, status) + go assistedController.UploadLogs(ctx, &wg) time.Sleep(waitTime) cancel() wg.Wait() @@ -964,7 +964,7 @@ var _ = Describe("installer HostRoleMaster role", func() { logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), assistedController.MustGatherImage).Return("../../test_files/tartest.tar.gz", nil).Times(1) mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) - status.Error() + assistedController.Status.Error() callUploadLogs(150 * time.Millisecond) }) @@ -990,11 +990,37 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Return("", fmt.Errorf("failed")) mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Return("../../test_files/tartest.tar.gz", nil) mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) - status.Error() + assistedController.Status.Error() callUploadLogs(50 * time.Millisecond) }) }) + Context("must-gather image set parsing", func() { + var ac *controller + BeforeEach(func() { + ac = NewController(l, defaultTestControllerConf, mockops, mockbmclient, mockk8sclient) + }) + + It("MustGatherImage is empty", func() { + ac.MustGatherImage = "" + Expect(ac.parseMustGatherImages()).To(BeEmpty()) + }) + It("MustGatherImage is string", func() { + images := ac.parseMustGatherImages() + Expect(images).NotTo(BeEmpty()) + Expect(images[0]).To(Equal(ac.MustGatherImage)) + }) + It("MustGatherImage is json", func() { + ac.MustGatherImage = `{"ocp": "quay.io/openshift/must-gather", "cnv": "blah", "ocs": "foo"}` + ac.Status.Error() + ac.Status.OperatorError("cnv") + images := ac.parseMustGatherImages() + Expect(len(images)).To(Equal(2)) + Expect(images).To(ContainElement("quay.io/openshift/must-gather")) + Expect(images).To(ContainElement("blah")) + }) + }) + Context("getMaximumOLMTimeout", func() { It("Return general timeout if no OLM's present", func() { opertors := []*models.MonitoredOperator{} diff --git a/src/assisted_installer_controller/operator_handler.go b/src/assisted_installer_controller/operator_handler.go index 89320a34d..b32efed59 100644 --- a/src/assisted_installer_controller/operator_handler.go +++ 
b/src/assisted_installer_controller/operator_handler.go @@ -38,7 +38,7 @@ func (c controller) isOperatorAvailable(handler OperatorHandler) bool { if operatorStatusInService.Status != operatorStatus || (operatorStatusInService.StatusInfo != operatorMessage && operatorMessage != "") { c.log.Infof("Operator <%s> updated, status: %s -> %s, message: %s -> %s.", operatorName, operatorStatusInService.Status, operatorStatus, operatorStatusInService.StatusInfo, operatorMessage) if !handler.OnChange(operatorStatus) { - c.log.WithError(err).Warnf("%s operator on change failed", operatorName) + c.log.WithError(err).Warnf("<%s> operator's OnChange() returned false. Will skip an update.", operatorName) return false } @@ -125,11 +125,12 @@ func (handler ClusterVersionHandler) OnChange(_ models.OperatorStatus) bool { type ClusterServiceVersionHandler struct { kc k8s_client.K8SClient operator *models.MonitoredOperator + status *ControllerStatus retries int } -func NewClusterServiceVersionHandler(kc k8s_client.K8SClient, operator *models.MonitoredOperator) *ClusterServiceVersionHandler { - return &ClusterServiceVersionHandler{kc: kc, operator: operator, retries: 0} +func NewClusterServiceVersionHandler(kc k8s_client.K8SClient, operator *models.MonitoredOperator, status *ControllerStatus) *ClusterServiceVersionHandler { + return &ClusterServiceVersionHandler{kc: kc, operator: operator, status: status, retries: 0} } func (handler ClusterServiceVersionHandler) GetName() string { return handler.operator.Name } @@ -151,11 +152,14 @@ func (handler ClusterServiceVersionHandler) GetStatus() (models.OperatorStatus, } func (handler ClusterServiceVersionHandler) OnChange(newStatus models.OperatorStatus) bool { - // FIXME: We retry the check of the operator status in case it's in failed state to WA bug 1968606 - // Remove this code when bug 1968606 is fixed - if utils.IsStatusFailed(newStatus) && handler.retries < failedOperatorRetry { - handler.retries++ - return false + if utils.IsStatusFailed(newStatus) { + if handler.retries < failedOperatorRetry { + // FIXME: We retry the check of the operator status in case it's in failed state to WA bug 1968606 + // Remove this code when bug 1968606 is fixed + handler.retries++ + return false + } + handler.status.OperatorError(handler.operator.Name) } return true diff --git a/src/main/assisted-installer-controller/assisted_installer_main.go b/src/main/assisted-installer-controller/assisted_installer_main.go index 8effdc96e..4a5c45d83 100644 --- a/src/main/assisted-installer-controller/assisted_installer_main.go +++ b/src/main/assisted-installer-controller/assisted_installer_main.go @@ -66,7 +66,6 @@ func main() { ) var wg sync.WaitGroup - var status assistedinstallercontroller.ControllerStatus mainContext, mainContextCancel := context.WithCancel(context.Background()) // No need to cancel with context, will finish quickly @@ -89,16 +88,16 @@ func main() { go assistedController.WaitAndUpdateNodesStatus(mainContext, &wg) wg.Add(1) - go assistedController.PostInstallConfigs(mainContext, &wg, &status) + go assistedController.PostInstallConfigs(mainContext, &wg) wg.Add(1) go assistedController.UpdateBMHs(mainContext, &wg) wg.Add(1) - go assistedController.UploadLogs(mainContext, &wg, &status) + go assistedController.UploadLogs(mainContext, &wg) wg.Add(1) // monitoring installation by cluster status - waitForInstallation(client, logger, &status) + waitForInstallation(client, logger, assistedController.Status) } // waitForInstallation monitor cluster status and is blocking main from 
cancelling all goroutines
diff --git a/src/main/assisted-installer-controller/assisted_installer_main_test.go b/src/main/assisted-installer-controller/assisted_installer_main_test.go
index e2316e9b9..5a847dc5a 100644
--- a/src/main/assisted-installer-controller/assisted_installer_main_test.go
+++ b/src/main/assisted-installer-controller/assisted_installer_main_test.go
@@ -37,7 +37,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		ctrl = gomock.NewController(GinkgoT())
 		mockbmclient = inventory_client.NewMockInventoryClient(ctrl)
 		waitForInstallationInterval = 10 * time.Millisecond
-		status = &assistedinstallercontroller.ControllerStatus{}
+		status = assistedinstallercontroller.NewControllerStatus()
 	})
 	AfterEach(func() {
 		ctrl.Finish()
diff --git a/src/ops/mock_ops.go b/src/ops/mock_ops.go
index 2f1e85266..5da042397 100644
--- a/src/ops/mock_ops.go
+++ b/src/ops/mock_ops.go
@@ -308,18 +308,23 @@ func (mr *MockOpsMockRecorder) CreateOpenshiftSshManifest(filePath, template, ss
 }
 
 // GetMustGatherLogs mocks base method
-func (m *MockOps) GetMustGatherLogs(workDir, kubeconfigPath, mustGatherImg string) (string, error) {
+func (m *MockOps) GetMustGatherLogs(workDir, kubeconfigPath string, images ...string) (string, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetMustGatherLogs", workDir, kubeconfigPath, mustGatherImg)
+	varargs := []interface{}{workDir, kubeconfigPath}
+	for _, a := range images {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetMustGatherLogs", varargs...)
 	ret0, _ := ret[0].(string)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
 // GetMustGatherLogs indicates an expected call of GetMustGatherLogs
-func (mr *MockOpsMockRecorder) GetMustGatherLogs(workDir, kubeconfigPath, mustGatherImg interface{}) *gomock.Call {
+func (mr *MockOpsMockRecorder) GetMustGatherLogs(workDir, kubeconfigPath interface{}, images ...interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMustGatherLogs", reflect.TypeOf((*MockOps)(nil).GetMustGatherLogs), workDir, kubeconfigPath, mustGatherImg)
+	varargs := append([]interface{}{workDir, kubeconfigPath}, images...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMustGatherLogs", reflect.TypeOf((*MockOps)(nil).GetMustGatherLogs), varargs...)
}

// CreateRandomHostname mocks base method
@@ -366,7 +371,7 @@ func (mr *MockOpsMockRecorder) EvaluateDiskSymlink(arg0 interface{}) *gomock.Cal
 }
 
 // CreateManifests mocks base method
-func (m *MockOps) CreateManifests(arg0 string, arg1 string) error {
+func (m *MockOps) CreateManifests(arg0, arg1 string) error {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "CreateManifests", arg0, arg1)
 	ret0, _ := ret[0].(error)
@@ -374,7 +379,7 @@ func (m *MockOps) CreateManifests(arg0 string, arg1 string) error {
 }
 
 // CreateManifests indicates an expected call of CreateManifests
-func (mr *MockOpsMockRecorder) CreateManifests(arg0 interface{}, arg1 interface{}) *gomock.Call {
+func (mr *MockOpsMockRecorder) CreateManifests(arg0, arg1 interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateManifests", reflect.TypeOf((*MockOps)(nil).CreateManifests), arg0, arg1)
 }
diff --git a/src/ops/ops.go b/src/ops/ops.go
index 066f74dd1..6d0e22b2d 100644
--- a/src/ops/ops.go
+++ b/src/ops/ops.go
@@ -43,7 +43,7 @@ type Ops interface {
 	UploadInstallationLogs(isBootstrap bool) (string, error)
 	ReloadHostFile(filepath string) error
 	CreateOpenshiftSshManifest(filePath, template, sshPubKeyPath string) error
-	GetMustGatherLogs(workDir, kubeconfigPath, mustGatherImg string) (string, error)
+	GetMustGatherLogs(workDir, kubeconfigPath string, images ...string) (string, error)
 	CreateRandomHostname(hostname string) error
 	GetHostname() (string, error)
 	EvaluateDiskSymlink(string) string
@@ -514,16 +514,13 @@ func (o *ops) CreateOpenshiftSshManifest(filePath, tmpl, sshPubKeyPath string) e
 	return nil
 }
 
-func (o *ops) GetMustGatherLogs(workDir, kubeconfigPath, mustGatherImg string) (string, error) {
+func (o *ops) GetMustGatherLogs(workDir, kubeconfigPath string, images ...string) (string, error) {
 	//invoke oc adm must-gather command in the working directory
-	var imageOption string
-	if mustGatherImg == "" {
-		o.log.Infof("collecting must-gather logs into %s using image from release", workDir)
-		imageOption = ""
-	} else {
-		o.log.Infof("collecting must-gather logs into %s using image %s", workDir, mustGatherImg)
-		imageOption = fmt.Sprintf(" --image=%s", mustGatherImg)
+	var imageOption string = ""
+	for _, img := range images {
+		imageOption = imageOption + fmt.Sprintf(" --image=%s", img)
 	}
+
 	command := fmt.Sprintf("cd %s && oc --kubeconfig=%s adm must-gather%s", workDir, kubeconfigPath, imageOption)
 	output, err := o.ExecCommand(o.logWriter, "bash", "-c", command)
 	if err != nil {

From 57c4c942d12416ae3821d7df72b3558137640d57 Mon Sep 17 00:00:00 2001
From: Eran Cohen
Date: Sun, 25 Jul 2021 13:31:18 +0300
Subject: [PATCH 12/43] Bug 1981465: Assisted installer wait for ready master
 nodes on bootstrap kube-apiserver though the kube-apiserver moved to one of
 the masters (#327)

On the bootstrap node the assisted-installer uses the loopback kubeconfig
to query the kube-apiserver for the number of ready master nodes.
Usually both master nodes join the cluster and become ready before
bootkube takes down the bootstrap control plane, so the loopback
kubeconfig works. But when cluster bootstrap finishes before the two
master nodes are ready, the assisted-installer will wait forever, since
it is using the loopback kubeconfig while the bootstrap control plane is
down, resulting in "connection refused".

The assisted-installer should query the kube-apiserver running on one of
the master nodes; for that to work, it should use the real kubeconfig
instead of the loopback kubeconfig.
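As a rough illustration of the ordering hazard, here is a toy model
(made-up types, not the installer's real client code): the loopback
kubeconfig points at the bootstrap-local kube-apiserver, which disappears
once bootkube tears down the bootstrap control plane, while the regular
kubeconfig keeps reaching the apiservers on the masters.

```go
package main

import (
	"errors"
	"fmt"
)

// endpoint is a toy stand-in for a kube-apiserver reachable via a kubeconfig.
type endpoint struct {
	name  string
	alive bool
}

// readyMasters models the "count ready master nodes" query.
func readyMasters(ep endpoint, ready int) (int, error) {
	if !ep.alive {
		return 0, errors.New(ep.name + ": connection refused")
	}
	return ready, nil
}

func main() {
	// Scenario from the bug: bootkube already tore down the bootstrap
	// control plane while only one master is ready so far.
	loopback := endpoint{name: "loopback kube-apiserver", alive: false}
	cluster := endpoint{name: "cluster kube-apiserver", alive: true}

	// Polling via the loopback kubeconfig can never succeed again.
	if _, err := readyMasters(loopback, 1); err != nil {
		fmt.Println("loopback poll would spin forever:", err)
	}

	// Polling via the real kubeconfig still observes progress.
	n, _ := readyMasters(cluster, 1)
	fmt.Printf("cluster poll sees %d ready master(s) and can keep waiting\n", n)
}
```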
--- src/installer/installer.go | 24 +++++++++--------------- src/installer/installer_test.go | 14 ++++++-------- 2 files changed, 15 insertions(+), 23 deletions(-) diff --git a/src/installer/installer.go b/src/installer/installer.go index ab43e0a83..df4ea6313 100644 --- a/src/installer/installer.go +++ b/src/installer/installer.go @@ -26,7 +26,6 @@ import ( const ( InstallDir = "/opt/install-dir" - KubeconfigPathLoopBack = "/opt/openshift/auth/kubeconfig-loopback" KubeconfigPath = "/opt/openshift/auth/kubeconfig" minMasterNodes = 2 dockerConfigFile = "/root/.docker/config.json" @@ -338,12 +337,18 @@ func (i *installer) waitForNetworkType(kc k8s_client.K8SClient) error { } func (i *installer) waitForControlPlane(ctx context.Context) error { - kc, err := i.kcBuilder(KubeconfigPathLoopBack, i.log) + err := i.ops.ReloadHostFile("/etc/resolv.conf") + if err != nil { + i.log.WithError(err).Error("Failed to reload resolv.conf") + return err + } + kc, err := i.kcBuilder(KubeconfigPath, i.log) if err != nil { i.log.Error(err) return err } i.UpdateHostInstallProgress(models.HostStageWaitingForControlPlane, "") + if err = i.waitForMinMasterNodes(ctx, kc); err != nil { return err } @@ -365,7 +370,7 @@ func (i *installer) waitForControlPlane(ctx context.Context) error { i.waitForBootkube(ctx) // waiting for controller pod to be running - if err := i.waitForController(); err != nil { + if err := i.waitForController(kc); err != nil { i.log.Error(err) return err } @@ -470,20 +475,9 @@ func (i *installer) waitForBootkube(ctx context.Context) { } } -func (i *installer) waitForController() error { +func (i *installer) waitForController(kc k8s_client.K8SClient) error { i.log.Infof("Waiting for controller to be ready") i.UpdateHostInstallProgress(models.HostStageWaitingForController, "waiting for controller pod ready event") - err := i.ops.ReloadHostFile("/etc/resolv.conf") - if err != nil { - i.log.WithError(err).Error("Failed to reload resolv.conf") - return err - } - - kc, err := i.kcBuilder(KubeconfigPath, i.log) - if err != nil { - i.log.WithError(err).Errorf("Failed to create kc client from %s", KubeconfigPath) - return err - } events := map[string]string{} tickerUploadLogs := time.NewTicker(5 * time.Minute) diff --git a/src/installer/installer_test.go b/src/installer/installer_test.go index 56665a4b4..8703800fd 100644 --- a/src/installer/installer_test.go +++ b/src/installer/installer_test.go @@ -404,7 +404,7 @@ var _ = Describe("installer HostRoleMaster role", func() { Expect(ret).Should(Equal(err)) }) }) - Context("Bootstrap role waiting for controller", func() { + Context("Bootstrap role waiting for control plane", func() { conf := config.Config{Role: string(models.HostRoleBootstrap), ClusterID: "cluster-id", @@ -417,21 +417,19 @@ var _ = Describe("installer HostRoleMaster role", func() { BeforeEach(func() { installerObj = NewAssistedInstaller(l, conf, mockops, mockbmclient, k8sBuilder, mockIgnition) }) + It("waitForControlPlane reload resolv.conf failed", func() { + mockops.EXPECT().ReloadHostFile("/etc/resolv.conf").Return(fmt.Errorf("failed to load file")).Times(1) - It("waitForController reload resolv.conf failed", func() { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) - mockops.EXPECT().ReloadHostFile("/etc/resolv.conf").Return(fmt.Errorf("dummy")).Times(1) - - err := installerObj.waitForController() + err := installerObj.waitForControlPlane(context.Background()) 
 			Expect(err).To(HaveOccurred())
 		})
+
 		It("waitForController reload get pods fails then succeeds", func() {
-			resolvConfSuccess()
 			reportLogProgressSuccess()
 			mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1)
 			mockk8sclient.EXPECT().GetPods("assisted-installer", gomock.Any(), "").Return(nil, fmt.Errorf("dummy")).Times(1)
 			mockk8sclient.EXPECT().ListEvents(assistedControllerNamespace).Return(&events, nil).Times(1)
-			err := installerObj.waitForController()
+			err := installerObj.waitForController(mockk8sclient)
 			Expect(err).NotTo(HaveOccurred())
 		})
 		It("Configuring state", func() {

From e285f301ffdc0296659346d66f9013b388632132 Mon Sep 17 00:00:00 2001
From: Igal Tsoiref
Date: Tue, 27 Jul 2021 23:28:45 +0300
Subject: [PATCH 13/43] OCPBUGSM-27526: Cluster deployment freeze on
 Finalizing stage forever on Cluster Version Operator (#334)

Fix for a very specific case when the CVO has new messages all the time but
in reality it is stuck. Adding CVOMaxTimeout of 3 hours.
---
 .../assisted_installer_controller.go      |  8 ++++--
 .../assisted_installer_controller_test.go | 26 ++++++++++++++++++-
 2 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index 47de57ccb..f6e268f14 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -62,6 +62,7 @@ var (
 	DNSAddressRetryInterval = 20 * time.Second
 	DeletionRetryInterval   = 10 * time.Second
 	LongWaitTimeout         = 10 * time.Hour
+	CVOMaxTimeout           = 3 * time.Hour
 )
 
 // assisted installer controller is added to control installation process after bootstrap pivot
@@ -832,12 +833,15 @@ func (c controller) validateConsoleAvailability() bool {
 // waitingForClusterVersion checks the Cluster Version Operator availability in the
 // new OCP cluster. A success would be announced only when the service acknowledges
 // the CVO availability, in order to avoid unsynced scenarios.
+// In case the CVO changes its message we will reset the timer, but we still
+// want to enforce a maximum timeout; for that, a context with timeout is used.
 func (c controller) waitingForClusterVersion(ctx context.Context) error {
+	ctxWithTimeout, cancel := context.WithTimeout(ctx, CVOMaxTimeout)
+	defer cancel()
 	isClusterVersionAvailable := func(timer *time.Timer) bool {
 		return c.isOperatorAvailable(NewClusterVersionHandler(c.kc, timer))
 	}
-
-	return utils.WaitForPredicateWithTimer(ctx, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable)
+	return utils.WaitForPredicateWithTimer(ctxWithTimeout, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable)
 }
 
 func (c controller) sendCompleteInstallation(ctx context.Context, isSuccess bool, errorInfo string) {
diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go
index 307077d8d..850cfeec3 100644
--- a/src/assisted_installer_controller/assisted_installer_controller_test.go
+++ b/src/assisted_installer_controller/assisted_installer_controller_test.go
@@ -1283,6 +1283,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		BeforeEach(func() {
 			GeneralProgressUpdateInt = 100 * time.Millisecond
 			WaitTimeout = 150 * time.Millisecond
+			CVOMaxTimeout = 1 * time.Second
 		})
 
 		for i := range tests {
@@ -1337,8 +1338,31 @@ var _ = Describe("installer HostRoleMaster role", func() {
 			mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).AnyTimes()
 
 			err := func() error {
-				ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+				ctxTimeout, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 				defer cancel()
+				return assistedController.waitingForClusterVersion(ctxTimeout)
+			}()
+
+			Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue())
+		})
+
+		It("service fail to sync - maxTimeout applied", func() {
+			WaitTimeout = 1 * time.Second
+			CVOMaxTimeout = 200 * time.Millisecond
+			currentServiceCVOStatus := &models.MonitoredOperator{Status: models.OperatorStatusProgressing, StatusInfo: ""}
+			clusterVersionReport := &configv1.ClusterVersion{
+				Status: configv1.ClusterVersionStatus{
+					Conditions: []configv1.ClusterOperatorStatusCondition{
+						{Type: configv1.OperatorAvailable, Status: configv1.ConditionTrue, Message: ""},
+					},
+				},
+			}
+
+			mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).AnyTimes()
+			mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(currentServiceCVOStatus, nil).AnyTimes()
+			mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).AnyTimes()
+
+			err := func() error {
 				return assistedController.waitingForClusterVersion(ctx)
 			}()

From 5273a2c3a8c84e5e41493b59493f392b1b3ae931 Mon Sep 17 00:00:00 2001
From: slavie
Date: Wed, 28 Jul 2021 15:57:27 +0300
Subject: [PATCH 14/43] MGMT-4893: quote must-gather-image env variable in
 controller manifest (#335)

This will allow both string and JSON input to the must-gather image
---
 .../assisted-installer-controller-cm.yaml.template | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template b/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template
index e5e58e057..f888865b0 100644
--- a/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template
+++ b/deploy/assisted-installer-controller/assisted-installer-controller-cm.yaml.template
@@ -12,4 +12,4 @@ data:
   ca-cert-path: '{{.CACertPath}}'
   check-cluster-version: '{{.CheckCVO}}'
   high-availability-mode: {{.HaMode}}
-  must-gather-image: {{.MustGatherImage}}
\ No newline at end of file
+  must-gather-image: '{{.MustGatherImage}}'
\ No newline at end of file

From 8c2351a016d7e66f712ed77642c7cf9770c726d2 Mon Sep 17 00:00:00 2001
From: Igal Tsoiref
Date: Wed, 4 Aug 2021 07:45:09 +0300
Subject: [PATCH 15/43] MGMT-3817: assisted-controller should always send
 logs (#337)

Right now, in case the kube-api-server is not reachable, we will not send
any logs. This code changes that: from now on, in case of a kube-api error,
we will send this error as a log.
---
 .../assisted_installer_controller_test.go | 1 +
 src/common/common.go                      | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go
index 850cfeec3..078cfe4ee 100644
--- a/src/assisted_installer_controller/assisted_installer_controller_test.go
+++ b/src/assisted_installer_controller/assisted_installer_controller_test.go
@@ -888,6 +888,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 			reportLogProgressSuccess()
 			mockk8sclient.EXPECT().GetPods(assistedController.Namespace, gomock.Any(), fmt.Sprintf("status.phase=%s", v1.PodRunning)).Return([]v1.Pod{pod}, nil).MinTimes(1)
 			mockk8sclient.EXPECT().GetPodLogsAsBuffer(assistedController.Namespace, "test", gomock.Any()).Return(nil, fmt.Errorf("dummy")).MinTimes(1)
+			mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(nil).MinTimes(1)
 			ctx, cancel := context.WithCancel(context.Background())
 			wg.Add(1)
 			go assistedController.UploadLogs(ctx, &wg)
diff --git a/src/common/common.go b/src/common/common.go
index 40bb1e0bc..b8a090e82 100644
--- a/src/common/common.go
+++ b/src/common/common.go
@@ -1,6 +1,7 @@
 package common
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	"regexp"
@@ -97,7 +98,8 @@ func UploadPodLogs(kc k8s_client.K8SClient, ic inventory_client.InventoryClient,
 	log.Infof("Uploading logs for %s in %s", podName, namespace)
 	podLogs, err := kc.GetPodLogsAsBuffer(namespace, podName, sinceSeconds)
 	if err != nil {
-		return errors.Wrapf(err, "Failed to get logs of pod %s", podName)
+		podLogs = &bytes.Buffer{}
+		podLogs.WriteString(errors.Wrapf(err, "Failed to get logs of pod %s", podName).Error())
 	}
 	pr, pw := io.Pipe()
 	defer pr.Close()

From 2403dad3795406f2c5d923af0894e07bc8b0bdc4 Mon Sep 17 00:00:00 2001
From: Igal Tsoiref
Date: Thu, 5 Aug 2021 12:40:56 +0300
Subject: [PATCH 16/43] MGMT-7452: Remove token from
 assisted-installer-controller log (#338)

---
 .../assisted_installer_controller.go | 4 ++--
 src/ops/ops.go                       | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index f6e268f14..e1ed7942b 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -70,9 +70,9 @@ var (
 // as a first step it will wait till nodes are added to cluster and update their status to Done
 type ControllerConfig struct {
-	ClusterID            string `envconfig:"CLUSTER_ID" required:"true" `
+	ClusterID            string `envconfig:"CLUSTER_ID" required:"true"`
 	URL                  string `envconfig:"INVENTORY_URL" required:"true"`
-	PullSecretToken      string `envconfig:"PULL_SECRET_TOKEN" required:"true"`
+	PullSecretToken      string `envconfig:"PULL_SECRET_TOKEN" required:"true" secret:"true"`
 	SkipCertVerification bool   `envconfig:"SKIP_CERT_VERIFICATION" required:"false" default:"false"`
 	CACertPath           string `envconfig:"CA_CERT_PATH" required:"false" default:""`
 	Namespace            string `envconfig:"NAMESPACE" required:"false" default:"assisted-installer"`
diff --git a/src/ops/ops.go b/src/ops/ops.go
index 6d0e22b2d..ee8b66a4e 100644
--- a/src/ops/ops.go
+++ b/src/ops/ops.go
@@ -238,7 +238,7 @@ func (o *ops) SetBootOrder(device string) error {
 }
 
 func (o *ops) ExtractFromIgnition(ignitionPath string, fileToExtract string) error {
-	o.log.Infof("Getting pull secret from %s", ignitionPath)
+	o.log.Infof("Getting data from %s", ignitionPath)
 	ignitionData, err := ioutil.ReadFile(ignitionPath)
 	if err != nil {
 		o.log.Errorf("Error occurred while trying to read %s : %e", ignitionPath, err)

From 424639f00850b7f634f889e71e5e3188bae1cc78 Mon Sep 17 00:00:00 2001
From: Igal Tsoiref
Date: Thu, 5 Aug 2021 15:30:12 +0300
Subject: [PATCH 17/43] OCPBUGSM-32117: Adding IPv6 support to DNS busy
 address workaround (#339)

We have a workaround that deletes the service that took the address of the
DNS service. Till now it supported only IPv4. This change adds IPv6 support.
---
 .../assisted_installer_controller.go      | 19 ++++++++++---------
 .../assisted_installer_controller_test.go | 14 +++++++++++---
 2 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index e1ed7942b..96706c7d8 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -254,13 +254,14 @@ func (c *controller) HackDNSAddressConflict(wg *sync.WaitGroup) {
 		return
 	}
 
-	ip, _, _ := net.ParseCIDR(networks[0])
-	ip4 := ip.To4()
-	if ip4 == nil {
-		c.log.Infof("Service network is IPv6: %s, skipping the .10 address hack", ip)
+	netIp, _, _ := net.ParseCIDR(networks[0])
+	ip := netIp.To16()
+	if ip == nil {
+		c.log.Infof("Failed to parse service network cidr %s, skipping", networks[0])
 		return
 	}
-	ip4[3] = 10 // .10 is the conflicting address
+
+	ip[len(ip)-1] = 10 // .10 or :a is the conflicting address
 
 	for i := 0; i < maxDNSServiceIPAttempts; i++ {
 		svs, err := c.kc.ListServices("")
@@ -269,17 +270,17 @@
 			time.Sleep(DNSAddressRetryInterval)
 			continue
 		}
-		s := c.findServiceByIP(ip4.String(), &svs.Items)
+		s := c.findServiceByIP(ip.String(), &svs.Items)
 		if s == nil {
-			c.log.Infof("No service found with IP %s, attempt %d/%d", ip4, i+1, maxDNSServiceIPAttempts)
+			c.log.Infof("No service found with IP %s, attempt %d/%d", ip, i+1, maxDNSServiceIPAttempts)
 			time.Sleep(DNSAddressRetryInterval)
 			continue
 		}
 		if s.Name == dnsServiceName && s.Namespace == dnsServiceNamespace {
-			c.log.Infof("Service %s has successfully taken IP %s", dnsServiceName, ip4)
+			c.log.Infof("Service %s has successfully taken IP %s", dnsServiceName, ip)
 			break
 		}
-		c.log.Warnf("Deleting service %s in namespace %s whose IP %s conflicts with %s", s.Name, s.Namespace, ip4, dnsServiceName)
+		c.log.Warnf("Deleting service %s in namespace %s whose IP %s conflicts with %s", s.Name, s.Namespace, ip, dnsServiceName)
 		if err := c.killConflictingService(s); err != nil {
 			c.log.WithError(err).Warnf("Failed to delete service %s in namespace %s", s.Name, s.Namespace)
 			continue
diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go
index 078cfe4ee..86990fb4e 100644
--- a/src/assisted_installer_controller/assisted_installer_controller_test.go
+++ b/src/assisted_installer_controller/assisted_installer_controller_test.go
@@ -191,7 +191,7 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		setConsoleAsAvailable(clusterID)
 	}
 
-	returnServiceWithDot10Address := func(name, namespace string) *gomock.Call {
+	returnServiceWithAddress := func(name, namespace, ip string) *gomock.Call {
 		return mockk8sclient.EXPECT().ListServices("").Return(&v1.ServiceList{
 			Items: []v1.Service{
 				{
@@ -200,13 +200,17 @@
 						Namespace: namespace,
 					},
 					Spec: v1.ServiceSpec{
-						ClusterIP: "10.56.20.10",
+						ClusterIP: ip,
 					},
 				},
 			},
 		}, nil)
 	}
 
+	returnServiceWithDot10Address := func(name, namespace string) *gomock.Call {
+		return returnServiceWithAddress(name, namespace, "10.56.20.10")
+	}
+
 	returnServiceNetwork := func() {
 		mockk8sclient.EXPECT().GetServiceNetworks().Return([]string{"10.56.20.0/24"}, nil)
 	}
@@ -1415,8 +1419,12 @@
 			mockk8sclient.EXPECT().GetServiceNetworks().Return(nil, errors.New("get service network failed"))
 			hackConflict()
 		})
-		It("Exit if service network is IPv6", func() {
+		It("Kill service and DNS pods if DNS service IP is taken in IPV6 env", func() {
 			mockk8sclient.EXPECT().GetServiceNetworks().Return([]string{"2002:db8::/64"}, nil)
+			returnServiceWithAddress(conflictServiceName, conflictServiceNamespace, "2002:db8::a")
+			mockk8sclient.EXPECT().DeleteService(conflictServiceName, conflictServiceNamespace).Return(nil)
+			mockk8sclient.EXPECT().DeletePods(dnsOperatorNamespace).Return(nil)
+			returnServiceWithAddress(dnsServiceName, dnsServiceNamespace, "2002:db8::a")
 			hackConflict()
 		})
 		It("Retry if list services fails", func() {

From 99a632124fb2babb1557cf0f97b4af908a20f845 Mon Sep 17 00:00:00 2001
From: Yevgeny Shnaidman <60741237+yevgeny-shnaidman@users.noreply.github.com>
Date: Sun, 8 Aug 2021 17:44:32 +0300
Subject: [PATCH 18/43] MGMT-7417: Change installer and installer controller
 to support infra-env for the host (#336)

Assisted-installer will get infra-env-id as part of the install command
arguments, and will use it to update host progress and to download the host
ignition.

Assisted-installer-controller will use each host's InfraEnvID field to
update its progress in assisted-service.
---
 go.mod                                        |  16 +--
 go.sum                                        | 103 ++++++++----------
 .../assisted_installer_controller.go          |   4 +-
 .../assisted_installer_controller_test.go     |  30 +++--
 src/common/common.go                          |   2 +-
 src/common/common_test.go                     |  15 +--
 src/config/config.go                          |   5 +
 src/installer/installer.go                    |   4 +-
 src/installer/installer_test.go               |  53 +++++----
 src/inventory_client/inventory_client.go      |  16 +--
 src/inventory_client/inventory_client_test.go |  29 ++---
 src/inventory_client/mock_inventory_client.go |  16 +--
 src/ops/coreos_installer_log_writer.go        |   6 +-
 src/ops/coreos_installer_log_writer_test.go   |   6 +-
 src/ops/ops.go                                |   2 +-
 15 files changed, 159 insertions(+), 148 deletions(-)

diff --git a/go.mod b/go.mod
index 2ae12111d..200d56eba 100644
--- a/go.mod
+++ b/go.mod
@@ -8,6 +8,7 @@ require (
 	github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 // indirect
 	github.com/benbjohnson/clock v1.0.3 // indirect
 	github.com/coreos/ignition/v2 v2.10.1
github.com/coreos/ignition/v2 v2.10.1 + github.com/go-logr/logr v0.4.0 // indirect github.com/go-openapi/runtime v0.19.28 github.com/go-openapi/strfmt v0.20.1 github.com/go-openapi/swag v0.19.9 @@ -16,11 +17,11 @@ require ( github.com/hashicorp/go-version v1.3.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/metal3-io/baremetal-operator v0.0.0 - github.com/onsi/ginkgo v1.16.2 - github.com/onsi/gomega v1.12.0 + github.com/onsi/ginkgo v1.16.4 + github.com/onsi/gomega v1.13.0 github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible github.com/openshift/assisted-installer-agent v0.0.0-20200811180147-bc9c7b899b8a - github.com/openshift/assisted-service v1.0.10-0.20210526082015-cf99d1fca3fe + github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515 github.com/openshift/client-go v0.0.0-20201020074620-f8fd44879f7c github.com/openshift/machine-api-operator v0.2.1-0.20201002104344-6abfb5440597 github.com/operator-framework/api v0.8.0 @@ -30,13 +31,12 @@ require ( github.com/thoas/go-funk v0.8.0 github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 - golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c gopkg.in/yaml.v2 v2.4.0 - honnef.co/go/tools v0.0.1-2020.1.6 // indirect - k8s.io/api v0.21.0 - k8s.io/apimachinery v0.21.0 + k8s.io/api v0.21.1 + k8s.io/apimachinery v0.21.1 k8s.io/client-go v12.0.0+incompatible - sigs.k8s.io/controller-runtime v0.8.3 + sigs.k8s.io/controller-runtime v0.9.0 ) replace ( diff --git a/go.sum b/go.sum index 01b6abb0d..c7ec41043 100644 --- a/go.sum +++ b/go.sum @@ -70,7 +70,6 @@ github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -118,7 +117,6 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -153,7 +151,6 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.30.28/go.mod 
h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.32.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.21/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.34.28 h1:sscPpn/Ns3i0F4HPEWAVcwdIRaZZCuL7llJ2/60yPIk= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 h1:0NmehRCgyk5rljDQLKUO+cRJCnduDyn11+zGZIc9Z48= @@ -188,7 +185,6 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3k github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc= github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= @@ -312,7 +308,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/diskfs/go-diskfs v1.1.2-0.20210216073915-ba492710e2d8 h1:1QzBnogt3Wut5Qw/6qKFLey1fjBgRHPp6deHCLmsKe4= github.com/diskfs/go-diskfs v1.1.2-0.20210216073915-ba492710e2d8/go.mod h1:ZTeTbzixuyfnZW5y5qKMtjV2o+GLLHo1KfMhotJI4Rk= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -324,7 +319,6 @@ github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY= github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -342,7 +336,6 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize 
v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -382,7 +375,6 @@ github.com/filanov/stateswitch v0.0.0-20200714113403-51a42a34c604/go.mod h1:GYnX github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= @@ -395,6 +387,7 @@ github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05 github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= @@ -409,6 +402,7 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -591,7 +585,6 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -674,7 +667,6 @@ 
github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20190601041439-ed7b1b5ee0f8/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -792,9 +784,7 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= @@ -807,13 +797,13 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jsonnet-bundler/jsonnet-bundler v0.2.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -837,7 +827,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -945,7 +934,6 @@ github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/moby v1.13.1 h1:mC5WwQwCXt/dYxZ1cIrRsnJAWw7VdtcTZUIGr4tXzOM= github.com/moby/moby v1.13.1/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.3.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= @@ -1005,8 +993,9 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1016,8 +1005,8 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.12.0 h1:p4oGGk2M2UJc0wWN4lHFvIB71lxsh0T/UiKCCgFADY8= -github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1039,19 +1028,17 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.m github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod 
h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/openshift-online/ocm-sdk-go v0.1.160 h1:IrmZoTmPiqa8VmdthDcztWHmXHeHiFDvF5wife6hYdc= -github.com/openshift-online/ocm-sdk-go v0.1.160/go.mod h1:9y8jM+VhZdl5VLy8l3RI+uDltcWPL1oW6lqtjtoDHqY= -github.com/openshift-online/ocm-sdk-go v0.1.165 h1:NndMhSbJzTsBgZuoIgDhHHmk6pgF9rYmJOYikAunJxg= -github.com/openshift-online/ocm-sdk-go v0.1.165/go.mod h1:/DStCZJQ2XOV/ktkODyVnUCPnGfH3agwp0e+GZTLr3E= +github.com/openshift-online/ocm-sdk-go v0.1.190 h1:GKQbhOeNIHNVQGBAPKhzPyUTrKKatz2j4d4AU2DNnJQ= +github.com/openshift-online/ocm-sdk-go v0.1.190/go.mod h1:XpupkiWFXkiAPdgS8Dq7Gknk2E6AximJnpC98Hk4fl4= github.com/openshift/api v0.0.0-20200901182017-7ac89ba6b971 h1:l4jU2pbYCFlWffDL8gQaN24UohhHI8Zq/zmiSpYzy7o= github.com/openshift/api v0.0.0-20200901182017-7ac89ba6b971/go.mod h1:M3xexPhgM8DISzzRpuFUy+jfPjQPIcs9yqEYj17mXV8= github.com/openshift/assisted-installer-agent v0.0.0-20200811180147-bc9c7b899b8a h1:mk+JGnFSuRTTWzODLs1gclp5om0+k4lH8btJSJxQA80= github.com/openshift/assisted-installer-agent v0.0.0-20200811180147-bc9c7b899b8a/go.mod h1:q1jXr/OgGA7bOBu1uzlZXOjExPHIoAXxzQlifXBmXLY= github.com/openshift/assisted-service v0.0.0-20200811075806-62dcbcd62c0b/go.mod h1:H96z5QdPNv7PZ+/p+VafuHyAUqlVcpOFTnfMl8QYzQ4= -github.com/openshift/assisted-service v1.0.10-0.20210310114450-f60ae14adc85 h1:SPISKZsxv4CWXd6sVFiS6jE5pUSLUVuPEFJXAtDOvrc= -github.com/openshift/assisted-service v1.0.10-0.20210310114450-f60ae14adc85/go.mod h1:73ccdCnFCgHCVdJJcAZUf/fM3AWOctmMosIRF+fgCNM= -github.com/openshift/assisted-service v1.0.10-0.20210526082015-cf99d1fca3fe h1:EW8FQ82Q7uDTWqsKURv9sRu31K4yeLRfXAaIO60e7Dg= -github.com/openshift/assisted-service v1.0.10-0.20210526082015-cf99d1fca3fe/go.mod h1:uRETrhDQ7oKwMmc45F4yXXnf+gfryMDJnXq7pPFcayU= +github.com/openshift/assisted-service v1.0.10-0.20210729090313-b33b6f69330b h1:8b4eTlKzJFVZmtwFHEjHflBPIv5UHx7Cqr7txb3Xf0I= +github.com/openshift/assisted-service v1.0.10-0.20210729090313-b33b6f69330b/go.mod h1:06CYHjrS5tanbGRM4ZB3Sd0gcaSIfMkXNjDmZVFAWhs= +github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515 h1:1CnH/Cy9KjPeT3+ThA6nLSUY3cA3IEix1k3rKvNuxJY= +github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515/go.mod h1:06CYHjrS5tanbGRM4ZB3Sd0gcaSIfMkXNjDmZVFAWhs= github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe h1:bu99IMkaN6o/JcxpWEb1eT8gDdL9hLcwOmfiVIbXWj8= github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe/go.mod h1:DOgBIuBcXuTD8uub0jL7h6gBdIBt3CFrwz6K2FtfMBA= github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= @@ -1066,9 +1053,7 @@ github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201016155852- github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201016155852-4090a6970205/go.mod h1:oOG/TNSBse4brosfLCH/G2Q/42ye+DZQq8VslA5SxOs= github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570 h1:Bmi2b7YADMXpNQ6EPV4rQqoVRSjj3dzDU3dSAEKXut0= github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570/go.mod h1:7NRECVE26rvP1/fs1CbhfY5gsgnnFQNhb9txTFzWmUw= -github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= 
github.com/openshift/custom-resource-status v1.1.0/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= -github.com/openshift/hive/apis v0.0.0-20210302234131-7026427c0ae5/go.mod h1:jpKB2/wcJC/WZF0xg2TqOrH6QHG+8/4eqFsAvZQkGdo= github.com/openshift/hive/apis v0.0.0-20210506000654-5c038fb05190/go.mod h1:Ujw9ImzSYvo9VlUX6Gjy7zPFP7xYUAU50tdf1wPpN6c= github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= github.com/openshift/machine-api-operator v0.2.1-0.20201026110925-50ea569da51b h1:vV5t7qPtp1GrowV+eJvUgzqxf/ZtSOvTsjLKgSN9caw= @@ -1129,7 +1114,6 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk= github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1139,7 +1123,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw= github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -1148,6 +1131,7 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.49.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1163,8 +1147,9 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= 
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1183,8 +1168,9 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1199,8 +1185,9 @@ github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= @@ -1253,11 +1240,9 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/slok/go-http-metrics v0.8.0 h1:rsIKW30MzLjbWRBkCQoe/Oxh/F283MKT6afdH3mXTaA= github.com/slok/go-http-metrics v0.8.0/go.mod h1:f22ekj0Ht4taz2clntVmLRSK4D+feX33zkdDW0Eytvk= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod 
h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -1302,8 +1287,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stripe/safesql v0.2.0/go.mod h1:q7b2n0JmzM1mVGfcYpanfVb2j23cXZeWFxcILPn3JV4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1329,7 +1315,6 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1373,6 +1358,7 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= @@ -1426,7 +1412,6 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee 
h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1468,8 +1453,8 @@ golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1491,7 +1476,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1500,8 +1484,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1556,6 +1540,7 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1574,8 +1559,10 @@ golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1672,8 +1659,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1757,7 +1749,6 @@ golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200430192856-2840dafb9ee1/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1769,10 +1760,9 @@ golang.org/x/tools v0.0.0-20200610160956-3e83d1e96d0e/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1879,7 +1869,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1889,7 +1878,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM= gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 
h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -1905,7 +1893,6 @@ gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0E gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -1950,8 +1937,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.6 h1:W18jzjh8mfPez+AwGLxmOImucz/IFjpNlrKVnaj2YVc= -honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= @@ -1963,6 +1948,7 @@ k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDd k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivoaLYccCffvQGI= k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= +k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= k8s.io/apiextensions-apiserver v0.20.0/go.mod h1:ZH+C33L2Bh1LY1+HphoRmN1IQVLTShVcTojivK3N9xg= k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= @@ -1978,6 +1964,7 @@ k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= +k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= @@ -1990,7 +1977,6 @@ k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ= k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s= -k8s.io/cli-runtime v0.20.5/go.mod h1:ihjPeQWDk7NGVIkNEvpwxA3gJvqtU+LtkDj11TvyXn4= k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/code-generator 
v0.0.0-20200214080538-dc8f3adce97c/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= @@ -2002,14 +1988,13 @@ k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZ k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= +k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.5/go.mod h1:l0isoBLGyQKwRoTWbPHR6jNDd3/VqQD43cNlsjddGng= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk= -k8s.io/component-helpers v0.20.5/go.mod h1:AzTdoPj6YAN2SUfhBX/FUUU3ntfFuse03q/VMLovEsE= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20191010091904-7fa3014cb28f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2033,6 +2018,7 @@ k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= @@ -2043,7 +2029,6 @@ k8s.io/kubectl v0.17.4/go.mod h1:im5QWmh6fvtmJkkNm4HToLe8z9aM3jihYK5X/wOybcY= k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU= k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= k8s.io/kubectl v0.20.0/go.mod h1:8x5GzQkgikz7M2eFGGuu6yOfrenwnw5g4RXOUgbjR1M= -k8s.io/kubectl v0.20.5/go.mod h1:mlNQgyV18D4XFt5BmfSkrxQNS+arT2pXDQxxnH5lMiw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= @@ -2051,7 +2036,6 @@ k8s.io/metrics v0.17.4/go.mod h1:6rylW2iD3M9VppnEAAtJASY1XS8Pt9tcYh+tHxBeV3I= k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4= k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw= -k8s.io/metrics v0.20.5/go.mod h1:vsptOayjKWKWHvWR1vFQY++vxydzaEo/2+JC7kSDKPU= k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod 
h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= @@ -2060,8 +2044,9 @@ k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index 96706c7d8..d8611b5fc 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -223,14 +223,14 @@ func (c *controller) waitAndUpdateNodesStatus() bool { if common.IsK8sNodeIsReady(node) { log.Infof("Found new ready node %s with inventory id %s, kubernetes id %s, updating its status to %s", node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageDone) - if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.ID.String(), models.HostStageDone, ""); err != nil { + if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.InfraEnvID.String(), host.Host.ID.String(), models.HostStageDone, ""); err != nil { log.WithError(err).Errorf("Failed to update node %s installation status", node.Name) continue } } else if host.Host.Progress.CurrentStage == models.HostStageConfiguring { log.Infof("Found new joined node %s with inventory id %s, kubernetes id %s, updating its status to %s", node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageJoined) - if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.ID.String(), models.HostStageJoined, ""); err != nil { + if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.InfraEnvID.String(), host.Host.ID.String(), models.HostStageJoined, ""); err != nil { log.WithError(err).Errorf("Failed to update node %s installation status", node.Name) continue } diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 86990fb4e..8f949b8c5 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -96,15 +96,16 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops = ops.NewMockOps(ctrl) mockbmclient = inventory_client.NewMockInventoryClient(ctrl) mockk8sclient = k8s_client.NewMockK8SClient(ctrl) + 
infraEnvId := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f50") node0Id := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f65") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node2Id := strfmt.UUID("b898d516-3e16-49d0-86a5-0ad5bd04e3ed") currentState := models.HostProgressInfo{CurrentStage: models.HostStageConfiguring} currentStatus := models.HostStatusInstallingInProgress inventoryNamesIds = map[string]inventory_client.HostData{ - "node0": {Host: &models.Host{ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, - "node1": {Host: &models.Host{ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} + "node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} kubeNamesIds = map[string]string{"node0": "6d6f00e8-70dd-48a5-859a-0f1459485ad9", "node1": "2834ff2e-8965-48a5-859a-0f1459485a77", "node2": "57df89ee-3546-48a5-859a-0f1459485a66"} @@ -123,17 +124,19 @@ var _ = Describe("installer HostRoleMaster role", func() { configuringSuccess := func() { mockk8sclient.EXPECT().GetPods(gomock.Any(), gomock.Any(), "").Return([]v1.Pod{}, nil).AnyTimes() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).AnyTimes() + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).AnyTimes() } updateProgressSuccess := func(stages []models.HostStage, inventoryNamesIds map[string]inventory_client.HostData) { var hostIds []string + var infraEnvIds []string for _, host := range inventoryNamesIds { hostIds = append(hostIds, host.Host.ID.String()) + infraEnvIds = append(infraEnvIds, host.Host.InfraEnvID.String()) } for i, stage := range stages { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(nil).Times(1) } } @@ -328,11 +331,13 @@ var _ = Describe("installer HostRoleMaster role", func() { BeforeEach(func() { updateProgressSuccess = func(stages []models.HostStage, inventoryNamesIds map[string]inventory_client.HostData) { var hostIds []string + var infraEnvIds []string for _, host := range inventoryNamesIds { hostIds = append(hostIds, host.Host.ID.String()) + infraEnvIds = append(infraEnvIds, host.Host.InfraEnvID.String()) } for i, stage := range stages { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(nil).Times(1) } } kubeNamesIds = map[string]string{"node0": "6d6f00e8-70dd-48a5-859a-0f1459485ad9", @@ -381,12 +386,14 @@ var _ = Describe("installer HostRoleMaster role", func() { It("UpdateStatus fails and then succeeds, list nodes failed ", func() { updateProgressSuccessFailureTest := func(stages []models.HostStage, inventoryNamesIds map[string]inventory_client.HostData) { var hostIds []string + var infraEnvIds []string for _, host := range inventoryNamesIds { hostIds = append(hostIds, host.Host.ID.String()) + infraEnvIds = append(infraEnvIds, host.Host.InfraEnvID.String()) } for i, 
stage := range stages { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostIds[i], stage, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvIds[i], hostIds[i], stage, "").Return(nil).Times(1) } } mockk8sclient.EXPECT().ListNodes().Return(GetKubeNodes(kubeNamesIds), nil).Times(2) @@ -1521,11 +1528,12 @@ func getClusterOperatorWithConditionsStatus(availableStatus, degradedStatus conf func create3Hosts(currentStatus string, stage models.HostStage) map[string]inventory_client.HostData { currentState := models.HostProgressInfo{CurrentStage: stage} + infraEnvId := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f50") node0Id := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f65") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node2Id := strfmt.UUID("b898d516-3e16-49d0-86a5-0ad5bd04e3ed") return map[string]inventory_client.HostData{ - "node0": {Host: &models.Host{ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, - "node1": {Host: &models.Host{ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} + "node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: ¤tState, Status: ¤tStatus}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: ¤tState, Status: ¤tStatus}}} } diff --git a/src/common/common.go b/src/common/common.go index b8a090e82..749f639c3 100644 --- a/src/common/common.go +++ b/src/common/common.go @@ -59,7 +59,7 @@ func SetConfiguringStatusForHosts(client inventory_client.InventoryClient, inven ctx := utils.GenerateRequestContext() requestLog := utils.RequestIDLogger(ctx, log) requestLog.Infof("Host %s %q found in mcs logs, moving it to %s state", hostName, host.Host.ID.String(), status) - if err := client.UpdateHostInstallProgress(ctx, host.Host.ID.String(), status, ""); err != nil { + if err := client.UpdateHostInstallProgress(ctx, host.Host.InfraEnvID.String(), host.Host.ID.String(), status, ""); err != nil { requestLog.Errorf("Failed to update node installation status, %s", err) continue } diff --git a/src/common/common_test.go b/src/common/common_test.go index 59e4f1a61..30614542a 100644 --- a/src/common/common_test.go +++ b/src/common/common_test.go @@ -33,23 +33,24 @@ var _ = Describe("verify common", func() { var logs string logsInBytes, _ := ioutil.ReadFile("../../test_files/mcs_logs.txt") logs = string(logsInBytes) + infraEnvId := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250") node0Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239") node2Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240") - testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, + testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, 
IPs: []string{"192.168.126.10", "192.168.11.122", "fe80::5054:ff:fe9a:4738"}}, - "node1": {Host: &models.Host{ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleWorker}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleWorker}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node2Id.String(), models.HostStageWaitingForIgnition, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node2Id.String(), models.HostStageWaitingForIgnition, gomock.Any()).Return(nil).Times(1) SetConfiguringStatusForHosts(mockbmclient, testInventoryIdsIps, logs, true, l) Expect(testInventoryIdsIps["node0"].Host.Progress.CurrentStage).Should(Equal(models.HostStageRebooting)) Expect(testInventoryIdsIps["node1"].Host.Progress.CurrentStage).Should(Equal(models.HostStageRebooting)) Expect(testInventoryIdsIps["node2"].Host.Progress.CurrentStage).Should(Equal(models.HostStageWaitingForIgnition)) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), node2Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node2Id.String(), models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) SetConfiguringStatusForHosts(mockbmclient, testInventoryIdsIps, logs, false, l) Expect(testInventoryIdsIps["node1"].Host.Progress.CurrentStage).Should(Equal(models.HostStageConfiguring)) Expect(testInventoryIdsIps["node2"].Host.Progress.CurrentStage).Should(Equal(models.HostStageConfiguring)) diff --git a/src/config/config.go b/src/config/config.go index fbabec49b..44f7c7506 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -15,6 +15,7 @@ import ( type Config struct { Role string ClusterID string + InfraEnvID string HostID string Device string URL string @@ -48,6 +49,7 @@ func ProcessArgs() { ret := &GlobalConfig flag.StringVar(&ret.Role, "role", string(models.HostRoleMaster), "The node role") flag.StringVar(&ret.ClusterID, "cluster-id", "", "The cluster id") + 
flag.StringVar(&ret.InfraEnvID, "infra-env-id", "", "This host infra env id") flag.StringVar(&ret.HostID, "host-id", "", "This host id") flag.StringVar(&ret.Device, "boot-device", "", "The boot device") flag.StringVar(&ret.URL, "url", "", "The BM inventory URL, including a scheme and optionally a port (overrides the host and port arguments") @@ -96,4 +98,7 @@ func ProcessArgs() { printHelpAndExit() } } + if ret.InfraEnvID == "" { + ret.InfraEnvID = ret.ClusterID + } } diff --git a/src/installer/installer.go b/src/installer/installer.go index df4ea6313..b72f2df4f 100644 --- a/src/installer/installer.go +++ b/src/installer/installer.go @@ -447,7 +447,7 @@ func (i *installer) UpdateHostInstallProgress(newStage models.HostStage, info st log := utils.RequestIDLogger(ctx, i.log) log.Infof("Updating node installation stage: %s - %s", newStage, info) if i.HostID != "" { - if err := i.inventoryClient.UpdateHostInstallProgress(ctx, i.HostID, newStage, info); err != nil { + if err := i.inventoryClient.UpdateHostInstallProgress(ctx, i.Config.InfraEnvID, i.Config.HostID, newStage, info); err != nil { log.Errorf("Failed to update node installation stage, %s", err) } } @@ -612,7 +612,7 @@ func (i *installer) updateReadyMasters(nodes *v1.NodeList, readyMasters *[]strin return fmt.Errorf("Node %s is not in inventory hosts", node.Name) } ctx = utils.GenerateRequestContext() - if err := i.inventoryClient.UpdateHostInstallProgress(ctx, host.Host.ID.String(), models.HostStageJoined, ""); err != nil { + if err := i.inventoryClient.UpdateHostInstallProgress(ctx, host.Host.InfraEnvID.String(), host.Host.ID.String(), models.HostStageJoined, ""); err != nil { utils.RequestIDLogger(ctx, i.log).Errorf("Failed to update node installation status, %s", err) } } diff --git a/src/installer/installer_test.go b/src/installer/installer_test.go index 8703800fd..b6a3b44f0 100644 --- a/src/installer/installer_test.go +++ b/src/installer/installer_test.go @@ -43,6 +43,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockIgnition *ignition.MockIgnition installerObj *installer hostId = "host-id" + infraEnvId = "infra-env-id" bootstrapIgn = "bootstrap.ign" openShiftVersion = "4.7" inventoryNamesHost map[string]inventory_client.HostData @@ -89,9 +90,9 @@ var _ = Describe("installer HostRoleMaster role", func() { updateProgressSuccess := func(stages [][]string) { for _, stage := range stages { if len(stage) == 2 { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStage(stage[0]), stage[1]).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStage(stage[0]), stage[1]).Return(nil).Times(1) } else { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStage(stage[0]), "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStage(stage[0]), "").Return(nil).Times(1) } } } @@ -109,7 +110,7 @@ var _ = Describe("installer HostRoleMaster role", func() { } waitForControllerSuccessfully := func(clusterId string) { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) mockk8sclient.EXPECT().GetPods("assisted-installer", gomock.Any(), 
"").Return([]v1.Pod{{TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{Name: common.AssistedControllerPrefix + "aasdasd"}, Status: v1.PodStatus{Phase: "Running"}}}, nil).Times(1) @@ -133,12 +134,13 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient = inventory_client.NewMockInventoryClient(ctrl) mockk8sclient = k8s_client.NewMockK8SClient(ctrl) mockIgnition = ignition.NewMockIgnition(ctrl) + nodesInfraEnvId := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f50") node0Id := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f65") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node2Id := strfmt.UUID("b898d516-3e16-49d0-86a5-0ad5bd04e3ed") - inventoryNamesHost = map[string]inventory_client.HostData{"node0": {Host: &models.Host{ID: &node0Id}, IPs: []string{"192.168.126.10"}}, - "node1": {Host: &models.Host{ID: &node1Id}, IPs: []string{"192.168.126.11"}}, - "node2": {Host: &models.Host{ID: &node2Id}, IPs: []string{"192.168.126.12"}}} + inventoryNamesHost = map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: nodesInfraEnvId, ID: &node0Id}, IPs: []string{"192.168.126.10"}}, + "node1": {Host: &models.Host{InfraEnvID: nodesInfraEnvId, ID: &node1Id}, IPs: []string{"192.168.126.11"}}, + "node2": {Host: &models.Host{InfraEnvID: nodesInfraEnvId, ID: &node2Id}, IPs: []string{"192.168.126.12"}}} }) k8sBuilder := func(configPath string, logger *logrus.Logger) (k8s_client.K8SClient, error) { return mockk8sclient, nil @@ -148,6 +150,7 @@ var _ = Describe("installer HostRoleMaster role", func() { conf := config.Config{Role: string(models.HostRoleBootstrap), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -194,11 +197,11 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().ListMasterNodes().Return(GetKubeNodes(map[string]string{}), nil).Times(1) kubeNamesIds = map[string]string{"node0": "7916fa89-ea7a-443e-a862-b3e930309f65"} mockk8sclient.EXPECT().ListMasterNodes().Return(GetKubeNodes(kubeNamesIds), nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node0"].Host.ID.String(), models.HostStageJoined, "").Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node0"].Host.InfraEnvID.String(), inventoryNamesHost["node0"].Host.ID.String(), models.HostStageJoined, "").Times(1) kubeNamesIds = map[string]string{"node0": "7916fa89-ea7a-443e-a862-b3e930309f65", "node1": "eb82821f-bf21-4614-9a3b-ecb07929f238"} mockk8sclient.EXPECT().ListMasterNodes().Return(GetKubeNodes(kubeNamesIds), nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node1"].Host.ID.String(), models.HostStageJoined, "").Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), inventoryNamesHost["node1"].Host.InfraEnvID.String(), inventoryNamesHost["node1"].Host.ID.String(), models.HostStageJoined, "").Times(1) } getNetworkTypeSuccessOpenshiftSDN := func() { mockk8sclient.EXPECT().GetNetworkType().Return("OpenshiftSDN", nil).Times(2) @@ -219,7 +222,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().PrepareController().Return(nil).Times(1) } waitForBootkubeSuccess := func() { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForBootkube, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, 
models.HostStageWaitingForBootkube, "").Return(nil).Times(1) mockops.EXPECT().ExecPrivilegeCommand(gomock.Any(), "stat", "/opt/openshift/.bootkube.done").Return("OK", nil).Times(1) } bootkubeStatusSuccess := func() { @@ -408,6 +411,7 @@ var _ = Describe("installer HostRoleMaster role", func() { conf := config.Config{Role: string(models.HostRoleBootstrap), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -426,7 +430,7 @@ var _ = Describe("installer HostRoleMaster role", func() { It("waitForController reload get pods fails then succeeds", func() { reportLogProgressSuccess() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStageWaitingForController, "waiting for controller pod ready event").Return(nil).Times(1) mockk8sclient.EXPECT().GetPods("assisted-installer", gomock.Any(), "").Return(nil, fmt.Errorf("dummy")).Times(1) mockk8sclient.EXPECT().ListEvents(assistedControllerNamespace).Return(&events, nil).Times(1) err := installerObj.waitForController(mockk8sclient) @@ -436,14 +440,15 @@ var _ = Describe("installer HostRoleMaster role", func() { var logs string logsInBytes, _ := ioutil.ReadFile("../../test_files/mcs_logs.txt") logs = string(logsInBytes) + infraEnvID := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250") node0Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239") node2Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240") - testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, + testInventoryIdsIps := map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: infraEnvID, ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.10", "192.168.11.122", "fe80::5054:ff:fe9a:4738"}}, - "node1": {Host: &models.Host{ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + "node1": {Host: &models.Host{InfraEnvID: infraEnvID, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvID, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(testInventoryIdsIps, nil).Times(1) mockops.EXPECT().GetMCSLogs().Return("", fmt.Errorf("dummy")).Times(1) @@ -451,9 +456,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().GetMCSLogs().Return("dummy logs", nil).Times(1) mockops.EXPECT().GetMCSLogs().Return(logs, 
nil).AnyTimes() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -464,12 +469,13 @@ var _ = Describe("installer HostRoleMaster role", func() { var logs string logsInBytes, _ := ioutil.ReadFile("../../test_files/mcs_logs.txt") logs = string(logsInBytes) + infraEnvId := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239") node2Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240") testInventoryIdsIps := map[string]inventory_client.HostData{ - "node1": {Host: &models.Host{ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, - "node2": {Host: &models.Host{ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().GetEnabledHostsNamesHosts(gomock.Any(), gomock.Any()).Return(testInventoryIdsIps, nil).Times(1) mockops.EXPECT().GetMCSLogs().Return("", fmt.Errorf("dummy")).Times(1) @@ -477,9 +483,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().GetMCSLogs().Return("dummy logs", nil).Times(1) mockops.EXPECT().GetMCSLogs().Return(logs, nil).AnyTimes() - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), 
gomock.Any(), gomock.Any(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f240", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "eb82821f-bf21-4614-9a3b-ecb07929f250", "eb82821f-bf21-4614-9a3b-ecb07929f239", models.HostStageConfiguring, gomock.Any()).Return(nil).Times(1) ctx, cancel := context.WithCancel(context.Background()) defer cancel() installerObj.updateConfiguringStatus(ctx) @@ -489,6 +495,7 @@ var _ = Describe("installer HostRoleMaster role", func() { installerArgs := []string{"-n", "--append-karg", "nameserver=8.8.8.8"} conf := config.Config{Role: string(models.HostRoleMaster), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -596,6 +603,7 @@ var _ = Describe("installer HostRoleMaster role", func() { Context("Worker role", func() { conf := config.Config{Role: string(models.HostRoleWorker), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -628,6 +636,7 @@ var _ = Describe("installer HostRoleMaster role", func() { conf := config.Config{Role: string(models.HostRoleMaster), ClusterID: "cluster-id", + InfraEnvID: "infra-env-id", HostID: "host-id", Device: "/dev/vda", URL: "https://assisted-service.com:80", @@ -674,7 +683,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().PrepareController().Return(nil).Times(1) } waitForBootkubeSuccess := func() { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), hostId, models.HostStageWaitingForBootkube, "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId, hostId, models.HostStageWaitingForBootkube, "").Return(nil).Times(1) mockops.EXPECT().ExecPrivilegeCommand(gomock.Any(), "stat", "/opt/openshift/.bootkube.done").Return("OK", nil).Times(1) } bootkubeStatusSuccess := func() { diff --git a/src/inventory_client/inventory_client.go b/src/inventory_client/inventory_client.go index cca2e44fe..14ff9eeb7 100644 --- a/src/inventory_client/inventory_client.go +++ b/src/inventory_client/inventory_client.go @@ -43,7 +43,7 @@ const ( type InventoryClient interface { DownloadFile(ctx context.Context, filename string, dest string) error DownloadHostIgnition(ctx context.Context, hostID string, dest string) error - UpdateHostInstallProgress(ctx context.Context, hostId string, newStage models.HostStage, info string) error + UpdateHostInstallProgress(ctx context.Context, infraEnvId string, hostId string, newStage models.HostStage, info string) error GetEnabledHostsNamesHosts(ctx context.Context, log logrus.FieldLogger) (map[string]HostData, error) UploadIngressCa(ctx context.Context, ingressCA string, clusterId string) error GetCluster(ctx context.Context) (*models.Cluster, error) @@ -53,7 +53,7 @@ type InventoryClient interface { GetHosts(ctx context.Context, log logrus.FieldLogger, skippedStatuses []string) (map[string]HostData, error) UploadLogs(ctx context.Context, clusterId string, logsType models.LogsType, upfile io.Reader) error ClusterLogProgressReport(ctx context.Context, clusterId string, progress models.LogsState) - HostLogProgressReport(ctx context.Context, clusterId string, hostId string, progress models.LogsState) + HostLogProgressReport(ctx 
context.Context, infraEnvId string, hostId string, progress models.LogsState) UpdateClusterOperator(ctx context.Context, clusterId string, operatorName string, operatorStatus models.OperatorStatus, operatorStatusInfo string) error } @@ -207,8 +207,8 @@ func (c *inventoryClient) DownloadHostIgnition(ctx context.Context, hostID strin return aserror.GetAssistedError(err) } -func (c *inventoryClient) UpdateHostInstallProgress(ctx context.Context, hostId string, newStage models.HostStage, info string) error { - _, err := c.ai.Installer.UpdateHostInstallProgress(ctx, c.createUpdateHostInstallProgressParams(hostId, newStage, info)) +func (c *inventoryClient) UpdateHostInstallProgress(ctx context.Context, infraEnvId, hostId string, newStage models.HostStage, info string) error { + _, err := c.ai.Installer.V2UpdateHostInstallProgress(ctx, c.createUpdateHostInstallProgressParams(infraEnvId, hostId, newStage, info)) return aserror.GetAssistedError(err) } @@ -291,10 +291,10 @@ func (c *inventoryClient) createDownloadParams(filename string) *installer.Downl } } -func (c *inventoryClient) createUpdateHostInstallProgressParams(hostId string, newStage models.HostStage, info string) *installer.UpdateHostInstallProgressParams { - return &installer.UpdateHostInstallProgressParams{ - ClusterID: c.clusterId, - HostID: strfmt.UUID(hostId), +func (c *inventoryClient) createUpdateHostInstallProgressParams(infraEnvId, hostId string, newStage models.HostStage, info string) *installer.V2UpdateHostInstallProgressParams { + return &installer.V2UpdateHostInstallProgressParams{ + InfraEnvID: strfmt.UUID(infraEnvId), + HostID: strfmt.UUID(hostId), HostProgress: &models.HostProgress{ CurrentStage: newStage, ProgressInfo: info, diff --git a/src/inventory_client/inventory_client_test.go b/src/inventory_client/inventory_client_test.go index 3cfc2d866..985f8bf2d 100644 --- a/src/inventory_client/inventory_client_test.go +++ b/src/inventory_client/inventory_client_test.go @@ -29,10 +29,11 @@ const ( var _ = Describe("inventory_client_tests", func() { var ( - clusterID = "cluster-id" - logger = logrus.New() - client *inventoryClient - server *ghttp.Server + clusterID = "cluster-id" + infraEnvID = "infra-env-id" + logger = logrus.New() + client *inventoryClient + server *ghttp.Server ) AfterEach(func() { @@ -62,26 +63,26 @@ var _ = Describe("inventory_client_tests", func() { It("positive_response", func() { server.Start() - expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusOK) - Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred()) + expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusOK) + Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred()) Expect(server.ReceivedRequests()).Should(HaveLen(1)) }) It("negative_server_error_response", func() { server.Start() - Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).Should(HaveOccurred()) + Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).Should(HaveOccurred()) Expect(server.ReceivedRequests()).Should(HaveLen(testMaxRetries + 1)) }) It("positive_late_response", func() { server.Start() - expectServerCall(server, 
fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusInternalServerError) - expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusForbidden) - expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusOK) + expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusInternalServerError) + expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusForbidden) + expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusOK) - Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred()) + Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred()) Expect(server.ReceivedRequests()).Should(HaveLen(3)) }) @@ -89,18 +90,18 @@ var _ = Describe("inventory_client_tests", func() { go func() { time.Sleep(testRetryMaxDelay * 2) - expectServerCall(server, fmt.Sprintf("/api/assisted-install/v1/clusters/%s/hosts/%s/progress", clusterID, hostID), expectedJson, http.StatusOK) + expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusOK) server.Start() }() - Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred()) + Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred()) Expect(server.ReceivedRequests()).Should(HaveLen(1)) }) It("server_down", func() { server.Start() server.Close() - Expect(client.UpdateHostInstallProgress(context.Background(), hostID, models.HostStageInstalling, "")).Should(HaveOccurred()) + Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).Should(HaveOccurred()) }) }) }) diff --git a/src/inventory_client/mock_inventory_client.go b/src/inventory_client/mock_inventory_client.go index ac3872910..0284d803c 100644 --- a/src/inventory_client/mock_inventory_client.go +++ b/src/inventory_client/mock_inventory_client.go @@ -66,17 +66,17 @@ func (mr *MockInventoryClientMockRecorder) DownloadHostIgnition(ctx, hostID, des } // UpdateHostInstallProgress mocks base method -func (m *MockInventoryClient) UpdateHostInstallProgress(ctx context.Context, hostId string, newStage models.HostStage, info string) error { +func (m *MockInventoryClient) UpdateHostInstallProgress(ctx context.Context, infraEnvId, hostId string, newStage models.HostStage, info string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateHostInstallProgress", ctx, hostId, newStage, info) + ret := m.ctrl.Call(m, "UpdateHostInstallProgress", ctx, infraEnvId, hostId, newStage, info) ret0, _ := ret[0].(error) return ret0 } // UpdateHostInstallProgress indicates an expected call of UpdateHostInstallProgress -func (mr *MockInventoryClientMockRecorder) UpdateHostInstallProgress(ctx, hostId, newStage, info interface{}) *gomock.Call { +func (mr *MockInventoryClientMockRecorder) UpdateHostInstallProgress(ctx, infraEnvId, 
hostId, newStage, info interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHostInstallProgress", reflect.TypeOf((*MockInventoryClient)(nil).UpdateHostInstallProgress), ctx, hostId, newStage, info) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHostInstallProgress", reflect.TypeOf((*MockInventoryClient)(nil).UpdateHostInstallProgress), ctx, infraEnvId, hostId, newStage, info) } // GetEnabledHostsNamesHosts mocks base method @@ -209,15 +209,15 @@ func (mr *MockInventoryClientMockRecorder) ClusterLogProgressReport(ctx, cluster } // HostLogProgressReport mocks base method -func (m *MockInventoryClient) HostLogProgressReport(ctx context.Context, clusterId, hostId string, progress models.LogsState) { +func (m *MockInventoryClient) HostLogProgressReport(ctx context.Context, infraEnvId, hostId string, progress models.LogsState) { m.ctrl.T.Helper() - m.ctrl.Call(m, "HostLogProgressReport", ctx, clusterId, hostId, progress) + m.ctrl.Call(m, "HostLogProgressReport", ctx, infraEnvId, hostId, progress) } // HostLogProgressReport indicates an expected call of HostLogProgressReport -func (mr *MockInventoryClientMockRecorder) HostLogProgressReport(ctx, clusterId, hostId, progress interface{}) *gomock.Call { +func (mr *MockInventoryClientMockRecorder) HostLogProgressReport(ctx, infraEnvId, hostId, progress interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HostLogProgressReport", reflect.TypeOf((*MockInventoryClient)(nil).HostLogProgressReport), ctx, clusterId, hostId, progress) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HostLogProgressReport", reflect.TypeOf((*MockInventoryClient)(nil).HostLogProgressReport), ctx, infraEnvId, hostId, progress) } // UpdateClusterOperator mocks base method diff --git a/src/ops/coreos_installer_log_writer.go b/src/ops/coreos_installer_log_writer.go index 6b71fd875..d1bf3cc93 100644 --- a/src/ops/coreos_installer_log_writer.go +++ b/src/ops/coreos_installer_log_writer.go @@ -21,15 +21,17 @@ type CoreosInstallerLogWriter struct { lastLogLine []byte progressReporter inventory_client.InventoryClient progressRegex *regexp.Regexp + infraEnvID string hostID string lastProgress int } -func NewCoreosInstallerLogWriter(logger *logrus.Logger, progressReporter inventory_client.InventoryClient, hostID string) *CoreosInstallerLogWriter { +func NewCoreosInstallerLogWriter(logger *logrus.Logger, progressReporter inventory_client.InventoryClient, infraEnvID string, hostID string) *CoreosInstallerLogWriter { return &CoreosInstallerLogWriter{log: logger, lastLogLine: []byte{}, progressReporter: progressReporter, progressRegex: regexp.MustCompile(`(.*?)\((.*?\%)\)\s*`), + infraEnvID: infraEnvID, hostID: hostID, lastProgress: 0, } @@ -60,7 +62,7 @@ func (l *CoreosInstallerLogWriter) reportProgress() { if currentPercent >= l.lastProgress+MinProgressDelta || (currentPercent == completed && l.lastProgress != completed) { // If the progress is more than 5% report it ctx := utils.GenerateRequestContext() - if err := l.progressReporter.UpdateHostInstallProgress(ctx, l.hostID, models.HostStageWritingImageToDisk, match[2]); err == nil { + if err := l.progressReporter.UpdateHostInstallProgress(ctx, l.infraEnvID, l.hostID, models.HostStageWritingImageToDisk, match[2]); err == nil { l.lastProgress = currentPercent } } diff --git a/src/ops/coreos_installer_log_writer_test.go b/src/ops/coreos_installer_log_writer_test.go index 4d211a043..7b6f8de5b 100644 --- 
a/src/ops/coreos_installer_log_writer_test.go +++ b/src/ops/coreos_installer_log_writer_test.go @@ -30,9 +30,9 @@ var _ = Describe("Verify CoreosInstallerLogger", func() { updateProgressSuccess := func(stages [][]string) { for _, stage := range stages { if len(stage) == 2 { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "hostID", models.HostStage(stage[0]), stage[1]).Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "infraEnvID", "hostID", models.HostStage(stage[0]), stage[1]).Return(nil).Times(1) } else { - mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "hostID", models.HostStage(stage[0]), "").Return(nil).Times(1) + mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), "infraEnvID", "hostID", models.HostStage(stage[0]), "").Return(nil).Times(1) } } } @@ -41,7 +41,7 @@ var _ = Describe("Verify CoreosInstallerLogger", func() { logger, hook = test.NewNullLogger() ctrl := gomock.NewController(GinkgoT()) mockbmclient = inventory_client.NewMockInventoryClient(ctrl) - cilogger = NewCoreosInstallerLogWriter(logger, mockbmclient, "hostID") + cilogger = NewCoreosInstallerLogWriter(logger, mockbmclient, "infraEnvID", "hostID") }) It("test log with new line", func() { _, err := cilogger.Write([]byte("some log with a new line \n")) diff --git a/src/ops/ops.go b/src/ops/ops.go index ee8b66a4e..43adf1738 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -178,7 +178,7 @@ func (o *ops) SystemctlAction(action string, args ...string) error { func (o *ops) WriteImageToDisk(ignitionPath string, device string, progressReporter inventory_client.InventoryClient, extraArgs []string) error { allArgs := installerArgs(ignitionPath, device, extraArgs) o.log.Infof("Writing image and ignition to disk with arguments: %v", allArgs) - _, err := o.ExecPrivilegeCommand(NewCoreosInstallerLogWriter(o.log, progressReporter, config.GlobalConfig.HostID), + _, err := o.ExecPrivilegeCommand(NewCoreosInstallerLogWriter(o.log, progressReporter, config.GlobalConfig.InfraEnvID, config.GlobalConfig.HostID), "coreos-installer", allArgs...) return err } From f3800cfa3d64ce6dcd6f7b73f0578bb99bfdaf7a Mon Sep 17 00:00:00 2001 From: Igal Tsoiref Date: Mon, 9 Aug 2021 12:42:34 +0300 Subject: [PATCH 19/43] MGMT-7450: Removing pull secret token from failure logs (#340) Adding function that will change the token value to an empty string. --- src/ops/ops.go | 11 +++++++---- src/ops/ops_test.go | 12 ++++++++---- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/ops/ops.go b/src/ops/ops.go index 43adf1738..46569b208 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -101,17 +101,20 @@ type ExecCommandError struct { WaitStatus int } +func removePullSecret(s []string) []string { + return strings.Split(strings.ReplaceAll(strings.Join(s, " "), config.GlobalConfig.PullSecretToken, ""), " ") +} + func (e *ExecCommandError) Error() string { lastOutput := e.Output if len(e.Output) > 200 { lastOutput = "... 
" + e.Output[len(e.Output)-200:] } - - return fmt.Sprintf("failed executing %s %v, Error %s, LastOutput \"%s\"", e.Command, e.Args, e.ExitErr, lastOutput) + return fmt.Sprintf("failed executing %s %v, Error %s, LastOutput \"%s\"", e.Command, removePullSecret(e.Args), e.ExitErr, lastOutput) } func (e *ExecCommandError) DetailedError() string { - return fmt.Sprintf("failed executing %s %v, env vars %v, error %s, waitStatus %d, Output \"%s\"", e.Command, e.Args, e.Env, e.ExitErr, e.WaitStatus, e.Output) + return fmt.Sprintf("failed executing %s %v, env vars %v, error %s, waitStatus %d, Output \"%s\"", e.Command, removePullSecret(e.Args), removePullSecret(e.Env), e.ExitErr, e.WaitStatus, e.Output) } // ExecCommand executes command. @@ -156,7 +159,7 @@ func (o *ops) ExecCommand(liveLogger io.Writer, command string, args ...string) } return output, execErr } - o.log.Debug("Command executed:", " command", command, " arguments", args, "env vars", cmd.Env, "output", output) + o.log.Debug("Command executed:", " command", command, " arguments", removePullSecret(args), "env vars", removePullSecret(cmd.Env), "output", output) return output, err } diff --git a/src/ops/ops_test.go b/src/ops/ops_test.go index 6148b3d18..f3239bee0 100644 --- a/src/ops/ops_test.go +++ b/src/ops/ops_test.go @@ -5,9 +5,13 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/openshift/assisted-installer/src/config" ) var _ = Describe("ExecCommandError", func() { + pullSecret := "TEST-TOKEN" + config.GlobalConfig.PullSecretToken = pullSecret + It("Creates the correct error for mkdir", func() { err := &ExecCommandError{ Command: "mkdir", @@ -26,15 +30,15 @@ var _ = Describe("ExecCommandError", func() { It("Creates the correct error for ignition extract", func() { err := &ExecCommandError{ Command: "nsenter", - Args: []string{"-t", "1", "-m", "-i", "--", "podman", "run", "--net", "host", "--volume", "/:/rootfs:rw", "--volume", "/usr/bin/rpm-ostree:/usr/bin/rpm-ostree", "--privileged", "--entrypoint", "/usr/bin/machine-config-daemon", "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221", "start", "--node-name", "localhost", "--root-mount", "/rootfs", "--once-from", "/opt/install-dir/bootstrap.ign", "--skip-reboot"}, - Env: []string{"HOME=/home/userZ"}, + Args: []string{"-t", "1", "-m", "-i", "--", "podman", "run", "--net", "host", "--volume", "/:/rootfs:rw", "--volume", "/usr/bin/rpm-ostree:/usr/bin/rpm-ostree", "--privileged", "--entrypoint", "/usr/bin/machine-config-daemon", "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221", "start", "--node-name", "localhost", "--root-mount", "/rootfs", "--once-from", "/opt/install-dir/bootstrap.ign", "--skip-reboot", "--pull-secret", pullSecret}, + Env: []string{"HOME=/home/userZ", fmt.Sprintf("PULL_SECRET_TOKEN=%s", pullSecret)}, ExitErr: fmt.Errorf("exit status 255"), WaitStatus: 255, Output: "Trying to pull quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221...\nGetting image source signatures\nCopying blob sha256:74cbb6607642df5f9f70e8588e3c56d6de795d1a9af22866ea4cc82f2dad4f14\nCopying blob sha256:c9fa7d57b9028d4bd02b51cef3c3039fa7b23a8b2d9d26a6ce66b3428f6e2457\nCopying blob sha256:c676df4ac84e718ecee4f8129e43e9c2b7492942606cc65f1fc5e6f3da413160\nCopying blob sha256:b147db91a07555d29ed6085e4733f34dbaa673076488caa8f95f4677f55b3a5c\nCopying blob 
sha256:ad956945835b7630565fc23fcbd8194eef32b4300c28546d574b2a377fe5d0a5\nCopying config sha256:c4356549f53a30a1baefc5d1515ec1ab8b3786a4bf1738c0abaedc0e44829498\nWriting manifest to image destination\nStoring signatures\nI1019 19:03:28.797092 1 start.go:108] Version: v4.6.0-202008262209.p0-dirty (16d243c4bed178f5d4fd400c0518ebf1dbaface8)\nI1019 19:03:28.797227 1 start.go:118] Calling chroot(\"/rootfs\")\nI1019 19:03:28.797307 1 rpm-ostree.go:261] Running captured: rpm-ostree status --json\nerror: Timeout was reached\nF1019 19:04:35.869592 1 start.go:147] Failed to initialize single run daemon: error reading osImageURL from rpm-ostree: error running rpm-ostree status --json: : exit status 1)", } - wantError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot], Error exit status 255, LastOutput "... or: Timeout was reached + wantError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot --pull-secret ], Error exit status 255, LastOutput "... or: Timeout was reached F1019 19:04:35.869592 1 start.go:147] Failed to initialize single run daemon: error reading osImageURL from rpm-ostree: error running rpm-ostree status --json: : exit status 1)"` - wantDetailedError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot], env vars [HOME=/home/userZ], error exit status 255, waitStatus 255, Output "Trying to pull quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221... + wantDetailedError := `failed executing nsenter [-t 1 -m -i -- podman run --net host --volume /:/rootfs:rw --volume /usr/bin/rpm-ostree:/usr/bin/rpm-ostree --privileged --entrypoint /usr/bin/machine-config-daemon quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221 start --node-name localhost --root-mount /rootfs --once-from /opt/install-dir/bootstrap.ign --skip-reboot --pull-secret ], env vars [HOME=/home/userZ PULL_SECRET_TOKEN=], error exit status 255, waitStatus 255, Output "Trying to pull quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc1a34f55c712b2b9c5e5a14dd85e67cbdae11fd147046ac2fef9eaf179ab221... 
Getting image source signatures Copying blob sha256:74cbb6607642df5f9f70e8588e3c56d6de795d1a9af22866ea4cc82f2dad4f14 Copying blob sha256:c9fa7d57b9028d4bd02b51cef3c3039fa7b23a8b2d9d26a6ce66b3428f6e2457 From 4c04303fd097f1de236d9fbeb046b005cb746de4 Mon Sep 17 00:00:00 2001 From: Eran Cohen Date: Mon, 9 Aug 2021 17:58:36 +0300 Subject: [PATCH 20/43] NO-ISSUE: remove obsolete installation-timeout parameter (#341) --- src/config/config.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/config/config.go b/src/config/config.go index 44f7c7506..72222f2ac 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -24,7 +24,6 @@ type Config struct { MCOImage string ControllerImage string AgentImage string - InstallationTimeout uint PullSecretToken string `secret:"true"` SkipCertVerification bool CACertPath string @@ -60,8 +59,6 @@ func ProcessArgs() { "Assisted Installer Controller image URL") flag.StringVar(&ret.AgentImage, "agent-image", "quay.io/ocpmetal/assisted-installer-agent:latest", "Assisted Installer Agent image URL that will be used to send logs on successful installation") - // Remove installation-timeout once the assisted-service stop sending it. - flag.UintVar(&ret.InstallationTimeout, "installation-timeout", 120, "Installation timeout in minutes - OBSOLETE") flag.BoolVar(&ret.SkipCertVerification, "insecure", false, "Do not validate TLS certificate") flag.StringVar(&ret.CACertPath, "cacert", "", "Path to custom CA certificate in PEM format") flag.StringVar(&ret.HTTPProxy, "http-proxy", "", "A proxy URL to use for creating HTTP connections outside the cluster") From 3f66cb7ca27bfbca4d2425a8943c9745fd181aee Mon Sep 17 00:00:00 2001 From: Igal Tsoiref Date: Tue, 10 Aug 2021 18:19:58 +0300 Subject: [PATCH 21/43] OCPBUGSM-31802: hosts that were moved to ready state in k8s, didn't send (#342) joined status to the cluster. 
Now they will send joined and only then done --- .../assisted_installer_controller.go | 17 ++++++++++------- .../assisted_installer_controller_test.go | 6 +++++- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index d8611b5fc..c6c0ec0bc 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -220,6 +220,16 @@ func (c *controller) waitAndUpdateNodesStatus() bool { continue } + + if host.Host.Progress.CurrentStage == models.HostStageConfiguring { + log.Infof("Found new joined node %s with inventory id %s, kubernetes id %s, updating its status to %s", + node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageJoined) + if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.InfraEnvID.String(), host.Host.ID.String(), models.HostStageJoined, ""); err != nil { + log.WithError(err).Errorf("Failed to update node %s installation status", node.Name) + continue + } + } + if common.IsK8sNodeIsReady(node) { log.Infof("Found new ready node %s with inventory id %s, kubernetes id %s, updating its status to %s", node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageDone) @@ -227,13 +237,6 @@ func (c *controller) waitAndUpdateNodesStatus() bool { log.WithError(err).Errorf("Failed to update node %s installation status", node.Name) continue } - } else if host.Host.Progress.CurrentStage == models.HostStageConfiguring { - log.Infof("Found new joined node %s with inventory id %s, kubernetes id %s, updating its status to %s", - node.Name, host.Host.ID.String(), node.Status.NodeInfo.SystemUUID, models.HostStageJoined) - if err := c.ic.UpdateHostInstallProgress(ctxReq, host.Host.InfraEnvID.String(), host.Host.ID.String(), models.HostStageJoined, ""); err != nil { - log.WithError(err).Errorf("Failed to update node %s installation status", node.Name) - continue - } } } c.updateConfiguringStatusIfNeeded(assistedNodesMap) diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 8f949b8c5..9d92ba1d6 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -100,7 +100,7 @@ var _ = Describe("installer HostRoleMaster role", func() { node0Id := strfmt.UUID("7916fa89-ea7a-443e-a862-b3e930309f65") node1Id := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") node2Id := strfmt.UUID("b898d516-3e16-49d0-86a5-0ad5bd04e3ed") - currentState := models.HostProgressInfo{CurrentStage: models.HostStageConfiguring} + currentState := models.HostProgressInfo{CurrentStage: models.HostStageJoined} currentStatus := models.HostStatusInstallingInProgress inventoryNamesIds = map[string]inventory_client.HostData{ "node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: ¤tState, Status: ¤tStatus}}, @@ -247,6 +247,10 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("waitAndUpdateNodesStatus happy flow - all nodes installing", func() { + + updateProgressSuccess([]models.HostStage{models.HostStageJoined, + models.HostStageJoined, + models.HostStageJoined}, inventoryNamesIds) updateProgressSuccess(defaultStages, inventoryNamesIds) hosts := create3Hosts(models.HostStatusInstalling, 
models.HostStageConfiguring) From 52a722deac5af824684a148de6be1753ae1e844c Mon Sep 17 00:00:00 2001 From: Ori Amizur <60868946+ori-amizur@users.noreply.github.com> Date: Tue, 17 Aug 2021 15:48:45 +0300 Subject: [PATCH 22/43] MGMT-7178: Assisted-installer should not reboot worker till 2 masters join (#343) --- src/installer/installer.go | 32 ++++++++++++++++++++++++++++++++ src/installer/installer_test.go | 18 ++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/src/installer/installer.go b/src/installer/installer.go index b72f2df4f..41ce4b40c 100644 --- a/src/installer/installer.go +++ b/src/installer/installer.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/go-openapi/swag" "github.com/google/uuid" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -133,6 +134,11 @@ func (i *installer) InstallNode() error { } i.log.Info("Setting bootstrap node new role to master") + } else if i.Config.Role == string(models.HostRoleWorker) { + // Wait for 2 masters to be ready before rebooting + if err = i.workerWaitFor2ReadyMasters(ctx); err != nil { + return err + } } //upload host logs and report log status before reboot i.log.Infof("Uploading logs and reporting status before rebooting the node %s for cluster %s", i.Config.HostID, i.Config.ClusterID) @@ -378,6 +384,32 @@ func (i *installer) waitForControlPlane(ctx context.Context) error { return nil } +func numDoneMasters(cluster *models.Cluster) int { + numDoneMasters := 0 + for _, h := range cluster.Hosts { + if h.Role == models.HostRoleMaster && h.Progress.CurrentStage == models.HostStageDone { + numDoneMasters++ + } + } + return numDoneMasters +} + +func (i *installer) workerWaitFor2ReadyMasters(ctx context.Context) error { + i.log.Info("Waiting for 2 ready masters") + i.UpdateHostInstallProgress(models.HostStageWaitingForControlPlane, "") + for { + cluster, err := i.inventoryClient.GetCluster(ctx) + if err != nil { + i.log.WithError(err).Errorf("Getting cluster %s", i.ClusterID) + return err + } + if swag.StringValue(cluster.Kind) == models.ClusterKindAddHostsCluster || numDoneMasters(cluster) >= minMasterNodes { + return nil + } + time.Sleep(generalWaitInterval) + } +} + func (i *installer) shouldControlPlaneReplicasPatchApplied(kc k8s_client.K8SClient) (bool, error) { controlPlanePatchRequired, err := utils.IsVersionLessThan47(i.Config.OpenshiftVersion) if err != nil { diff --git a/src/installer/installer_test.go b/src/installer/installer_test.go index b6a3b44f0..26c2450f0 100644 --- a/src/installer/installer_test.go +++ b/src/installer/installer_test.go @@ -617,8 +617,26 @@ var _ = Describe("installer HostRoleMaster role", func() { updateProgressSuccess([][]string{{string(models.HostStageStartingInstallation), conf.Role}, {string(models.HostStageInstalling), conf.Role}, {string(models.HostStageWritingImageToDisk)}, + {string(models.HostStageWaitingForControlPlane)}, {string(models.HostStageRebooting)}, }) + cluster := models.Cluster{ + Hosts: []*models.Host{ + { + Role: models.HostRoleMaster, + Progress: &models.HostProgressInfo{ + CurrentStage: models.HostStageDone, + }, + }, + { + Role: models.HostRoleMaster, + Progress: &models.HostProgressInfo{ + CurrentStage: models.HostStageDone, + }, + }, + }, + } + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&cluster, nil).Times(1) cleanInstallDevice() mkdirSuccess(InstallDir) downloadHostIgnitionSuccess(hostId, "worker-host-id.ign")
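[Editor's note] The gate added above reduces to: poll GetCluster until at least minMasterNodes (2) masters report HostStageDone, unless the cluster is an add-hosts (day-2) cluster, and only then let the worker reboot. A reduced, runnable model of that predicate — illustrative only; the real code works with models.Cluster and the inventory client:

package main

import "fmt"

type host struct {
	role  string
	stage string
}

// doneMasters mirrors numDoneMasters above: count the masters whose
// current stage is "Done".
func doneMasters(hosts []host) int {
	n := 0
	for _, h := range hosts {
		if h.role == "master" && h.stage == "Done" {
			n++
		}
	}
	return n
}

func main() {
	const minMasterNodes = 2
	hosts := []host{
		{"master", "Done"},
		{"master", "Done"},
		{"worker", "Rebooting"},
	}
	// The worker may proceed to reboot only once this holds:
	fmt.Println(doneMasters(hosts) >= minMasterNodes) // true
}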
From f663d719f2ed68a82037eee1f742b011b52e7d3d Mon Sep 17 00:00:00 2001 From: Yuval Goldberg Date: Sun, 22 Aug 2021 12:07:38 +0300 Subject: [PATCH 23/43] MGMT-7597: Remove ronniel1, razregev, and asalkeld from OWNERS (#345) --- OWNERS_ALIASES | 4 ---- 1 file changed, 4 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 59820b870..2b48a317a 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -9,13 +9,10 @@ aliases: - gamli75 - ori-amizur - oshercc - - razregev - romfreiman - - ronniel1 - tsorya - yevgeny-shnaidman - yuvigold - - masayag - nmagnezi - carbonin - rollandf @@ -23,7 +20,6 @@ - ybettan - slaviered - osherdp - - asalkeld - flaper87 - mkowalski code-reviewers: From bbbebdaf4fa79ff31f61d978b5a17a35cb28f27d Mon Sep 17 00:00:00 2001 From: Eran Cohen Date: Sun, 22 Aug 2021 23:42:36 +0300 Subject: [PATCH 24/43] MGMT-7635 Fix logs gathering on SNO when failing to complete bootstrapping (#346) * NO-ISSUE: remove obsolete installation-timeout parameter * MGMT-7635: Fix logs gathering on SNO when failing to complete bootstrapping The log_sender command failed to mount /root/.ssh due to a "no such file or directory" error. This code ensures the directory gets created once the bootstrap flow begins --- src/installer/installer.go | 10 ++++++---- src/installer/installer_test.go | 5 ++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/installer/installer.go b/src/installer/installer.go index 41ce4b40c..c47bad4af 100644 --- a/src/installer/installer.go +++ b/src/installer/installer.go @@ -201,6 +201,12 @@ func (i *installer) writeImageToDisk(ignitionPath string) error { func (i *installer) startBootstrap() error { i.log.Infof("Running bootstrap") + // This is required for the log collection command to work since it will try to mount this directory + // This directory is also required by `generateSshKeyPair` as it will place the key there + if err := i.ops.Mkdir(sshDir); err != nil { + i.log.WithError(err).Error("Failed to create SSH dir") + return err + } ignitionFileName := "bootstrap.ign" ignitionPath, err := i.getFileFromService(ignitionFileName) if err != nil { @@ -295,10 +301,6 @@ func (i *installer) extractIgnitionToFS(ignitionPath string) (err error) { func (i *installer) generateSshKeyPair() error { i.log.Info("Generating new SSH key pair") - if err := i.ops.Mkdir(sshDir); err != nil { - i.log.WithError(err).Error("Failed to create SSH dir") - return err - } if _, err := i.ops.ExecPrivilegeCommand(utils.NewLogWriter(i.log), "ssh-keygen", "-q", "-f", sshKeyPath, "-N", ""); err != nil { i.log.WithError(err).Error("Failed to generate SSH key pair") return err diff --git a/src/installer/installer_test.go b/src/installer/installer_test.go index 26c2450f0..1ac2f357c 100644 --- a/src/installer/installer_test.go +++ b/src/installer/installer_test.go @@ -233,7 +233,6 @@ var _ = Describe("installer HostRoleMaster role", func() { mockops.EXPECT().ExtractFromIgnition(filepath.Join(InstallDir, bootstrapIgn), dockerConfigFile).Return(nil).Times(1) } generateSshKeyPairSuccess := func() { - mkdirSuccess(sshDir) mockops.EXPECT().ExecPrivilegeCommand(gomock.Any(), "ssh-keygen", "-q", "-f", sshKeyPath, "-N", "").Return("OK", nil).Times(1) } createOpenshiftSshManifestSuccess := func() { @@ -242,6 +241,7 @@ var _ = Describe("installer HostRoleMaster role", func() { bootstrapSetup := func() { cleanInstallDevice() + mkdirSuccess(sshDir) mkdirSuccess(InstallDir) downloadFileSuccess(bootstrapIgn) extractSecretFromIgnitionSuccess() @@ -330,6 +330,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) + mkdirSuccess(sshDir)
downloadFileSuccess(bootstrapIgn) extractSecretFromIgnitionSuccess() extractIgnitionToFS("Success", nil) @@ -378,6 +379,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) + mkdirSuccess(sshDir) downloadFileSuccess(bootstrapIgn) downloadHostIgnitionSuccess(hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) @@ -713,6 +715,7 @@ var _ = Describe("installer HostRoleMaster role", func() { singleNodeBootstrapSetup := func() { cleanInstallDevice() mkdirSuccess(InstallDir) + mkdirSuccess(sshDir) downloadFileSuccess(bootstrapIgn) extractSecretFromIgnitionSuccess() extractIgnitionToFS("Success", nil) From e346bc34fd2b3fb8d6c2cf6c0bb2125a565a198d Mon Sep 17 00:00:00 2001 From: Ondra Machacek Date: Mon, 23 Aug 2021 21:17:19 +0200 Subject: [PATCH 25/43] MGMT-7292: Wait for the OLM to be initialized before CR apply (#333) --- .../assisted_installer_controller.go | 139 ++++++++++++++++-- .../assisted_installer_controller_test.go | 103 +++++++++---- .../operator_handler.go | 28 +++- src/ops/mock_ops.go | 2 +- src/ops/ops.go | 21 ++- src/utils/utils.go | 10 +- 6 files changed, 251 insertions(+), 52 deletions(-) diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index c6c0ec0bc..d8d8b12b9 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -2,6 +2,7 @@ package assisted_installer_controller import ( "context" + "encoding/base64" "encoding/json" "fmt" "io" @@ -44,11 +45,12 @@ const ( dnsServiceName = "dns-default" dnsServiceNamespace = "openshift-dns" dnsOperatorNamespace = "openshift-dns-operator" + maxFetchAttempts = 5 maxDeletionAttempts = 5 maxDNSServiceIPAttempts = 45 KeepWaiting = false ExitWaiting = true - customManifestsFile = "custom_manifests.yaml" + customManifestsFile = "custom_manifests.json" kubeconfigFileName = "kubeconfig-noingress" ) @@ -61,6 +63,7 @@ var ( CompleteTimeout = 30 * time.Minute DNSAddressRetryInterval = 20 * time.Second DeletionRetryInterval = 10 * time.Second + FetchRetryInterval = 10 * time.Second LongWaitTimeout = 10 * time.Hour CVOMaxTimeout = 3 * time.Hour ) @@ -100,6 +103,14 @@ type controller struct { kc k8s_client.K8SClient } +// manifest stores the operator manifest used by assisted-installer to create CRs of the OLM: +type manifest struct { + // name of the operator whose CR manifest we want to create + name string + // content of the manifest of the operator + content string +} + func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inventory_client.InventoryClient, kc k8s_client.K8SClient) *controller { @@ -459,16 +470,55 @@ func (c controller) postInstallConfigs(ctx context.Context) error { return errors.Wrapf(err, "Timeout while waiting for console to become available") } + // Wait for OLM operators + if err = c.waitForOLMOperators(ctx); err != nil { + return errors.Wrapf(err, "Error while initializing OLM operators") + } + + return nil +} + +func (c controller) waitForOLMOperators(ctx context.Context) error { + var operators []models.MonitoredOperator + var err error + + // Get the monitored operators: + err = utils.Retry(maxFetchAttempts, FetchRetryInterval, c.log, func() error { + operators, err = c.ic.GetClusterMonitoredOLMOperators(context.TODO(), c.ClusterID) + if err != nil { + return errors.Wrapf(err, "Error while fetching the monitored
operators from assisted-service.") + } + return nil + }) + if err != nil { + return errors.Wrapf(err, "Failed to fetch monitored operators") + } + if len(operators) == 0 { + c.log.Info("No OLM operators found.") + return nil + } + + // Get maximum wait timeout for OLM operators: + waitTimeout := c.getMaximumOLMTimeout(operators) + c.log.Infof("OLM operators %v wait timeout %v", operators, waitTimeout) + + // Wait for the CSV state of the OLM operators, before applying OLM CRs + err = utils.WaitForPredicateParamsWithContext(ctx, waitTimeout, GeneralWaitInterval, c.waitForCSVBeCreated, operators) + if err != nil { + // We continue in case of failure, because we want to try to apply manifests at least for operators which are ready. + c.log.WithError(err).Warnf("Failed to wait for some of the OLM operators to be initialized") + } + // Apply post install manifests - err = utils.WaitForPredicateWithContext(ctx, retryPostManifestTimeout, GeneralWaitInterval, c.applyPostInstallManifests) + err = utils.WaitForPredicateParamsWithContext(ctx, retryPostManifestTimeout, GeneralWaitInterval, c.applyPostInstallManifests, operators) if err != nil { return errors.Wrapf(err, "Failed to apply post manifests") } - if err != c.waitForOLMOperators(ctx) { + if err != c.waitForCSV(ctx, waitTimeout) { // In case the timeout occurs, we have to update the pending OLM operators to failed state, // so the assisted-service can update the cluster state to completed. - if err = c.updatePendingOLMOperators(); err != nil { + if err = c.updatePendingOLMOperators(ctx); err != nil { return errors.Errorf("Timeout while waiting for some of the operators and not able to update its state") } return errors.Wrapf(err, "Timeout while waiting for OLM operators to be installed") @@ -477,7 +527,32 @@ return nil } -func (c controller) applyPostInstallManifests() bool { +func (c controller) getReadyOperators(operators []models.MonitoredOperator) ([]string, []models.MonitoredOperator, error) { + var readyOperators []string + for index := range operators { + handler := NewClusterServiceVersionHandler(c.kc, &operators[index], c.Status) + if handler.IsInitialized() { + readyOperators = append(readyOperators, handler.GetName()) + } + } + return readyOperators, operators, nil +} + +func (c controller) waitForCSVBeCreated(arg interface{}) bool { + operators := arg.([]models.MonitoredOperator) + readyOperators, operators, err := c.getReadyOperators(operators) + if err != nil { + c.log.WithError(err).Warn("Error while fetching the operators state.") + return false + } + if len(operators) == len(readyOperators) { + return true + } + + return false +} + +func (c controller) applyPostInstallManifests(arg interface{}) bool { ctx := utils.GenerateRequestContext() tempDir, err := ioutil.TempDir("", "controller-custom-manifests-") if err != nil { @@ -497,12 +572,51 @@ return false } - err = c.ops.CreateManifests(kubeconfigName, customManifestPath) + // Unmarshal the content of the operator manifests: + var manifests []manifest + data, err := ioutil.ReadFile(customManifestPath) + if err != nil { + c.log.WithError(err).Errorf("Failed to read the custom manifests file.") + return false + } + if err = json.Unmarshal(data, &manifests); err != nil { + c.log.WithError(err).Errorf("Failed to unmarshal custom manifest file content %s.", data) + return false + } + + // Create the manifests of the operators, which are properly initialized:
readyOperators, _, err := c.getReadyOperators(arg.([]models.MonitoredOperator)) if err != nil { - c.log.WithError(err).Error("Failed to apply manifest file.") + c.log.WithError(err).Errorf("Failed to fetch operators from assisted-service") return false } + for _, manifest := range manifests { + // Check if the operator is properly initialized by CSV: + if !func() bool { + for _, readyOperator := range readyOperators { + if readyOperator == manifest.name { + return true + } + } + return false + }() { + continue + } + + content, err := base64.StdEncoding.DecodeString(manifest.content) + if err != nil { + c.log.WithError(err).Errorf("Failed to decode content of operator CR %s.", manifest.name) + return false + } + + err = c.ops.CreateManifests(kubeconfigName, content) + if err != nil { + c.log.WithError(err).Error("Failed to apply manifest file.") + return false + } + } + return true } @@ -753,7 +867,7 @@ func (c controller) addRouterCAToClusterCA() bool { } -func (c controller) getMaximumOLMTimeout(operators []*models.MonitoredOperator) time.Duration { +func (c controller) getMaximumOLMTimeout(operators []models.MonitoredOperator) time.Duration { timeout := WaitTimeout.Seconds() for _, operator := range operators { timeout = math.Max(float64(operator.TimeoutSeconds), timeout) @@ -777,9 +891,8 @@ func (c controller) getProgressingOLMOperators() ([]*models.MonitoredOperator, e return ret, nil } -func (c controller) updatePendingOLMOperators() error { +func (c controller) updatePendingOLMOperators(ctx context.Context) error { c.log.Infof("Updating pending OLM operators") - ctx := utils.GenerateRequestContext() operators, err := c.getProgressingOLMOperators() if err != nil { return err @@ -795,8 +908,8 @@ func (c controller) updatePendingOLMOperators() error { return nil } -// waitForOLMOperators wait until all OLM monitored operators are available or failed. -func (c controller) waitForOLMOperators(ctx context.Context) error { +// waitForCSV wait until all OLM monitored operators are available or failed. 
+func (c controller) waitForCSV(ctx context.Context, waitTimeout time.Duration) error { operators, err := c.getProgressingOLMOperators() if err != nil { return err @@ -824,8 +937,6 @@ func (c controller) waitForOLMOperators(ctx context.Context) error { return false } - waitTimeout := c.getMaximumOLMTimeout(operators) - c.log.Infof("Waiting for OLM operators for %v", waitTimeout) return utils.WaitForPredicateWithContext(ctx, waitTimeout, GeneralWaitInterval, areOLMOperatorsAvailable) } diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 9d92ba1d6..0031daa0f 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -222,10 +222,17 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(operators, nil).Times(1) } - mockApplyPostInstallManifests := func() { - mockbmclient.EXPECT().DownloadFile(gomock.Any(), customManifestsFile, gomock.Any()).Return(nil).Times(1) + mockApplyPostInstallManifests := func(operators []models.MonitoredOperator) { + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(operators, nil).Times(1) + mockbmclient.EXPECT().DownloadFile(gomock.Any(), customManifestsFile, gomock.Any()).DoAndReturn( + func(ctx context.Context, filename, dest string) error { + if err := ioutil.WriteFile(dest, []byte("[]"), 0644); err != nil { + return err + } + return nil + }, + ).Times(1) mockbmclient.EXPECT().DownloadFile(gomock.Any(), kubeconfigFileName, gomock.Any()).Return(nil).Times(1) - mockops.EXPECT().CreateManifests(gomock.Any(), gomock.Any()).Return(nil).Times(1) } Context("Waiting for 3 nodes", func() { @@ -515,6 +522,54 @@ var _ = Describe("installer HostRoleMaster role", func() { }) }) + Context("waitForCSVBeCreated", func() { + var ( + operatorName = "lso" + subscriptionName = "local-storage-operator" + namespaceName = "openshift-local-storage" + ) + BeforeEach(func() { + assistedController.WaitForClusterVersion = true + GeneralWaitInterval = 1 * time.Millisecond + }) + It("empty operators", func() { + Expect(assistedController.waitForCSVBeCreated([]models.MonitoredOperator{})).Should(Equal(true)) + }) + It("wrong subscription", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("", fmt.Errorf("dummy")).Times(1) + Expect(assistedController.waitForCSVBeCreated(operators)).Should(Equal(false)) + }) + It("non-initialized operator", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("", nil).Times(1) + Expect(assistedController.waitForCSVBeCreated(operators)).Should(Equal(false)) + }) + It("initialized operator", func() { + operators := []models.MonitoredOperator{ + { + SubscriptionName: subscriptionName, Namespace: namespaceName, + Name: operatorName, Status: 
models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm, + }, + } + + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("randomCSV", nil).Times(1) + Expect(assistedController.waitForCSVBeCreated(operators)).Should(Equal(true)) + }) + }) + Context("PostInstallConfigs", func() { Context("waiting for cluster version", func() { BeforeEach(func() { @@ -611,7 +666,6 @@ var _ = Describe("installer HostRoleMaster role", func() { Return(&models.MonitoredOperator{Status: models.OperatorStatusAvailable, StatusInfo: availableClusterVersionCondition.Status.Conditions[0].Message}, nil).Times(1) // Completion - mockApplyPostInstallManifests() mockGetOLMOperators([]models.MonitoredOperator{}) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) @@ -646,7 +700,6 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) setControllerWaitForOLMOperators(assistedController.ClusterID) mockGetOLMOperators([]models.MonitoredOperator{}) - mockApplyPostInstallManifests() mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) @@ -684,10 +737,12 @@ var _ = Describe("installer HostRoleMaster role", func() { It("waiting for single OLM operator", func() { By("setup", func() { setControllerWaitForOLMOperators(assistedController.ClusterID) - mockApplyPostInstallManifests() - mockGetOLMOperators([]models.MonitoredOperator{ + operators := []models.MonitoredOperator{ {SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: "", TimeoutSeconds: 120 * 60}, - }) + } + mockGetOLMOperators(operators) + mockApplyPostInstallManifests(operators) + mockk8sclient.EXPECT().GetCSVFromSubscription(operators[0].Namespace, operators[0].SubscriptionName).Return("local-storage-operator", nil).Times(2) }) By("empty status", func() { @@ -734,10 +789,9 @@ var _ = Describe("installer HostRoleMaster role", func() { It("waiting for single OLM operator which timeouts", func() { By("setup", func() { setControllerWaitForOLMOperators(assistedController.ClusterID) - mockApplyPostInstallManifests() - mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return( - []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 0}}, nil, - ).AnyTimes() + operators := []models.MonitoredOperator{{SubscriptionName: "local-storage-operator", Namespace: "openshift-local-storage", OperatorType: models.OperatorTypeOlm, Name: "lso", Status: models.OperatorStatusProgressing, TimeoutSeconds: 0}} + mockApplyPostInstallManifests(operators) + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return(operators, nil).AnyTimes() }) By("endless empty status", func() { @@ -1039,12 +1093,12 @@ var _ = Describe("installer HostRoleMaster role", func() { Context("getMaximumOLMTimeout", func() { It("Return general timeout if no OLM's present", func() { - opertors := 
[]*models.MonitoredOperator{} + opertors := []models.MonitoredOperator{} Expect(assistedController.getMaximumOLMTimeout(opertors)).To(Equal(WaitTimeout)) }) It("Return general timeout if OLM's timeout is lower", func() { - opertors := []*models.MonitoredOperator{ + opertors := []models.MonitoredOperator{ { TimeoutSeconds: 0, }, @@ -1054,7 +1108,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("Return maximum from multiple OLM's", func() { - opertors := []*models.MonitoredOperator{ + opertors := []models.MonitoredOperator{ {OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 120 * 60}, {OperatorType: models.OperatorTypeOlm, TimeoutSeconds: 130 * 60}, } @@ -1075,16 +1129,9 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("List is empty", func() { - mockGetOLMOperators([]models.MonitoredOperator{}) + mockbmclient.EXPECT().GetClusterMonitoredOLMOperators(gomock.Any(), gomock.Any()).Return([]models.MonitoredOperator{}, nil).Times(1) Expect(assistedController.waitForOLMOperators(context.TODO())).To(BeNil()) }) - It("k8s unavailable", func() { - operators := []models.MonitoredOperator{{Name: operatorName, Status: models.OperatorStatusProgressing, OperatorType: models.OperatorTypeOlm}} - mockGetOLMOperators(operators) - mockGetServiceOperators(operators) - mockk8sclient.EXPECT().GetCSVFromSubscription(gomock.Any(), gomock.Any()).Return("", fmt.Errorf("Error")).Times(1) - Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) - }) It("progressing - no update (empty message)", func() { operators := []models.MonitoredOperator{ { @@ -1099,7 +1146,7 @@ var _ = Describe("installer HostRoleMaster role", func() { operators[0], &olmv1alpha1.ClusterServiceVersion{Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling}}, ) - Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) + Expect(assistedController.waitForCSV(context.TODO(), WaitTimeout)).To(HaveOccurred()) }) It("progressing - no update (same message)", func() { operators := []models.MonitoredOperator{ @@ -1118,7 +1165,7 @@ var _ = Describe("installer HostRoleMaster role", func() { Status: olmv1alpha1.ClusterServiceVersionStatus{Phase: olmv1alpha1.CSVPhaseInstalling, Message: "same"}, }, ) - Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) + Expect(assistedController.waitForCSV(context.TODO(), WaitTimeout)).To(HaveOccurred()) }) It("progressing - update (new message)", func() { operators := []models.MonitoredOperator{ @@ -1139,7 +1186,7 @@ var _ = Describe("installer HostRoleMaster role", func() { ) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", operatorName, gomock.Any(), gomock.Any()).Return(nil).Times(1) - Expect(assistedController.waitForOLMOperators(context.TODO())).To(HaveOccurred()) + Expect(assistedController.waitForCSV(context.TODO(), WaitTimeout)).To(HaveOccurred()) }) It("check that we tolerate the failed state reported by CSV", func() { WaitTimeout = WaitTimeout * 10 @@ -1170,7 +1217,7 @@ var _ = Describe("installer HostRoleMaster role", func() { newOperators = append(newOperators, operators...) 
newOperators[0].Status = models.OperatorStatusAvailable mockGetServiceOperators(newOperators) - Expect(assistedController.waitForOLMOperators(context.TODO())).To(BeNil()) + Expect(assistedController.waitForCSV(context.TODO(), LongWaitTimeout)).To(BeNil()) }) It("multiple OLMs", func() { @@ -1223,7 +1270,7 @@ var _ = Describe("installer HostRoleMaster role", func() { lastOne[0].Status = models.OperatorStatusAvailable mockGetServiceOperators(lastOne) - Expect(assistedController.waitForOLMOperators(context.TODO())).To(BeNil()) + Expect(assistedController.waitForCSV(context.TODO(), LongWaitTimeout)).To(BeNil()) }) }) diff --git a/src/assisted_installer_controller/operator_handler.go b/src/assisted_installer_controller/operator_handler.go index b32efed59..6a000de25 100644 --- a/src/assisted_installer_controller/operator_handler.go +++ b/src/assisted_installer_controller/operator_handler.go @@ -18,6 +18,7 @@ type OperatorHandler interface { GetName() string GetStatus() (models.OperatorStatus, string, error) OnChange(newStatus models.OperatorStatus) bool + IsInitialized() bool } func (c controller) isOperatorAvailable(handler OperatorHandler) bool { @@ -78,6 +79,8 @@ func NewClusterOperatorHandler(kc k8s_client.K8SClient, operatorName string) *Cl func (handler ClusterOperatorHandler) GetName() string { return handler.operatorName } +func (handler ClusterOperatorHandler) IsInitialized() bool { return true } + func (handler ClusterOperatorHandler) GetStatus() (models.OperatorStatus, string, error) { co, err := handler.kc.GetClusterOperator(handler.operatorName) if err != nil { @@ -101,6 +104,8 @@ func NewClusterVersionHandler(kc k8s_client.K8SClient, timer *time.Timer) *Clust func (handler ClusterVersionHandler) GetName() string { return cvoOperatorName } +func (handler ClusterVersionHandler) IsInitialized() bool { return true } + func (handler ClusterVersionHandler) GetStatus() (models.OperatorStatus, string, error) { co, err := handler.kc.GetClusterVersion(clusterVersionName) if err != nil { @@ -135,6 +140,19 @@ func NewClusterServiceVersionHandler(kc k8s_client.K8SClient, operator *models.M func (handler ClusterServiceVersionHandler) GetName() string { return handler.operator.Name } +func (handler ClusterServiceVersionHandler) IsInitialized() bool { + csvName, err := handler.kc.GetCSVFromSubscription(handler.operator.Namespace, handler.operator.SubscriptionName) + if err != nil { + return false + } + + if csvName == "" { + return false + } + + return true +} + func (handler ClusterServiceVersionHandler) GetStatus() (models.OperatorStatus, string, error) { csvName, err := handler.kc.GetCSVFromSubscription(handler.operator.Namespace, handler.operator.SubscriptionName) if err != nil { @@ -152,7 +170,7 @@ func (handler ClusterServiceVersionHandler) GetStatus() (models.OperatorStatus, } func (handler ClusterServiceVersionHandler) OnChange(newStatus models.OperatorStatus) bool { - if utils.IsStatusFailed(newStatus) { + if IsStatusFailed(newStatus) { if handler.retries < failedOperatorRetry { // FIXME: We retry the check of the operator status in case it's in failed state to WA bug 1968606 // Remove this code when bug 1968606 is fixed @@ -164,3 +182,11 @@ func (handler ClusterServiceVersionHandler) OnChange(newStatus models.OperatorSt return true } + +func IsStatusFailed(operatorStatus models.OperatorStatus) bool { + return operatorStatus == models.OperatorStatusFailed +} + +func IsStatusSucceeded(operatorStatus models.OperatorStatus) bool { + return operatorStatus == models.OperatorStatusAvailable +} 
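[Editor's note] The new IsInitialized hook above is what feeds the waitForCSVBeCreated predicate earlier in this patch: an OLM operator counts as initialized once its Subscription reports a CSV name. A minimal, self-contained sketch of that polling contract, with a stub standing in for the real k8s_client.K8SClient (illustrative only; only GetCSVFromSubscription is modeled):

package main

import (
	"context"
	"fmt"
	"time"
)

// subscriptionClient stubs the single K8SClient call IsInitialized needs.
type subscriptionClient interface {
	GetCSVFromSubscription(namespace, subscription string) (string, error)
}

type fakeClient struct{ csv string }

func (f fakeClient) GetCSVFromSubscription(ns, sub string) (string, error) {
	return f.csv, nil
}

// isInitialized mirrors ClusterServiceVersionHandler.IsInitialized: a
// non-empty CSV name means the OLM has created the operator's CSV.
func isInitialized(kc subscriptionClient, namespace, subscription string) bool {
	name, err := kc.GetCSVFromSubscription(namespace, subscription)
	return err == nil && name != ""
}

// waitForPredicate is a reduced stand-in for WaitForPredicateParamsWithContext.
func waitForPredicate(ctx context.Context, timeout, interval time.Duration, pred func() bool) error {
	deadline := time.After(timeout)
	for {
		if pred() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-deadline:
			return fmt.Errorf("timed out waiting for predicate")
		case <-time.After(interval):
		}
	}
}

func main() {
	kc := fakeClient{csv: "lso-1.1"}
	err := waitForPredicate(context.Background(), time.Second, 100*time.Millisecond, func() bool {
		return isInitialized(kc, "openshift-local-storage", "local-storage-operator")
	})
	fmt.Println(err) // <nil> - the CSV already exists, so the wait returns at once
}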
diff --git a/src/ops/mock_ops.go b/src/ops/mock_ops.go index 5da042397..088cce261 100644 --- a/src/ops/mock_ops.go +++ b/src/ops/mock_ops.go @@ -371,7 +371,7 @@ func (mr *MockOpsMockRecorder) EvaluateDiskSymlink(arg0 interface{}) *gomock.Cal } // CreateManifests mocks base method -func (m *MockOps) CreateManifests(arg0, arg1 string) error { +func (m *MockOps) CreateManifests(arg0 string, arg1 []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateManifests", arg0, arg1) ret0, _ := ret[0].(error) diff --git a/src/ops/ops.go b/src/ops/ops.go index 46569b208..58a462426 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -47,7 +47,7 @@ type Ops interface { CreateRandomHostname(hostname string) error GetHostname() (string, error) EvaluateDiskSymlink(string) string - CreateManifests(string, string) error + CreateManifests(string, []byte) error } const ( @@ -568,13 +568,26 @@ func (o *ops) GetHostname() (string, error) { return os.Hostname() } -func (o *ops) CreateManifests(kubeconfig string, manifestFilePath string) error { - command := fmt.Sprintf("oc --kubeconfig=%s apply -f %s", kubeconfig, manifestFilePath) +func (o *ops) CreateManifests(kubeconfig string, content []byte) error { + // Create temp file, where we store the content to be created by the oc command: + file, err := ioutil.TempFile("", "operator-manifest") + if err != nil { + return err + } + defer os.Remove(file.Name()) + + // Write the content to the temporary file: + if err = ioutil.WriteFile(file.Name(), content, 0644); err != nil { + return err + } + + // Run oc command that creates the custom manifest: + command := fmt.Sprintf("oc --kubeconfig=%s apply -f %s", kubeconfig, file.Name()) output, err := o.ExecCommand(o.logWriter, "bash", "-c", command) if err != nil { return err } - o.log.Infof("Applying custom manifest file %s succeeded %s", manifestFilePath, output) + o.log.Infof("Applying custom manifest file %s succeeded %s", file.Name(), output) return nil } diff --git a/src/utils/utils.go b/src/utils/utils.go index f00c5a6ab..457853e72 100644 --- a/src/utils/utils.go +++ b/src/utils/utils.go @@ -222,6 +222,12 @@ func WaitForPredicateWithContext(ctx context.Context, timeout time.Duration, int }) } +func WaitForPredicateParamsWithContext(ctx context.Context, timeout time.Duration, interval time.Duration, predicate func(arg interface{}) bool, arg interface{}) error { + return WaitForPredicateWithTimer(ctx, timeout, interval, func(timer *time.Timer) bool { + return predicate(arg) + }) +} + // ProxyFromEnvVars provides an alternative to http.ProxyFromEnvironment since it is being initialized only // once and that happens by k8s before proxy settings were obtained. While this is no issue for k8s, it prevents // any out-of-cluster traffic from using the proxy @@ -283,10 +289,6 @@ func CsvStatusToOperatorStatus(csvStatus string) models.OperatorStatus { } } -func IsStatusFailed(operatorStatus models.OperatorStatus) bool { - return operatorStatus == models.OperatorStatusFailed -} - func ClusterOperatorConditionsToMonitoredOperatorStatus(conditions []configv1.ClusterOperatorStatusCondition) (models.OperatorStatus, string) { for _, condition := range conditions { if condition.Type == configv1.OperatorAvailable && condition.Status == configv1.ConditionTrue { From cc8d5bb07c39e58b851f2cb27fc884511d077b95 Mon Sep 17 00:00:00 2001 From: Ondra Machacek Date: Wed, 25 Aug 2021 20:13:12 +0200 Subject: [PATCH 26/43] OCPBUGSM-33782: Fix the manifest parsing (#347) This PR fixes the manifest JSON parsing and improves logging.
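[Editor's note] The parsing bug fixed by the diff below is the classic encoding/json pitfall: the manifest struct introduced in the previous patch used unexported fields (name, content), which json.Unmarshal silently ignores, so every decoded manifest came back empty. Exporting the fields is the whole fix. A runnable illustration — the JSON payload here is made up, but it follows the shape the controller reads from custom_manifests.json:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// With lowercase field names ("name", "content") json.Unmarshal cannot set
// the fields and leaves them empty; exported names fix the round trip.
type manifest struct {
	Name    string
	Content string // base64-encoded CR manifest
}

func main() {
	data := []byte(`[{"Name":"lso","Content":"YXBpVmVyc2lvbjogdjEK"}]`)

	var manifests []manifest
	if err := json.Unmarshal(data, &manifests); err != nil {
		panic(err)
	}
	content, err := base64.StdEncoding.DecodeString(manifests[0].Content)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %q\n", manifests[0].Name, content) // lso: "apiVersion: v1\n"
}

An alternative would have been to keep the fields unexported behind json struct tags on exported fields (e.g. `json:"name"`); the patch takes the simpler route of matching the exported names directly.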
--- .../assisted_installer_controller.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index d8d8b12b9..cc9978f18 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -106,9 +106,9 @@ type controller struct { // manifest stores the operator manifest used by assisted-installer to create CRs of the OLM: type manifest struct { // name of the operator whose CR manifest we want to create - name string + Name string // content of the manifest of the operator - content string + Content string } func NewController(log *logrus.Logger, cfg ControllerConfig, ops ops.Ops, ic inventory_client.InventoryClient, kc k8s_client.K8SClient) *controller { @@ -591,11 +591,15 @@ func (c controller) applyPostInstallManifests(arg interface{}) bool { return false } + c.log.Infof("Ready operators to be applied: %v", readyOperators) + for _, manifest := range manifests { + c.log.Infof("Applying manifest %s: %s", manifest.Name, manifest.Content) + // Check if the operator is properly initialized by CSV: if !func() bool { for _, readyOperator := range readyOperators { - if readyOperator == manifest.name { + if readyOperator == manifest.Name { return true } } @@ -604,9 +608,9 @@ continue } - content, err := base64.StdEncoding.DecodeString(manifest.content) + content, err := base64.StdEncoding.DecodeString(manifest.Content) if err != nil { - c.log.WithError(err).Errorf("Failed to decode content of operator CR %s.", manifest.name) + c.log.WithError(err).Errorf("Failed to decode content of operator CR %s.", manifest.Name) return false } @@ -615,6 +619,8 @@ err = c.ops.CreateManifests(kubeconfigName, content) if err != nil { c.log.WithError(err).Error("Failed to apply manifest file.") return false } + + c.log.Infof("Manifest %s applied.", manifest.Name) } return true From f2d2adee1b74965def7e5733c22c619d71740dc8 Mon Sep 17 00:00:00 2001 From: Eran Cohen Date: Wed, 1 Sep 2021 13:41:40 +0300 Subject: [PATCH 27/43] MGMT-7713: Worker won't get updated to 'Waiting for ignition' when installing with IPv6 (#350) Updated the regex to allow more chars between the host IP and 'Ignition'. This is required because in the MCS log the host IP is logged as a scoped literal IPv6 address, e.g. [fe80::ff:fe9d:12ac%ens3]:42692. This should also allow master nodes to get updated to 'Configuring'.
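[Editor's note] A quick runnable check of the widened pattern, using the scoped-IPv6 log line from test_files/mcs_logs.txt below. The scope suffix and port ("%ens3]:40548 User-Agent:\"") put 25 characters between the IP and "Ignition", so the old .{1,20} misses while .{1,40} matches:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	ips := []string{"192.168.126.11", "fe80::5054:ff:fe9a:4739"}
	line := `Pool master requested by [fe80::5054:ff:fe9a:4739%ens3]:40548 User-Agent:"Ignition/2.6.0"`

	narrow := regexp.MustCompile(fmt.Sprintf("(%s).{1,20}(Ignition)", strings.Join(ips, "|")))
	wide := regexp.MustCompile(fmt.Sprintf("(%s).{1,40}(Ignition)", strings.Join(ips, "|")))

	fmt.Println(narrow.MatchString(line), wide.MatchString(line)) // false true
}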
--- src/common/common.go | 2 +- src/common/common_test.go | 2 +- test_files/mcs_logs.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/common/common.go b/src/common/common.go index 749f639c3..9838c17e3 100644 --- a/src/common/common.go +++ b/src/common/common.go @@ -45,7 +45,7 @@ func SetConfiguringStatusForHosts(client inventory_client.InventoryClient, inven continue } log.Infof("Verifying if host %s pulled ignition", hostName) - pat := fmt.Sprintf("(%s).{1,20}(Ignition)", strings.Join(host.IPs, "|")) + pat := fmt.Sprintf("(%s).{1,40}(Ignition)", strings.Join(host.IPs, "|")) pattern, err := regexp.Compile(pat) if err != nil { log.WithError(err).Errorf("Failed to compile regex from host %s ips list", hostName) diff --git a/src/common/common_test.go b/src/common/common_test.go index 30614542a..38144636d 100644 --- a/src/common/common_test.go +++ b/src/common/common_test.go @@ -41,7 +41,7 @@ var _ = Describe("verify common", func() { IPs: []string{"192.168.126.10", "192.168.11.122", "fe80::5054:ff:fe9a:4738"}}, "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleWorker}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} - + // note that in the MCS log we use node 1's IPv6 address mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node1Id.String(), models.HostStageConfiguring, gomock.Any()).Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().UpdateHostInstallProgress(gomock.Any(), infraEnvId.String(), node2Id.String(), models.HostStageWaitingForIgnition, gomock.Any()).Return(nil).Times(1) SetConfiguringStatusForHosts(mockbmclient, testInventoryIdsIps, logs, true, l) diff --git a/test_files/mcs_logs.txt b/test_files/mcs_logs.txt index bfb6b1f94..bf5891ea0 100644 --- a/test_files/mcs_logs.txt +++ b/test_files/mcs_logs.txt @@ -4,6 +4,6 @@ 2020-07-01T16:57:08.449846700+00:00 stderr F I0701 16:57:08.449808 1 api.go:102] Pool master requested by 192.168.126.12:32780 User-Agent:"Ignition/2.6.0" 2020-07-01T16:57:08.449904020+00:00 stderr F I0701 16:57:08.449893 1 bootstrap_server.go:64] reading file "/etc/mcs/bootstrap/machine-pools/master.yaml" 2020-07-01T16:57:08.451073060+00:00 stderr F I0701 16:57:08.451054 1 bootstrap_server.go:84] reading file "/etc/mcs/bootstrap/machine-configs/rendered-master-39287e7d053e8395ab3c1ecd762dd578.yaml" -2020-07-01T16:57:22.319520480+00:00 stderr F I0701 16:57:22.319461 1 api.go:102] Pool master requested by 192.168.126.11:40548 User-Agent:"Ignition/2.6.0" +2020-07-01T16:57:22.319520480+00:00 stderr F I0701 16:57:22.319461 1 api.go:102] Pool master requested by [fe80::5054:ff:fe9a:4739%ens3]:40548 User-Agent:"Ignition/2.6.0" 2020-07-01T16:57:22.319520480+00:00 stderr F I0701 16:57:22.319485 1 bootstrap_server.go:64] reading file "/etc/mcs/bootstrap/machine-pools/master.yaml" 2020-07-01T16:57:22.320165920+00:00 stderr F I0701 16:57:22.320128 1 bootstrap_server.go:84] reading file "/etc/mcs/bootstrap/machine-configs/rendered-master-39287e7d053e8395ab3c1ecd762dd578.yaml" From 3673218609bec42b6cf64e2d81152e2cb25ced91 Mon
Sep 17 00:00:00 2001 From: Daniel Erez Date: Sun, 5 Sep 2021 12:54:28 +0300 Subject: [PATCH 28/43] MGMT-7750: get efi file according to cpu architecture (#351) SetBootOrder is using efibootmgr for selecting the correct device. The specified loader should be set with an appropriate efi file according to the runtime CPU architecture. I.e. x86_64 -> shimx64.efi arm64 -> shimaa64.efi --- src/ops/ops.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/ops/ops.go b/src/ops/ops.go index 58a462426..b47775762 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "runtime" "strconv" "text/template" @@ -232,7 +233,7 @@ func (o *ops) SetBootOrder(device string) error { o.log.Info("Setting efibootmgr to boot from disk") // efi-system is installed onto partition 2 - _, err = o.ExecPrivilegeCommand(o.logWriter, "efibootmgr", "-d", device, "-p", "2", "-c", "-L", "Red Hat Enterprise Linux", "-l", "\\EFI\\redhat\\shimx64.efi") + _, err = o.ExecPrivilegeCommand(o.logWriter, "efibootmgr", "-d", device, "-p", "2", "-c", "-L", "Red Hat Enterprise Linux", "-l", o.getEfiFilePath()) if err != nil { o.log.Errorf("Failed to set efibootmgr to boot from disk %s, err: %s", device, err) return err @@ -240,6 +241,18 @@ func (o *ops) SetBootOrder(device string) error { return nil } +func (o *ops) getEfiFilePath() string { + var efiFileName string + switch runtime.GOARCH { + case "arm64": + efiFileName = "shimaa64.efi" + default: + efiFileName = "shimx64.efi" + } + o.log.Infof("Using EFI file '%s' for GOARCH '%s'", efiFileName, runtime.GOARCH) + return fmt.Sprintf("\\EFI\\redhat\\%s", efiFileName) +} + func (o *ops) ExtractFromIgnition(ignitionPath string, fileToExtract string) error { o.log.Infof("Getting data from %s", ignitionPath) ignitionData, err := ioutil.ReadFile(ignitionPath) From dbbd369ef45d1520b9bdd7f69d5c565a0841f872 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 8 Sep 2021 17:58:22 +0100 Subject: [PATCH 29/43] NO-ISSUE: Use -v for efibootmgr to get detailed output (#354) Signed-off-by: Flavio Percoco Co-authored-by: Flavio Percoco --- src/ops/ops.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ops/ops.go b/src/ops/ops.go index b47775762..03abde21b 100644 --- a/src/ops/ops.go +++ b/src/ops/ops.go @@ -233,7 +233,7 @@ func (o *ops) SetBootOrder(device string) error { o.log.Info("Setting efibootmgr to boot from disk") // efi-system is installed onto partition 2 - _, err = o.ExecPrivilegeCommand(o.logWriter, "efibootmgr", "-d", device, "-p", "2", "-c", "-L", "Red Hat Enterprise Linux", "-l", o.getEfiFilePath()) + _, err = o.ExecPrivilegeCommand(o.logWriter, "efibootmgr", "-v", "-d", device, "-p", "2", "-c", "-L", "Red Hat Enterprise Linux", "-l", o.getEfiFilePath()) if err != nil { o.log.Errorf("Failed to set efibootmgr to boot from disk %s, err: %s", device, err) return err From 4f877837986b0d4dfe5d9228bfb8e9f7fb77c888 Mon Sep 17 00:00:00 2001 From: Igal Tsoiref Date: Sun, 19 Sep 2021 13:55:40 +0300 Subject: [PATCH 30/43] NO-ISSUE: upload controller logs with operator status before must-gather (#357) --- .../assisted_installer_controller.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index cc9978f18..117d70a64 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go 
@@ -1003,8 +1003,14 @@ func (c controller) uploadSummaryLogs(podName string, namespace string, sinceSec var ok bool = true ctx := utils.GenerateRequestContext() + // Upload the operator status logs before running must-gather c.logClusterOperatorsStatus() if c.Status.HasError() || c.Status.HasOperatorError() { + c.log.Infof("Uploading cluster operator status logs before must-gather") + err := common.UploadPodLogs(c.kc, c.ic, c.ClusterID, podName, c.Namespace, controllerLogsSecondsAgo, c.log) + if err != nil { + c.log.WithError(err).Warnf("Failed to upload controller logs") + } c.log.Infof("Uploading oc must-gather logs") images := c.parseMustGatherImages() if tarfile, err := c.collectMustGatherLogs(ctx, images...); err == nil { From f4397ab54f417fffb1fad1b2adc9b0022ca2e31f Mon Sep 17 00:00:00 2001 From: Mateusz Kowalski Date: Wed, 22 Sep 2021 13:23:43 +0200 Subject: [PATCH 31/43] MGMT-7315: Support IP-based matching of nodes during installation (#359) This commit adds an ability to match nodes during the installation using their IP addresses as well as reported hostnames. Currently only the hostname of the node is taken into account and compared against the known inventory. With this PR we are adding a feature that, in case of a name mismatch, performs a scan over IP addresses of the reporting node and nodes in the inventory and, if a match is found, accepts the node. This is to cover cases where the node name in the inventory is not an exact match with the name reported by the node itself. Contributes-to: MGMT-7315
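[Editor's note] The two matching rules described above, reduced to a runnable standalone model (simplified types; the real code operates on v1.Node and inventory_client.HostData as shown in the diff below):

package main

import (
	"fmt"
	"strings"
)

type host struct{ id string }

// match applies the two rules: exact (lowercased) hostname match first,
// then a fallback scan over the node's known IP addresses.
func match(nodeName string, nodeIPs []string, byName, byIP map[string]host) (host, bool) {
	if h, ok := byName[strings.ToLower(nodeName)]; ok {
		return h, true // rule 1: hostname is known in the inventory
	}
	for _, ip := range nodeIPs {
		if h, ok := byIP[ip]; ok {
			return h, true // rule 2: hostname unknown, but the IP is
		}
	}
	return host{}, false
}

func main() {
	byName := map[string]host{"node0": {"id-0"}}
	byIP := map[string]host{"192.168.126.10": {"id-0"}}

	// The node reports an FQDN the inventory does not know, but its
	// internal IP is in the inventory, so the match still succeeds:
	h, ok := match("node0.example.com", []string{"192.168.126.10"}, byName, byIP)
	fmt.Println(h.id, ok) // id-0 true
}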
+func BuildHostsMapIPAddressBased(inventoryHostsMap map[string]inventory_client.HostData) map[string]inventory_client.HostData { + knownIpAddresses := map[string]inventory_client.HostData{} + for _, v := range inventoryHostsMap { + for _, ip := range v.IPs { + knownIpAddresses[ip] = v + } + } + return knownIpAddresses +} + +// Matching of the host happens based on 2 rules +// * if the name of the host and in the inventory is exactly the same, use use it +// * if the name is not known in the inventory, we check if the IP address of the +// reporting host is known to the inventory +// Using those rules we can cover the cases where e.g. inventory expects a short +// hostname, but the host reports itself using its FQDN +func HostMatchByNameOrIPAddress(node v1.Node, namesMap, IPAddressMap map[string]inventory_client.HostData) (inventory_client.HostData, bool) { + host, ok := namesMap[strings.ToLower(node.Name)] + if !ok { + for _, ip := range node.Status.Addresses { + _, exists := IPAddressMap[ip.Address] + if exists && ip.Type == v1.NodeInternalIP { + ok = true + host = IPAddressMap[ip.Address] + } + } + } + return host, ok +} diff --git a/src/common/common_test.go b/src/common/common_test.go index 38144636d..7f09ac765 100644 --- a/src/common/common_test.go +++ b/src/common/common_test.go @@ -1,6 +1,7 @@ package common import ( + "encoding/json" "fmt" "io/ioutil" "testing" @@ -13,6 +14,7 @@ import ( "github.com/openshift/assisted-installer/src/inventory_client" "github.com/openshift/assisted-service/models" "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" ) func TestCommon(t *testing.T) { @@ -121,4 +123,61 @@ var _ = Describe("verify common", func() { }) } }) + + Context("Verify name- and IP-based matching", func() { + var testInventoryIdsIps, knownIpAddresses map[string]inventory_client.HostData + var node0Id, node1Id, node2Id strfmt.UUID + + BeforeEach(func() { + infraEnvId := strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f250") + node0Id = strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f238") + node1Id = strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f239") + node2Id = strfmt.UUID("eb82821f-bf21-4614-9a3b-ecb07929f240") + + testInventoryIdsIps = map[string]inventory_client.HostData{"node0": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node0Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, + IPs: []string{"192.168.126.10", "192.168.39.248", "fe80::5054:ff:fe9a:4738"}}, + "node1": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node1Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleMaster}, IPs: []string{"192.168.126.11", "192.168.11.123", "fe80::5054:ff:fe9a:4739"}}, + "node2": {Host: &models.Host{InfraEnvID: infraEnvId, ID: &node2Id, Progress: &models.HostProgressInfo{CurrentStage: models.HostStageRebooting}, Role: models.HostRoleWorker}, IPs: []string{"192.168.126.12", "192.168.11.124", "fe80::5054:ff:fe9a:4740"}}} + knownIpAddresses = BuildHostsMapIPAddressBased(testInventoryIdsIps) + }) + + It("test BuildHostsMapIPAddressBased", func() { + Expect(len(knownIpAddresses)).To(Equal(9)) + Expect(knownIpAddresses["192.168.126.10"].Host.ID).To(Equal(&node0Id)) + Expect(knownIpAddresses["192.168.11.123"].Host.ID).To(Equal(&node1Id)) + Expect(knownIpAddresses["fe80::5054:ff:fe9a:4740"].Host.ID).To(Equal(&node2Id)) + Expect(knownIpAddresses["10.0.0.1"]).To(Equal(inventory_client.HostData{IPs: nil, Inventory: nil, Host: nil})) + }) + + It("test HostMatchByNameOrIPAddress by name", 
func() { + nodes := GetKubeNodes(map[string]string{"node1": "6d6f00e8-dead-beef-cafe-0f1459485ad9"}) + Expect(len(nodes.Items)).To(Equal(1)) + Expect(nodes.Items[0].Name).To(Equal("node1")) + match, ok := HostMatchByNameOrIPAddress(nodes.Items[0], testInventoryIdsIps, knownIpAddresses) + Expect(ok).To(Equal(true)) + Expect(match.Host.ID).To(Equal(&node1Id)) + }) + + It("test HostMatchByNameOrIPAddress by IP", func() { + nodes := GetKubeNodes(map[string]string{"some-fake-name": "6d6f00e8-dead-beef-cafe-0f1459485ad9"}) + Expect(len(nodes.Items)).To(Equal(1)) + Expect(nodes.Items[0].Name).To(Equal("some-fake-name")) + match, ok := HostMatchByNameOrIPAddress(nodes.Items[0], testInventoryIdsIps, knownIpAddresses) + Expect(ok).To(Equal(true)) + Expect(match.Host.ID).To(Equal(&node0Id)) + }) + }) }) + +func GetKubeNodes(kubeNamesIds map[string]string) *v1.NodeList { + file, _ := ioutil.ReadFile("../../test_files/node.json") + var node v1.Node + _ = json.Unmarshal(file, &node) + nodeList := &v1.NodeList{} + for name, id := range kubeNamesIds { + node.Status.NodeInfo.SystemUUID = id + node.Name = name + nodeList.Items = append(nodeList.Items, node) + } + return nodeList +} diff --git a/src/installer/installer.go b/src/installer/installer.go index c47bad4af..df2731791 100644 --- a/src/installer/installer.go +++ b/src/installer/installer.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "path/filepath" - "strings" "time" "github.com/go-openapi/swag" @@ -634,6 +633,8 @@ func (i *installer) getInventoryHostsMap(hostsMap map[string]inventory_client.Ho func (i *installer) updateReadyMasters(nodes *v1.NodeList, readyMasters *[]string, inventoryHostsMap map[string]inventory_client.HostData) error { nodeNameAndCondition := map[string][]v1.NodeCondition{} + knownIpAddresses := common.BuildHostsMapIPAddressBased(inventoryHostsMap) + for _, node := range nodes.Items { nodeNameAndCondition[node.Name] = node.Status.Conditions if common.IsK8sNodeIsReady(node) && !funk.ContainsString(*readyMasters, node.Name) { @@ -641,7 +642,8 @@ func (i *installer) updateReadyMasters(nodes *v1.NodeList, readyMasters *[]strin log := utils.RequestIDLogger(ctx, i.log) log.Infof("Found a new ready master node %s with id %s", node.Name, node.Status.NodeInfo.SystemUUID) *readyMasters = append(*readyMasters, node.Name) - host, ok := inventoryHostsMap[strings.ToLower(node.Name)] + + host, ok := common.HostMatchByNameOrIPAddress(node, inventoryHostsMap, knownIpAddresses) if !ok { return fmt.Errorf("Node %s is not in inventory hosts", node.Name) } From 6f4c2222f36983578bbbb57502c8e09e12600ef6 Mon Sep 17 00:00:00 2001 From: Yevgeny Shnaidman <60741237+yevgeny-shnaidman@users.noreply.github.com> Date: Wed, 22 Sep 2021 21:59:35 +0300 Subject: [PATCH 32/43] MGMT-7504: move assisted-installer/controller to use V2 APIs for communicating with assisted-service (#358) This PR mainly converts the inventory_client to use V2 APIs instead of V1 for all its internal implementation. 
Since many functions that access host data now need the InfraEnvID, it is taken from the configuration of the assisted-installer (set during the install command)
---
 go.mod                                        |  2 +-
 go.sum                                        |  6 ++
 .../assisted_installer_controller.go          |  2 +-
 .../assisted_installer_controller_test.go     | 10 +--
 src/installer/installer.go                    |  4 +-
 src/installer/installer_test.go               | 28 +++----
 src/inventory_client/inventory_client.go      | 73 ++++++++++++-------
 src/inventory_client/mock_inventory_client.go | 22 +++++-
 .../assisted_installer_main.go                |  4 +-
 .../assisted_installer_main_test.go           | 10 +--
 10 files changed, 101 insertions(+), 60 deletions(-)

diff --git a/go.mod b/go.mod
index 200d56eba..34621e5bf 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ require (
 	github.com/onsi/gomega v1.13.0
 	github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
 	github.com/openshift/assisted-installer-agent v0.0.0-20200811180147-bc9c7b899b8a
-	github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515
+	github.com/openshift/assisted-service v1.0.10-0.20210921191140-7dcb60579fdf
 	github.com/openshift/client-go v0.0.0-20201020074620-f8fd44879f7c
 	github.com/openshift/machine-api-operator v0.2.1-0.20201002104344-6abfb5440597
 	github.com/operator-framework/api v0.8.0
diff --git a/go.sum b/go.sum
index c7ec41043..9da6317d7 100644
--- a/go.sum
+++ b/go.sum
@@ -178,6 +178,7 @@ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQ
 github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
@@ -903,6 +904,7 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
 github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
 github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -1039,6 +1041,10 @@ github.com/openshift/assisted-service v1.0.10-0.20210729090313-b33b6f69330b h1:8
 github.com/openshift/assisted-service v1.0.10-0.20210729090313-b33b6f69330b/go.mod h1:06CYHjrS5tanbGRM4ZB3Sd0gcaSIfMkXNjDmZVFAWhs=
 github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515 h1:1CnH/Cy9KjPeT3+ThA6nLSUY3cA3IEix1k3rKvNuxJY=
 github.com/openshift/assisted-service v1.0.10-0.20210808073533-4afc4b5ae515/go.mod
h1:06CYHjrS5tanbGRM4ZB3Sd0gcaSIfMkXNjDmZVFAWhs= +github.com/openshift/assisted-service v1.0.10-0.20210919133239-48a7ecedcb85 h1:xU8eqsoc2YenPcD9MAaXDMEQDTmcTsVfbaHoKZ+eemw= +github.com/openshift/assisted-service v1.0.10-0.20210919133239-48a7ecedcb85/go.mod h1:SsuNh9LQjVGO4N8PG5fp8G0LVSkVcWcF158yOCJFAj8= +github.com/openshift/assisted-service v1.0.10-0.20210921191140-7dcb60579fdf h1:wvV0DoZNXFPPLiqv1O4VpjhOhAZW+KcKkZ9w0z/f7Lg= +github.com/openshift/assisted-service v1.0.10-0.20210921191140-7dcb60579fdf/go.mod h1:SsuNh9LQjVGO4N8PG5fp8G0LVSkVcWcF158yOCJFAj8= github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe h1:bu99IMkaN6o/JcxpWEb1eT8gDdL9hLcwOmfiVIbXWj8= github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe/go.mod h1:DOgBIuBcXuTD8uub0jL7h6gBdIBt3CFrwz6K2FtfMBA= github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index dad55ece4..e653b7130 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -1098,7 +1098,7 @@ func (c controller) parseMustGatherImages() []string { func (c controller) downloadKubeconfigNoingress(ctx context.Context, dir string) (string, error) { // Download kubeconfig file kubeconfigPath := path.Join(dir, kubeconfigFileName) - err := c.ic.DownloadFile(ctx, kubeconfigFileName, kubeconfigPath) + err := c.ic.DownloadClusterCredentials(ctx, kubeconfigFileName, kubeconfigPath) if err != nil { c.log.Errorf("Failed to download noingress kubeconfig %v\n", err) return "", err diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index 0031daa0f..a478f9e2d 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -232,7 +232,7 @@ var _ = Describe("installer HostRoleMaster role", func() { return nil }, ).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), kubeconfigFileName, gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), kubeconfigFileName, gomock.Any()).Return(nil).Times(1) } Context("Waiting for 3 nodes", func() { @@ -1033,7 +1033,7 @@ var _ = Describe("installer HostRoleMaster role", func() { successUpload() logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), assistedController.MustGatherImage).Return("../../test_files/tartest.tar.gz", nil).Times(1) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) assistedController.Status.Error() callUploadLogs(150 * time.Millisecond) }) @@ -1042,14 +1042,14 @@ var _ = Describe("installer HostRoleMaster role", func() { successUpload() logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) callUploadLogs(50 * time.Millisecond) }) It("Validate upload logs exits with no 
error + failed upload", func() { logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) mockbmclient.EXPECT().UploadLogs(gomock.Any(), assistedController.ClusterID, models.LogsTypeController, gomock.Any()).Return(fmt.Errorf("dummy")).AnyTimes() callUploadLogs(50 * time.Millisecond) }) @@ -1059,7 +1059,7 @@ var _ = Describe("installer HostRoleMaster role", func() { logClusterOperatorsSuccess() mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Return("", fmt.Errorf("failed")) mockops.EXPECT().GetMustGatherLogs(gomock.Any(), gomock.Any(), gomock.Any()).Return("../../test_files/tartest.tar.gz", nil) - mockbmclient.EXPECT().DownloadFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) + mockbmclient.EXPECT().DownloadClusterCredentials(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) assistedController.Status.Error() callUploadLogs(50 * time.Millisecond) }) diff --git a/src/installer/installer.go b/src/installer/installer.go index df2731791..b5558686a 100644 --- a/src/installer/installer.go +++ b/src/installer/installer.go @@ -141,7 +141,7 @@ func (i *installer) InstallNode() error { } //upload host logs and report log status before reboot i.log.Infof("Uploading logs and reporting status before rebooting the node %s for cluster %s", i.Config.HostID, i.Config.ClusterID) - i.inventoryClient.HostLogProgressReport(ctx, i.Config.ClusterID, i.Config.HostID, models.LogsStateRequested) + i.inventoryClient.HostLogProgressReport(ctx, i.Config.InfraEnvID, i.Config.HostID, models.LogsStateRequested) _, err = i.ops.UploadInstallationLogs(isBootstrap || i.HighAvailabilityMode == models.ClusterHighAvailabilityModeNone) if err != nil { i.log.Errorf("upload installation logs %s", err) @@ -326,7 +326,7 @@ func (i *installer) downloadHostIgnition() (string, error) { log.Infof("Getting %s file", filename) dest := filepath.Join(InstallDir, filename) - err := i.inventoryClient.DownloadHostIgnition(ctx, i.Config.HostID, dest) + err := i.inventoryClient.DownloadHostIgnition(ctx, i.Config.InfraEnvID, i.Config.HostID, dest) if err != nil { log.Errorf("Failed to fetch file (%s) from server. 
err: %s", filename, err) } diff --git a/src/installer/installer_test.go b/src/installer/installer_test.go index 1ac2f357c..9ed3e5f63 100644 --- a/src/installer/installer_test.go +++ b/src/installer/installer_test.go @@ -66,8 +66,8 @@ var _ = Describe("installer HostRoleMaster role", func() { downloadFileSuccess := func(fileName string) { mockbmclient.EXPECT().DownloadFile(gomock.Any(), fileName, filepath.Join(InstallDir, fileName)).Return(nil).Times(1) } - downloadHostIgnitionSuccess := func(hostID string, fileName string) { - mockbmclient.EXPECT().DownloadHostIgnition(gomock.Any(), hostID, filepath.Join(InstallDir, fileName)).Return(nil).Times(1) + downloadHostIgnitionSuccess := func(infraEnvID string, hostID string, fileName string) { + mockbmclient.EXPECT().DownloadHostIgnition(gomock.Any(), infraEnvID, hostID, filepath.Join(InstallDir, fileName)).Return(nil).Times(1) } reportLogProgressSuccess := func() { @@ -279,7 +279,7 @@ var _ = Describe("installer HostRoleMaster role", func() { resolvConfSuccess() waitForControllerSuccessfully(conf.ClusterID) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) reportLogProgressSuccess() setBootOrderSuccess(gomock.Any()) @@ -312,7 +312,7 @@ var _ = Describe("installer HostRoleMaster role", func() { resolvConfSuccess() waitForControllerSuccessfully(conf.ClusterID) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(true) @@ -338,7 +338,7 @@ var _ = Describe("installer HostRoleMaster role", func() { err := fmt.Errorf("generate SSH keys failed") mockops.EXPECT().CreateOpenshiftSshManifest(assistedInstallerSshManifest, sshManifestTmpl, sshPubKeyPath).Return(err).Times(1) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) ret := installerObj.InstallNode() @@ -363,7 +363,7 @@ var _ = Describe("installer HostRoleMaster role", func() { resolvConfSuccess() waitForControllerSuccessfully(conf.ClusterID) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(true) @@ -381,7 +381,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mkdirSuccess(InstallDir) mkdirSuccess(sshDir) downloadFileSuccess(bootstrapIgn) - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) extractSecretFromIgnitionSuccess() @@ -402,7 +402,7 @@ var _ = Describe("installer HostRoleMaster role", func() { err := fmt.Errorf("Failed to restart NetworkManager") restartNetworkManager(err) //HostRoleMaster flow: - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(gomock.Any()) setBootOrderSuccess(gomock.Any()) ret := installerObj.InstallNode() @@ -517,7 +517,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) - 
downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") writeToDiskSuccess(installerArgs) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(false) @@ -566,7 +566,7 @@ var _ = Describe("installer HostRoleMaster role", func() { cleanInstallDevice() mkdirSuccess(InstallDir) err := fmt.Errorf("failed to fetch file") - mockbmclient.EXPECT().DownloadHostIgnition(gomock.Any(), hostId, filepath.Join(InstallDir, "master-host-id.ign")).Return(err).Times(1) + mockbmclient.EXPECT().DownloadHostIgnition(gomock.Any(), infraEnvId, hostId, filepath.Join(InstallDir, "master-host-id.ign")).Return(err).Times(1) ret := installerObj.InstallNode() Expect(ret).Should(Equal(err)) }) @@ -577,7 +577,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") err := fmt.Errorf("failed to write image to disk") mockops.EXPECT().WriteImageToDisk(filepath.Join(InstallDir, "master-host-id.ign"), device, mockbmclient, installerArgs).Return(err).Times(3) ret := installerObj.InstallNode() @@ -591,7 +591,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) cleanInstallDevice() mkdirSuccess(InstallDir) - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") uploadLogsSuccess(false) reportLogProgressSuccess() writeToDiskSuccess(installerArgs) @@ -641,7 +641,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&cluster, nil).Times(1) cleanInstallDevice() mkdirSuccess(InstallDir) - downloadHostIgnitionSuccess(hostId, "worker-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "worker-host-id.ign") mockops.EXPECT().WriteImageToDisk(filepath.Join(InstallDir, "worker-host-id.ign"), device, mockbmclient, nil).Return(nil).Times(1) setBootOrderSuccess(gomock.Any()) // failure must do nothing @@ -742,7 +742,7 @@ var _ = Describe("installer HostRoleMaster role", func() { //HostRoleMaster flow: verifySingleNodeMasterIgnitionSuccess() singleNodeMergeIgnitionSuccess() - downloadHostIgnitionSuccess(hostId, "master-host-id.ign") + downloadHostIgnitionSuccess(infraEnvId, hostId, "master-host-id.ign") mockops.EXPECT().WriteImageToDisk(singleNodeMasterIgnitionPath, device, mockbmclient, nil).Return(nil).Times(1) setBootOrderSuccess(gomock.Any()) uploadLogsSuccess(true) diff --git a/src/inventory_client/inventory_client.go b/src/inventory_client/inventory_client.go index 14ff9eeb7..2086fdcd9 100644 --- a/src/inventory_client/inventory_client.go +++ b/src/inventory_client/inventory_client.go @@ -42,7 +42,8 @@ const ( //go:generate mockgen -source=inventory_client.go -package=inventory_client -destination=mock_inventory_client.go type InventoryClient interface { DownloadFile(ctx context.Context, filename string, dest string) error - DownloadHostIgnition(ctx context.Context, hostID string, dest string) error + DownloadClusterCredentials(ctx context.Context, filename string, dest string) error + DownloadHostIgnition(ctx context.Context, infraEnvID string, hostID string, dest string) error UpdateHostInstallProgress(ctx context.Context, infraEnvId string, hostId string, newStage models.HostStage, info string) error GetEnabledHostsNamesHosts(ctx context.Context, log logrus.FieldLogger) (map[string]HostData, error) 
UploadIngressCa(ctx context.Context, ingressCA string, clusterId string) error @@ -184,11 +185,11 @@ func (c *inventoryClient) DownloadFile(ctx context.Context, filename string, des fo.Close() }() c.logger.Infof("Downloading file %s to %s", filename, dest) - _, err = c.ai.Installer.DownloadClusterFiles(ctx, c.createDownloadParams(filename), fo) + _, err = c.ai.Installer.V2DownloadClusterFiles(ctx, c.createDownloadParams(filename), fo) return aserror.GetAssistedError(err) } -func (c *inventoryClient) DownloadHostIgnition(ctx context.Context, hostID string, dest string) error { +func (c *inventoryClient) DownloadClusterCredentials(ctx context.Context, filename string, dest string) error { // open output file fo, err := os.Create(dest) if err != nil { @@ -198,12 +199,32 @@ func (c *inventoryClient) DownloadHostIgnition(ctx context.Context, hostID strin defer func() { fo.Close() }() + c.logger.Infof("Downloading cluster credentials %s to %s", filename, dest) - params := installer.DownloadHostIgnitionParams{ + params := installer.V2DownloadClusterCredentialsParams{ ClusterID: c.clusterId, - HostID: strfmt.UUID(hostID), + FileName: filename, + } + _, err = c.ai.Installer.V2DownloadClusterCredentials(ctx, ¶ms, fo) + return aserror.GetAssistedError(err) +} + +func (c *inventoryClient) DownloadHostIgnition(ctx context.Context, infraEnvID string, hostID string, dest string) error { + // open output file + fo, err := os.Create(dest) + if err != nil { + return err + } + // close fo on exit and check for its returned error + defer func() { + fo.Close() + }() + + params := installer.V2DownloadHostIgnitionParams{ + InfraEnvID: strfmt.UUID(infraEnvID), + HostID: strfmt.UUID(hostID), } - _, err = c.ai.Installer.DownloadHostIgnition(ctx, ¶ms, fo) + _, err = c.ai.Installer.V2DownloadHostIgnition(ctx, ¶ms, fo) return aserror.GetAssistedError(err) } @@ -213,13 +234,13 @@ func (c *inventoryClient) UpdateHostInstallProgress(ctx context.Context, infraEn } func (c *inventoryClient) UploadIngressCa(ctx context.Context, ingressCA string, clusterId string) error { - _, err := c.ai.Installer.UploadClusterIngressCert(ctx, - &installer.UploadClusterIngressCertParams{ClusterID: strfmt.UUID(clusterId), IngressCertParams: models.IngressCertParams(ingressCA)}) + _, err := c.ai.Installer.V2UploadClusterIngressCert(ctx, + &installer.V2UploadClusterIngressCertParams{ClusterID: strfmt.UUID(clusterId), IngressCertParams: models.IngressCertParams(ingressCA)}) return aserror.GetAssistedError(err) } func (c *inventoryClient) GetCluster(ctx context.Context) (*models.Cluster, error) { - cluster, err := c.ai.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: c.clusterId}) + cluster, err := c.ai.Installer.V2GetCluster(ctx, &installer.V2GetClusterParams{ClusterID: c.clusterId}) if err != nil { return nil, err } @@ -228,7 +249,7 @@ func (c *inventoryClient) GetCluster(ctx context.Context) (*models.Cluster, erro } func (c *inventoryClient) GetClusterMonitoredOperator(ctx context.Context, clusterId, operatorName string) (*models.MonitoredOperator, error) { - monitoredOperators, err := c.ai.Operators.ListOfClusterOperators(ctx, &operators.ListOfClusterOperatorsParams{ + monitoredOperators, err := c.ai.Operators.V2ListOfClusterOperators(ctx, &operators.V2ListOfClusterOperatorsParams{ ClusterID: strfmt.UUID(clusterId), OperatorName: &operatorName, }) @@ -240,7 +261,7 @@ func (c *inventoryClient) GetClusterMonitoredOperator(ctx context.Context, clust } func (c *inventoryClient) GetClusterMonitoredOLMOperators(ctx context.Context, 
clusterId string) ([]models.MonitoredOperator, error) { - monitoredOperators, err := c.ai.Operators.ListOfClusterOperators(ctx, &operators.ListOfClusterOperatorsParams{ClusterID: strfmt.UUID(clusterId)}) + monitoredOperators, err := c.ai.Operators.V2ListOfClusterOperators(ctx, &operators.V2ListOfClusterOperatorsParams{ClusterID: strfmt.UUID(clusterId)}) if err != nil { return nil, aserror.GetAssistedError(err) } @@ -284,8 +305,8 @@ func createUrl(baseURL string) string { ) } -func (c *inventoryClient) createDownloadParams(filename string) *installer.DownloadClusterFilesParams { - return &installer.DownloadClusterFilesParams{ +func (c *inventoryClient) createDownloadParams(filename string) *installer.V2DownloadClusterFilesParams { + return &installer.V2DownloadClusterFilesParams{ ClusterID: c.clusterId, FileName: filename, } @@ -304,11 +325,11 @@ func (c *inventoryClient) createUpdateHostInstallProgressParams(infraEnvId, host func (c *inventoryClient) getHostsWithInventoryInfo(ctx context.Context, log logrus.FieldLogger, skippedStatuses []string) (map[string]HostData, error) { hostsWithHwInfo := make(map[string]HostData) - hosts, err := c.ai.Installer.ListHosts(ctx, &installer.ListHostsParams{ClusterID: c.clusterId}) + clusterData, err := c.GetCluster(ctx) if err != nil { - return nil, aserror.GetAssistedError(err) + return nil, err } - for _, host := range hosts.Payload { + for _, host := range clusterData.Hosts { if funk.IndexOf(skippedStatuses, *host.Status) > -1 { continue } @@ -324,22 +345,22 @@ func (c *inventoryClient) getHostsWithInventoryInfo(ctx context.Context, log log } func (c *inventoryClient) CompleteInstallation(ctx context.Context, clusterId string, isSuccess bool, errorInfo string) error { - _, err := c.ai.Installer.CompleteInstallation(ctx, - &installer.CompleteInstallationParams{ClusterID: strfmt.UUID(clusterId), + _, err := c.ai.Installer.V2CompleteInstallation(ctx, + &installer.V2CompleteInstallationParams{ClusterID: strfmt.UUID(clusterId), CompletionParams: &models.CompletionParams{IsSuccess: &isSuccess, ErrorInfo: errorInfo}}) return aserror.GetAssistedError(err) } func (c *inventoryClient) UploadLogs(ctx context.Context, clusterId string, logsType models.LogsType, upfile io.Reader) error { fileName := fmt.Sprintf("%s_logs.tar.gz", string(logsType)) - _, err := c.ai.Installer.UploadLogs(ctx, - &installer.UploadLogsParams{ClusterID: strfmt.UUID(clusterId), LogsType: string(logsType), + _, err := c.ai.Installer.V2UploadLogs(ctx, + &installer.V2UploadLogsParams{ClusterID: strfmt.UUID(clusterId), LogsType: string(logsType), Upfile: runtime.NamedReader(fileName, upfile)}) return aserror.GetAssistedError(err) } func (c *inventoryClient) ClusterLogProgressReport(ctx context.Context, clusterId string, progress models.LogsState) { - _, err := c.ai.Installer.UpdateClusterLogsProgress(ctx, &installer.UpdateClusterLogsProgressParams{ + _, err := c.ai.Installer.V2UpdateClusterLogsProgress(ctx, &installer.V2UpdateClusterLogsProgressParams{ ClusterID: strfmt.UUID(clusterId), LogsProgressParams: &models.LogsProgressParams{ LogsState: progress, @@ -350,10 +371,10 @@ func (c *inventoryClient) ClusterLogProgressReport(ctx context.Context, clusterI } } -func (c *inventoryClient) HostLogProgressReport(ctx context.Context, clusterId string, hostId string, progress models.LogsState) { - _, err := c.ai.Installer.UpdateHostLogsProgress(ctx, &installer.UpdateHostLogsProgressParams{ - ClusterID: strfmt.UUID(clusterId), - HostID: strfmt.UUID(hostId), +func (c *inventoryClient) 
HostLogProgressReport(ctx context.Context, infraEnvId string, hostId string, progress models.LogsState) { + _, err := c.ai.Installer.V2UpdateHostLogsProgress(ctx, &installer.V2UpdateHostLogsProgressParams{ + InfraEnvID: strfmt.UUID(infraEnvId), + HostID: strfmt.UUID(hostId), LogsProgressParams: &models.LogsProgressParams{ LogsState: progress, }, @@ -364,7 +385,7 @@ func (c *inventoryClient) HostLogProgressReport(ctx context.Context, clusterId s } func (c *inventoryClient) UpdateClusterOperator(ctx context.Context, clusterId string, operatorName string, operatorStatus models.OperatorStatus, operatorStatusInfo string) error { - _, err := c.ai.Operators.ReportMonitoredOperatorStatus(ctx, &operators.ReportMonitoredOperatorStatusParams{ + _, err := c.ai.Operators.V2ReportMonitoredOperatorStatus(ctx, &operators.V2ReportMonitoredOperatorStatusParams{ ClusterID: c.clusterId, ReportParams: &models.OperatorMonitorReport{ Name: operatorName, diff --git a/src/inventory_client/mock_inventory_client.go b/src/inventory_client/mock_inventory_client.go index 0284d803c..c36b05495 100644 --- a/src/inventory_client/mock_inventory_client.go +++ b/src/inventory_client/mock_inventory_client.go @@ -51,18 +51,32 @@ func (mr *MockInventoryClientMockRecorder) DownloadFile(ctx, filename, dest inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadFile", reflect.TypeOf((*MockInventoryClient)(nil).DownloadFile), ctx, filename, dest) } +// DownloadClusterCredentials mocks base method +func (m *MockInventoryClient) DownloadClusterCredentials(ctx context.Context, filename, dest string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DownloadClusterCredentials", ctx, filename, dest) + ret0, _ := ret[0].(error) + return ret0 +} + +// DownloadClusterCredentials indicates an expected call of DownloadClusterCredentials +func (mr *MockInventoryClientMockRecorder) DownloadClusterCredentials(ctx, filename, dest interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadClusterCredentials", reflect.TypeOf((*MockInventoryClient)(nil).DownloadClusterCredentials), ctx, filename, dest) +} + // DownloadHostIgnition mocks base method -func (m *MockInventoryClient) DownloadHostIgnition(ctx context.Context, hostID, dest string) error { +func (m *MockInventoryClient) DownloadHostIgnition(ctx context.Context, infraEnvID, hostID, dest string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DownloadHostIgnition", ctx, hostID, dest) + ret := m.ctrl.Call(m, "DownloadHostIgnition", ctx, infraEnvID, hostID, dest) ret0, _ := ret[0].(error) return ret0 } // DownloadHostIgnition indicates an expected call of DownloadHostIgnition -func (mr *MockInventoryClientMockRecorder) DownloadHostIgnition(ctx, hostID, dest interface{}) *gomock.Call { +func (mr *MockInventoryClientMockRecorder) DownloadHostIgnition(ctx, infraEnvID, hostID, dest interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadHostIgnition", reflect.TypeOf((*MockInventoryClient)(nil).DownloadHostIgnition), ctx, hostID, dest) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadHostIgnition", reflect.TypeOf((*MockInventoryClient)(nil).DownloadHostIgnition), ctx, infraEnvID, hostID, dest) } // UpdateHostInstallProgress mocks base method diff --git a/src/main/assisted-installer-controller/assisted_installer_main.go b/src/main/assisted-installer-controller/assisted_installer_main.go index 4a5c45d83..73ec8d421 100644 --- 
a/src/main/assisted-installer-controller/assisted_installer_main.go +++ b/src/main/assisted-installer-controller/assisted_installer_main.go @@ -117,10 +117,10 @@ func waitForInstallation(client inventory_client.InventoryClient, log logrus.Fie // we should exit controller after maximumErrorsBeforeExit errors // in case cluster was deleted we should exit immediately switch err.(type) { - case *installer.GetClusterNotFound: + case *installer.V2GetClusterNotFound: errCounter = errCounter + maximumErrorsBeforeExit log.WithError(err).Errorf("Cluster was not found in inventory or user is not authorized") - case *installer.GetClusterUnauthorized: + case *installer.V2GetClusterUnauthorized: errCounter++ log.WithError(err).Errorf("User is not authenticated to perform the operation") } diff --git a/src/main/assisted-installer-controller/assisted_installer_main_test.go b/src/main/assisted-installer-controller/assisted_installer_main_test.go index 5a847dc5a..718839dc9 100644 --- a/src/main/assisted-installer-controller/assisted_installer_main_test.go +++ b/src/main/assisted-installer-controller/assisted_installer_main_test.go @@ -71,7 +71,7 @@ var _ = Describe("installer HostRoleMaster role", func() { exit = func(code int) { exitCode = code } - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterUnauthorized()).Times(maximumErrorsBeforeExit) + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterUnauthorized()).Times(maximumErrorsBeforeExit) // added to make waitForInstallation exit mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1) waitForInstallation(mockbmclient, l, status) @@ -84,7 +84,7 @@ var _ = Describe("installer HostRoleMaster role", func() { exit = func(code int) { exitCode = code } - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterNotFound()).Times(maximumErrorsBeforeExit) + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterNotFound()).Times(maximumErrorsBeforeExit) // added to make waitForInstallation exit mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1) @@ -99,7 +99,7 @@ var _ = Describe("installer HostRoleMaster role", func() { exit = func(code int) { exitCode = code } - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterNotFound()).Times(1) + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterNotFound()).Times(1) // added to make waitForInstallation exit mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1) @@ -113,7 +113,7 @@ var _ = Describe("installer HostRoleMaster role", func() { exit = func(code int) { exitCode = code } - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewGetClusterUnauthorized()).Times(maximumErrorsBeforeExit) + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterUnauthorized()).Times(maximumErrorsBeforeExit) // added to make waitForInstallation exit mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1) @@ -127,7 +127,7 @@ var _ = Describe("installer HostRoleMaster role", func() { exit = func(code int) { exitCode = code } - mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, 
installer.NewGetClusterUnauthorized()).Times(maximumErrorsBeforeExit - 2)
+			mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(nil, installer.NewV2GetClusterUnauthorized()).Times(maximumErrorsBeforeExit - 2)
 			// added to make waitForInstallation exit
 			mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: swag.String(models.ClusterStatusInstalled)}, nil).Times(1)

From b2d7e1e2d9c572f3285ac5a99f3e038c6c72da07 Mon Sep 17 00:00:00 2001
From: Flavio Percoco
Date: Thu, 23 Sep 2021 13:42:24 +0100
Subject: [PATCH 33/43] MGMT-7760: Switch to stream8 (#360)

The original plan was to move all images to ubi8. This is not possible
due to the lack of some packages that are needed for other projects. We
are now going to switch all images to stream8 with the hope that the
consistency across repos will prevent (or help with debugging)
current/future issues in CI.

The goal is to keep components' builds as consistent as possible across
the channels we are releasing them on.

Signed-off-by: Flavio Percoco
Co-authored-by: Flavio Percoco
---
 Dockerfile.assisted-installer            | 2 +-
 Dockerfile.assisted-installer-controller | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile.assisted-installer b/Dockerfile.assisted-installer
index 740938ee1..37f667ce6 100644
--- a/Dockerfile.assisted-installer
+++ b/Dockerfile.assisted-installer
@@ -11,7 +11,7 @@ RUN go mod download
 COPY . .
 RUN make installer

-FROM quay.io/centos/centos:centos8
+FROM quay.io/centos/centos:stream8

 COPY --from=builder /go/src/github.com/openshift/assisted-installer/build/installer /usr/bin/installer
 COPY --from=builder /go/src/github.com/openshift/assisted-installer/deploy/assisted-installer-controller /assisted-installer-controller/deploy
diff --git a/Dockerfile.assisted-installer-controller b/Dockerfile.assisted-installer-controller
index be411fead..2e10c906b 100644
--- a/Dockerfile.assisted-installer-controller
+++ b/Dockerfile.assisted-installer-controller
@@ -13,7 +13,7 @@ RUN go mod download
 COPY . .
 RUN make controller

-FROM quay.io/centos/centos:centos8
+FROM quay.io/centos/centos:stream8

 RUN yum -y install make gcc unzip wget curl rsync && yum clean all

 COPY --from=builder /go/src/github.com/openshift/assisted-installer/build/assisted-installer-controller /usr/bin/assisted-installer-controller

From be3b5fd419778b79b02cf064170ef76eb2b15c64 Mon Sep 17 00:00:00 2001
From: Lisa Rashidi-Ranjbar
Date: Wed, 29 Sep 2021 12:27:01 -0700
Subject: [PATCH 34/43] NO-ISSUE: Add lranjbar to OWNERS_ALIASES (#356)

---
 OWNERS_ALIASES | 1 +
 1 file changed, 1 insertion(+)

diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index 2b48a317a..f2fdd3653 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -22,6 +22,7 @@ aliases:
   - osherdp
   - flaper87
   - mkowalski
+  - lranjbar
  code-reviewers:
   - jakub-dzon
   - pkliczewski

From 04b57c509591edbc8ec1a7a708daa39186aae3e3 Mon Sep 17 00:00:00 2001
From: Mateusz Kowalski
Date: Tue, 5 Oct 2021 16:59:04 +0200
Subject: [PATCH 35/43] Bug 1966621: Do not use run-level label for assisted-installer namespace (#291)

* Bug 1966621: Do not use run-level label for assisted-installer namespace

Namespaces using the run-level label are considered to be highly privileged,
and Security Context Constraints are not applied to the workloads running
in them. One of the deployment models for assisted-installer uses a
cluster deployed by the AI service to deploy the next clusters.
In this scenario, if the same `assisted-installer` namespace is used
for deploying the Assisted Service Operator, the pods do not get any
securityContext properties applied. This, apart from the potential
security violations, causes functional errors, e.g. the Postgres
container running with the wrong UID.

This PR changes the configuration of the `assisted-installer` namespace
so that it does not have the run-level label applied and is treated
like any other customer namespace.

Contributes-to: OCPBUGSM-29833

* Bug 1966621: Clean up the code after run-level label removal

With the `run-level` label being completely dropped, we are now removing
the remaining logic handling it in the post-installation steps.

Contributes-to: OCPBUGSM-29833

* Bug 1966621: Allow assisted-installer service account to use SCCs

This commit adds additional permissions to the service account used by
the assisted-installer-controller. As we no longer override Security
Context Constraints for the whole assisted-installer namespace, we are
adding explicit permissions to the account used to run the AI
controller pod.

Contributes-to: OCPBUGSM-29833
---
 .../assisted-installer-controller-nm.yaml   |  2 --
 .../assisted-installer-controller-role.yaml | 14 ++++++++++++++
 .../assisted_installer_controller.go        | 13 -------------
 .../assisted_installer_controller_test.go   | 17 -----------------
 4 files changed, 14 insertions(+), 32 deletions(-)

diff --git a/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml b/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml
index f2a39392b..3be4e8009 100644
--- a/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml
+++ b/deploy/assisted-installer-controller/assisted-installer-controller-nm.yaml
@@ -2,5 +2,3 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: assisted-installer
-  labels:
-    openshift.io/run-level: "0"
\ No newline at end of file
diff --git a/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml b/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml
index aa5e4348f..44abc74eb 100644
--- a/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml
+++ b/deploy/assisted-installer-controller/assisted-installer-controller-role.yaml
@@ -154,3 +154,17 @@ rules:
       - pods
     verbs:
       - deletecollection
+  - apiGroups:
+      - "security.openshift.io"
+    resourceNames:
+      - "anyuid"
+      - "nonroot"
+      - "hostmount-anyuid"
+      - "machine-api-termination-handler"
+      - "hostnetwork"
+      - "hostaccess"
+      - "node-exporter"
+    resources:
+      - securitycontextconstraints
+    verbs:
+      - use
diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go
index e653b7130..26f4046f9 100644
--- a/src/assisted_installer_controller/assisted_installer_controller.go
+++ b/src/assisted_installer_controller/assisted_installer_controller.go
@@ -434,19 +434,6 @@ func (c controller) postInstallConfigs(ctx context.Context) error {
 		}
 	}

-	// Unlabel run-level from assisted-installer namespace after the installation.
-	// Keeping the `run-level` label represents a security risk as it overwrites the SecurityContext configurations
-	// used for applications deployed in this namespace.
- data := []byte(`{"metadata":{"labels":{"$patch": "delete", "openshift.io/run-level":"0"}}}`) - c.log.Infof("Removing run-level label from %s namespace", c.ControllerConfig.Namespace) - err = c.kc.PatchNamespace(c.ControllerConfig.Namespace, data) - if err != nil { - // It is a conscious decision not to fail an installation if for any reason patching the namespace - // in order to remove the `run-level` label has failed. This will be redesigned in the next release - // so that the `run-level` label is not created in the first place. - c.log.Warn("Failed to unlabel AI namespace after the installation.") - } - err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.addRouterCAToClusterCA) if err != nil { return errors.Wrapf(err, "Timeout while waiting router ca data") diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go index a478f9e2d..6e2de469c 100644 --- a/src/assisted_installer_controller/assisted_installer_controller_test.go +++ b/src/assisted_installer_controller/assisted_installer_controller_test.go @@ -49,8 +49,6 @@ var ( MustGatherImage: "quay.io/test-must-gather:latest", } - aiNamespaceRunlevelPatch = []byte(`{"metadata":{"labels":{"$patch": "delete", "openshift.io/run-level":"0"}}}`) - progressClusterVersionCondition = &configv1.ClusterVersion{ Status: configv1.ClusterVersionStatus{ Conditions: []configv1.ClusterOperatorStatusCondition{{Type: configv1.OperatorProgressing, @@ -644,9 +642,6 @@ var _ = Describe("installer HostRoleMaster role", func() { setConsoleAsAvailable("cluster-id") - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - // CVO mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). 
Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).Times(1) @@ -703,9 +698,6 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - wg.Add(1) assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() @@ -718,9 +710,6 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", false, "Timeout while waiting router ca data: timed out").Return(nil).Times(1) - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - wg.Add(1) go assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() @@ -776,9 +765,6 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - wg.Add(1) assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() @@ -804,9 +790,6 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), "cluster-id", "lso", models.OperatorStatusFailed, "Waiting for operator timed out").Return(nil).Times(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) - // Patching NS - mockk8sclient.EXPECT().PatchNamespace(defaultTestControllerConf.Namespace, aiNamespaceRunlevelPatch).Return(nil) - wg.Add(1) assistedController.PostInstallConfigs(context.TODO(), &wg) wg.Wait() From f88da83680f3059a65253b7c1635fb6f4e9b362b Mon Sep 17 00:00:00 2001 From: Yuval Goldberg Date: Wed, 6 Oct 2021 13:05:01 +0300 Subject: [PATCH 36/43] MGMT-4078: Validate console operator in parallel to CVO (#362) --- .../assisted_installer_controller.go | 38 ++-- .../assisted_installer_controller_test.go | 199 ++++++++++-------- 2 files changed, 133 insertions(+), 104 deletions(-) diff --git a/src/assisted_installer_controller/assisted_installer_controller.go b/src/assisted_installer_controller/assisted_installer_controller.go index 26f4046f9..027d3696e 100644 --- a/src/assisted_installer_controller/assisted_installer_controller.go +++ b/src/assisted_installer_controller/assisted_installer_controller.go @@ -426,12 +426,8 @@ func (c controller) PostInstallConfigs(ctx context.Context, wg *sync.WaitGroup) func (c controller) postInstallConfigs(ctx context.Context) error { var err error - c.log.Infof("Waiting for cluster version operator: %t", c.WaitForClusterVersion) - - if c.WaitForClusterVersion { - if err = c.waitingForClusterVersion(ctx); err != nil { - return errors.Wrapf(err, "Timeout while waiting for cluster version to be available") - } + if err = c.waitingForClusterOperators(ctx); err != nil { + return errors.Wrapf(err, "Timeout while waiting for cluster operators to be available") } err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.addRouterCAToClusterCA) @@ -451,10 +447,6 @@ func (c 
controller) postInstallConfigs(ctx context.Context) error {
 	var err error

-	c.log.Infof("Waiting for cluster version operator: %t", c.WaitForClusterVersion)
-
-	if c.WaitForClusterVersion {
-		if err = c.waitingForClusterVersion(ctx); err != nil {
-			return errors.Wrapf(err, "Timeout while waiting for cluster version to be available")
-		}
+	if err = c.waitingForClusterOperators(ctx); err != nil {
+		return errors.Wrapf(err, "Timeout while waiting for cluster operators to be available")
 	}

 	err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.addRouterCAToClusterCA)
@@ -451,10 +447,6 @@ func (c controller) postInstallConfigs(ctx context.Context) error {
 		c.log.Infof("Skipping etcd unpatch for cluster version %s", c.ControllerConfig.OpenshiftVersion)
 	}

-	if err = utils.WaitForPredicateWithContext(ctx, WaitTimeout, GeneralWaitInterval, c.validateConsoleAvailability); err != nil {
-		return errors.Wrapf(err, "Timeout while waiting for console to become available")
-	}
-
 	// Wait for OLM operators
 	if err = c.waitForOLMOperators(ctx); err != nil {
 		return errors.Wrapf(err, "Error while initializing OLM operators")
@@ -931,21 +923,23 @@ func (c controller) waitForCSV(ctx context.Context, waitTimeout time.Duration) e
 	return utils.WaitForPredicateWithContext(ctx, waitTimeout, GeneralWaitInterval, areOLMOperatorsAvailable)
 }

-// validateConsoleAvailability checks if the console operator is available
-func (c controller) validateConsoleAvailability() bool {
-	return c.isOperatorAvailable(NewClusterOperatorHandler(c.kc, consoleOperatorName))
-}
-
-// waitingForClusterVersion checks the Cluster Version Operator availability in the
-// new OCP cluster. A success would be announced only when the service acknowledges
-// the CVO availability, in order to avoid unsycned scenarios.
-// In case cvo changes it message we will update timer but we want to have maximum timeout
-// for this context with timeout is used
-func (c controller) waitingForClusterVersion(ctx context.Context) error {
+// waitingForClusterOperators checks the console operator and the Cluster Version Operator
+// availability in the new OCP cluster in parallel.
+// A success would be announced only when the service acknowledges the operators' availability,
+// in order to avoid unsynced scenarios.
+func (c controller) waitingForClusterOperators(ctx context.Context) error {
+	// In case the CVO changes its message we will update the timer, but we still want to
+	// enforce a maximum timeout, so a context with timeout is used
 	ctxWithTimeout, cancel := context.WithTimeout(ctx, CVOMaxTimeout)
 	defer cancel()
 	isClusterVersionAvailable := func(timer *time.Timer) bool {
-		return c.isOperatorAvailable(NewClusterVersionHandler(c.kc, timer))
+		result := c.isOperatorAvailable(NewClusterOperatorHandler(c.kc, consoleOperatorName))
+
+		if c.WaitForClusterVersion {
+			result = c.isOperatorAvailable(NewClusterVersionHandler(c.kc, timer))
+		}
+
+		return result
 	}
 	return utils.WaitForPredicateWithTimer(ctxWithTimeout, WaitTimeout, GeneralProgressUpdateInt, isClusterVersionAvailable)
 }
diff --git a/src/assisted_installer_controller/assisted_installer_controller_test.go b/src/assisted_installer_controller/assisted_installer_controller_test.go
index 6e2de469c..b1eda488d 100644
--- a/src/assisted_installer_controller/assisted_installer_controller_test.go
+++ b/src/assisted_installer_controller/assisted_installer_controller_test.go
@@ -155,7 +155,11 @@ var _ = Describe("installer HostRoleMaster role", func() {

 	mockGetServiceOperators := func(operators []models.MonitoredOperator) {
 		for index := range operators {
-			mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), operators[index].Name).Return(&operators[index], nil).Times(1)
+			if operators[index].Status != models.OperatorStatusAvailable {
+				mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), operators[index].Name).Return(&operators[index], nil).Times(1)
+			} else {
+				mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), operators[index].Name).Return(&operators[index], nil).MinTimes(1)
+			}
 		}
 	}

@@ -166,6 +170,8 @@ var _ = Describe("installer HostRoleMaster
role", func() { } setConsoleAsAvailable := func(clusterID string) { + WaitTimeout = 100 * time.Millisecond + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(validConsoleOperator, nil).Times(1) mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), clusterID, consoleOperatorName, models.OperatorStatusAvailable, gomock.Any()).Return(nil).Times(1) @@ -173,6 +179,14 @@ var _ = Describe("installer HostRoleMaster role", func() { mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusAvailable}}) } + setCvoAsAvailable := func() { + mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(availableClusterVersionCondition, nil).Times(1) + mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, models.OperatorStatusAvailable, availableClusterVersionCondition.Status.Conditions[0].Message).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: models.OperatorStatusAvailable}}) + } + setClusterAsFinalizing := func() { finalizing := models.ClusterStatusFinalizing mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &finalizing}, nil).Times(1) @@ -185,11 +199,9 @@ var _ = Describe("installer HostRoleMaster role", func() { } setControllerWaitForOLMOperators := func(clusterID string) { - WaitTimeout = 100 * time.Millisecond - setClusterAsFinalizing() - uploadIngressCert(clusterID) setConsoleAsAvailable(clusterID) + uploadIngressCert(clusterID) } returnServiceWithAddress := func(name, namespace, ip string) *gomock.Call { @@ -576,91 +588,105 @@ var _ = Describe("installer HostRoleMaster role", func() { }) It("success", func() { + installing := models.ClusterStatusInstalling + mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) + setControllerWaitForOLMOperators(assistedController.ClusterID) + setCvoAsAvailable() + + // Completion + mockGetOLMOperators([]models.MonitoredOperator{}) + mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1) + mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1) + + wg.Add(1) + go assistedController.PostInstallConfigs(context.TODO(), &wg) + wg.Wait() + + Expect(assistedController.Status.HasError()).Should(Equal(false)) + }) + + It("lots of failures then success", func() { installing := models.ClusterStatusInstalling mockbmclient.EXPECT().GetCluster(gomock.Any()).Return(&models.Cluster{Status: &installing}, nil).Times(1) setClusterAsFinalizing() - uploadIngressCert(assistedController.ClusterID) - // Console - By("console errors", func() { - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(nil, fmt.Errorf("no-operator")).Times(1) + // Console errors + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(nil, fmt.Errorf("no-operator")).Times(1) - mockGetServiceOperators([]models.MonitoredOperator{{Name: 
consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - &configv1.ClusterOperator{ - Status: configv1.ClusterOperatorStatus{ - Conditions: []configv1.ClusterOperatorStatusCondition{}, - }, - }, fmt.Errorf("no-conditions")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorDegraded, configv1.ConditionFalse), - fmt.Errorf("false-degraded-condition")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), - fmt.Errorf("missing-degraded-condition")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionFalse), - fmt.Errorf("false-available-condition")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), - fmt.Errorf("true-degraded-condition")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - &configv1.ClusterOperator{ - Status: configv1.ClusterOperatorStatus{ - Conditions: []configv1.ClusterOperatorStatusCondition{ - {Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse}, - }, + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + &configv1.ClusterOperator{ + Status: configv1.ClusterOperatorStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{}, + }, + }, fmt.Errorf("no-conditions")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorDegraded, configv1.ConditionFalse), + fmt.Errorf("false-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), + fmt.Errorf("missing-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionFalse), + fmt.Errorf("false-available-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, 
Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithCondition(configv1.OperatorAvailable, configv1.ConditionTrue), + fmt.Errorf("true-degraded-condition")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + &configv1.ClusterOperator{ + Status: configv1.ClusterOperatorStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{ + {Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse}, }, - }, fmt.Errorf("missing-conditions")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithConditionsStatus(configv1.ConditionTrue, configv1.ConditionTrue), - fmt.Errorf("bad-conditions-status")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionTrue), - fmt.Errorf("bad-conditions-status")).Times(1) - - mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) - mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( - getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionFalse), - fmt.Errorf("bad-conditions-status")).Times(1) - }) + }, + }, fmt.Errorf("missing-conditions")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithConditionsStatus(configv1.ConditionTrue, configv1.ConditionTrue), + fmt.Errorf("bad-conditions-status")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionTrue), + fmt.Errorf("bad-conditions-status")).Times(1) + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusProgressing}}) + mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return( + getClusterOperatorWithConditionsStatus(configv1.ConditionFalse, configv1.ConditionFalse), + fmt.Errorf("bad-conditions-status")).Times(1) setConsoleAsAvailable("cluster-id") + uploadIngressCert(assistedController.ClusterID) - // CVO - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). - Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).Times(1) + // CVO errors + mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: ""}}) mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(nil, fmt.Errorf("dummy")).Times(1) - mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName). 
-			Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).Times(1)
+		mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: ""}})
 		mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(progressClusterVersionCondition, nil).Times(1)
 		mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, models.OperatorStatusProgressing, progressClusterVersionCondition.Status.Conditions[0].Message).Times(1)
 
-		mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).
-			Return(&models.MonitoredOperator{Status: models.OperatorStatusProgressing, StatusInfo: progressClusterVersionCondition.Status.Conditions[0].Message}, nil).Times(1)
-		mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(availableClusterVersionCondition, nil).Times(1)
-		mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, models.OperatorStatusAvailable, availableClusterVersionCondition.Status.Conditions[0].Message).Times(1)
+		// Fail 8 more times when console fails
+		extraFailTimes := 8
+		for i := 0; i < extraFailTimes; i++ {
+			mockGetServiceOperators([]models.MonitoredOperator{{Name: cvoOperatorName, Status: ""}})
+		}
+		mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(nil, fmt.Errorf("dummy")).Times(extraFailTimes)
 
-		mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).
-			Return(&models.MonitoredOperator{Status: models.OperatorStatusAvailable, StatusInfo: availableClusterVersionCondition.Status.Conditions[0].Message}, nil).Times(1)
+		setCvoAsAvailable()
 
-		// Completion
 		mockGetOLMOperators([]models.MonitoredOperator{})
 		mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(fmt.Errorf("dummy")).Times(1)
 		mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", true, "").Return(nil).Times(1)
@@ -671,11 +697,17 @@ var _ = Describe("installer HostRoleMaster role", func() {
 		Expect(assistedController.Status.HasError()).Should(Equal(false))
 	})
+
 	It("failure", func() {
-		WaitTimeout = 20 * time.Millisecond
-		GeneralProgressUpdateInt = 30 * time.Millisecond
 		setClusterAsFinalizing()
+		mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), consoleOperatorName).
+			Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).AnyTimes()
+		mockk8sclient.EXPECT().GetClusterOperator(consoleOperatorName).Return(nil, fmt.Errorf("dummy")).AnyTimes()
+		mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).
+ Return(&models.MonitoredOperator{Status: "", StatusInfo: ""}, nil).AnyTimes() + mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(nil, fmt.Errorf("dummy")).AnyTimes() + mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", false, gomock.Any()).Return(nil).Times(1) wg.Add(1) @@ -704,8 +736,8 @@ var _ = Describe("installer HostRoleMaster role", func() { Expect(assistedController.Status.HasError()).Should(Equal(false)) }) It("failure", func() { - WaitTimeout = 20 * time.Millisecond setClusterAsFinalizing() + setConsoleAsAvailable("cluster-id") mockk8sclient.EXPECT().GetConfigMap(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("aaa")).MinTimes(1) mockbmclient.EXPECT().CompleteInstallation(gomock.Any(), "cluster-id", false, "Timeout while waiting router ca data: timed out").Return(nil).Times(1) @@ -1257,7 +1289,7 @@ var _ = Describe("installer HostRoleMaster role", func() { }) }) - Context("waitingForClusterVersion", func() { + Context("waitingForClusterOperators", func() { ctx := context.TODO() tests := []struct { name string @@ -1327,9 +1359,12 @@ var _ = Describe("installer HostRoleMaster role", func() { } BeforeEach(func() { + assistedController.WaitForClusterVersion = true GeneralProgressUpdateInt = 100 * time.Millisecond WaitTimeout = 150 * time.Millisecond CVOMaxTimeout = 1 * time.Second + + mockGetServiceOperators([]models.MonitoredOperator{{Name: consoleOperatorName, Status: models.OperatorStatusAvailable}}) }) for i := range tests { @@ -1362,9 +1397,9 @@ var _ = Describe("installer HostRoleMaster role", func() { mockk8sclient.EXPECT().GetClusterVersion(clusterVersionName).Return(clusterVersionReport, nil).MinTimes(amountOfSamples) if newServiceCVOStatus.Status == models.OperatorStatusAvailable { - Expect(assistedController.waitingForClusterVersion(ctx)).ShouldNot(HaveOccurred()) + Expect(assistedController.waitingForClusterOperators(ctx)).ShouldNot(HaveOccurred()) } else { - Expect(assistedController.waitingForClusterVersion(ctx)).Should(HaveOccurred()) + Expect(assistedController.waitingForClusterOperators(ctx)).Should(HaveOccurred()) } }) } @@ -1386,7 +1421,7 @@ var _ = Describe("installer HostRoleMaster role", func() { err := func() error { ctxTimeout, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - return assistedController.waitingForClusterVersion(ctxTimeout) + return assistedController.waitingForClusterOperators(ctxTimeout) }() Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) @@ -1409,7 +1444,7 @@ var _ = Describe("installer HostRoleMaster role", func() { mockbmclient.EXPECT().UpdateClusterOperator(gomock.Any(), gomock.Any(), cvoOperatorName, gomock.Any(), gomock.Any()).AnyTimes() err := func() error { - return assistedController.waitingForClusterVersion(ctx) + return assistedController.waitingForClusterOperators(ctx) }() Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) @@ -1434,7 +1469,7 @@ var _ = Describe("installer HostRoleMaster role", func() { // Service succeed mockbmclient.EXPECT().GetClusterMonitoredOperator(gomock.Any(), gomock.Any(), cvoOperatorName).Return(newServiceCVOStatus, nil).Times(1) - Expect(assistedController.waitingForClusterVersion(context.TODO())).ShouldNot(HaveOccurred()) + Expect(assistedController.waitingForClusterOperators(context.TODO())).ShouldNot(HaveOccurred()) }) }) From a4fc5e8e45b58b5fa901c39b612c26c30bb8490c Mon Sep 17 00:00:00 2001 From: Yuval Goldberg Date: Thu, 7 Oct 2021 17:26:26 +0300 Subject: [PATCH 37/43] NO-ISSUE: Add commit-message 
 prefix NO-ISSUE to dependabot (#364)
---
 .github/dependabot.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 645c14028..fcf324930 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -15,8 +15,12 @@ updates:
       - "ok-to-test"
       - "go"
       - "dependencies"
+    commit-message:
+      prefix: "NO-ISSUE"
   - package-ecosystem: "docker"
     directory: "/"
     schedule:
       interval: "weekly"
+    commit-message:
+      prefix: "NO-ISSUE"
 

From 08f2d4886744dea25e2cc24d1b778e15d4ec84e9 Mon Sep 17 00:00:00 2001
From: Omer Tuchfeld
Date: Sun, 10 Oct 2021 17:45:31 +0200
Subject: [PATCH 38/43] NO-ISSUE: Add `replace` entry in go.mod for `irifrance/gini` which has moved in GitHub (#365)

The https://github.com/irifrance/gini repo we indirectly depended on via our
`github.com/operator-framework/operator-lifecycle-manager v0.18.0` dependency
has moved to https://github.com/go-air/gini.

This invalid dependency was fixed in newer versions of the
operator-lifecycle-manager, but I prefer just fixing this issue with a
`replace` directive rather than dealing with an OLM upgrade.

Without this `replace` directive, attempting to work on the installer
repository locally causes my IDE / go commands to complain about
irifrance/gini being gone; this `replace` directive fixes those issues.
---
 go.mod | 1 +
 go.sum | 1 +
 2 files changed, 2 insertions(+)

diff --git a/go.mod b/go.mod
index 34621e5bf..ef808ef25 100644
--- a/go.mod
+++ b/go.mod
@@ -44,6 +44,7 @@ replace (
 	github.com/openshift/api => github.com/openshift/api v0.0.0-20200901182017-7ac89ba6b971
 	github.com/openshift/hive/pkg/apis => github.com/carbonin/hive/pkg/apis v0.0.0-20210209195732-57e8c3ae12d1
 	github.com/openshift/machine-api-operator => github.com/openshift/machine-api-operator v0.2.1-0.20201026110925-50ea569da51b
+	github.com/irifrance/gini => github.com/go-air/gini v1.0.1
 	k8s.io/api => k8s.io/api v0.19.2
 	k8s.io/apimachinery => k8s.io/apimachinery v0.19.2
 	k8s.io/client-go => k8s.io/client-go v0.19.2
diff --git a/go.sum b/go.sum
index 9da6317d7..56cbbf583 100644
--- a/go.sum
+++ b/go.sum
@@ -394,6 +394,7 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm
 github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/go-air/gini v1.0.1/go.mod h1:swH5OTtiG/X/YrU06r288qZwq6I1agpbuXQOB55xqGU=
 github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
 github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
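A note on the mechanism this patch leans on: a `replace` directive applies
only when building the main module, and it redirects every requirement on the
old module path, including transitive ones pulled in by dependencies. A
minimal sketch of the shape of such a go.mod (the module path below is a
placeholder, not this repository's real path):

    module example.com/scratch

    go 1.16

    require github.com/operator-framework/operator-lifecycle-manager v0.18.0

    // Serve any requirement on the moved path from its new home.
    replace github.com/irifrance/gini => github.com/go-air/gini v1.0.1

Running `go mod tidy` (or `go mod download`) afterwards records the checksum
of the replacement module, which is why the patch also carries a one-line
go.sum hunk.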
From a6debecfbbca16190db482dbc7201321a0a131c8 Mon Sep 17 00:00:00 2001
From: Eran Cohen
Date: Sun, 10 Oct 2021 20:21:44 +0300
Subject: [PATCH 39/43] Bug 2004633: cluster deployment failed on connection error (#366)

Updated the retry configuration to keep retrying failed requests for up to
1 hour.
---
 src/inventory_client/inventory_client.go      | 13 +++++++++----
 src/inventory_client/inventory_client_test.go |  4 ++--
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/src/inventory_client/inventory_client.go b/src/inventory_client/inventory_client.go
index 2086fdcd9..eb688cbb0 100644
--- a/src/inventory_client/inventory_client.go
+++ b/src/inventory_client/inventory_client.go
@@ -36,7 +36,8 @@ import (
 const (
 	defaultRetryMinDelay = time.Duration(2) * time.Second
 	defaultRetryMaxDelay = time.Duration(10) * time.Second
-	defaultMaxRetries    = 10
+	defaultMinRetries    = 10
+	defaultMaxRetries    = 360
 )
 
 //go:generate mockgen -source=inventory_client.go -package=inventory_client -destination=mock_inventory_client.go
@@ -73,12 +74,12 @@ type HostData struct {
 func CreateInventoryClient(clusterId string, inventoryURL string, pullSecret string, insecure bool, caPath string,
 	logger *logrus.Logger, proxyFunc func(*http.Request) (*url.URL, error)) (*inventoryClient, error) {
 	return CreateInventoryClientWithDelay(clusterId, inventoryURL, pullSecret, insecure, caPath,
-		logger, proxyFunc, defaultRetryMinDelay, defaultRetryMaxDelay, defaultMaxRetries)
+		logger, proxyFunc, defaultRetryMinDelay, defaultRetryMaxDelay, defaultMaxRetries, defaultMinRetries)
 }
 
 func CreateInventoryClientWithDelay(clusterId string, inventoryURL string, pullSecret string, insecure bool, caPath string,
 	logger *logrus.Logger, proxyFunc func(*http.Request) (*url.URL, error),
-	retryMinDelay, retryMaxDelay time.Duration, maxRetries int) (*inventoryClient, error) {
+	retryMinDelay, retryMaxDelay time.Duration, maxRetries int, minRetries int) (*inventoryClient, error) {
 	clientConfig := client.Config{}
 	var err error
 	clientConfig.URL, err = url.ParseRequestURI(createUrl(inventoryURL))
@@ -116,9 +117,13 @@ func CreateInventoryClientWithDelay(clusterId string, inventoryURL string, pullS
 	tr := rehttp.NewTransport(
 		transport,
 		rehttp.RetryAny(
+			rehttp.RetryAll(
+				rehttp.RetryMaxRetries(minRetries),
+				rehttp.RetryStatusInterval(400, 404),
+			),
 			rehttp.RetryAll(
 				rehttp.RetryMaxRetries(maxRetries),
-				rehttp.RetryStatusInterval(400, 600),
+				rehttp.RetryStatusInterval(405, 600),
 			),
 			rehttp.RetryAll(
 				rehttp.RetryMaxRetries(maxRetries),
diff --git a/src/inventory_client/inventory_client_test.go b/src/inventory_client/inventory_client_test.go
index 985f8bf2d..41b546814 100644
--- a/src/inventory_client/inventory_client_test.go
+++ b/src/inventory_client/inventory_client_test.go
@@ -46,7 +46,7 @@ var _ = Describe("inventory_client_tests", func() {
 		server.SetAllowUnhandledRequests(true)
 		server.SetUnhandledRequestStatusCode(http.StatusInternalServerError) // 500
 		client, err = CreateInventoryClientWithDelay(clusterID, "http://"+server.Addr(), "pullSecret", true, "",
-			logger, nil, testRetryDelay, testRetryMaxDelay, testMaxRetries)
+			logger, nil, testRetryDelay, testRetryMaxDelay, testMaxRetries, testMaxRetries)
 		Expect(err).ShouldNot(HaveOccurred())
 		Expect(client).ShouldNot(BeNil())
 	})
@@ -79,7 +79,7 @@
 	It("positive_late_response", func() {
 		server.Start()
 		expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusInternalServerError)
-		expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusForbidden)
+		expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusServiceUnavailable)
 		expectServerCall(server, fmt.Sprintf("/api/assisted-install/v2/infra-envs/%s/hosts/%s/progress", infraEnvID, hostID), expectedJson, http.StatusOK)
 
 		Expect(client.UpdateHostInstallProgress(context.Background(), infraEnvID, hostID, models.HostStageInstalling, "")).ShouldNot(HaveOccurred())
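The retry policy in the hunk above composes rehttp predicates: RetryAny
retries when either branch matches, and each RetryAll branch pairs a retry
budget with a status-code interval, so client errors get the small budget and
server errors get the large one (360 attempts with delays capped at 10
seconds is roughly the 1 hour from the commit message). A minimal,
self-contained sketch of the same shape, assuming the
github.com/PuerkitoBio/rehttp package (the target URL is a placeholder):

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"

    	"github.com/PuerkitoBio/rehttp"
    )

    func main() {
    	tr := rehttp.NewTransport(
    		http.DefaultTransport,
    		rehttp.RetryAny(
    			// Client-side errors are usually permanent: small budget.
    			// RetryStatusInterval is exclusive on the upper bound,
    			// so (400, 404) matches 400-403.
    			rehttp.RetryAll(
    				rehttp.RetryMaxRetries(10),
    				rehttp.RetryStatusInterval(400, 404),
    			),
    			// Server-side errors are often transient: large budget.
    			rehttp.RetryAll(
    				rehttp.RetryMaxRetries(360),
    				rehttp.RetryStatusInterval(405, 600),
    			),
    		),
    		// Exponential backoff with jitter, 2s base, capped at 10s.
    		rehttp.ExpJitterDelay(2*time.Second, 10*time.Second),
    	)
    	client := &http.Client{Transport: tr}

    	resp, err := client.Get("https://example.invalid/healthz")
    	if err != nil {
    		fmt.Println("gave up after retries:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("status:", resp.Status)
    }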
From aa6c55f384f9db22aba361e55f1cc2a6b4778855 Mon Sep 17 00:00:00 2001
From: Eran Cohen
Date: Sun, 10 Oct 2021 20:29:42 +0300
Subject: [PATCH 40/43] NO-ISSUE: Add omertuc to code-approvers (#367)

---
 OWNERS_ALIASES | 1 +
 1 file changed, 1 insertion(+)

diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index f2fdd3653..9523d878f 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -23,6 +23,7 @@ aliases:
     - flaper87
     - mkowalski
     - lranjbar
+    - omertuc
   code-reviewers:
     - jakub-dzon
     - pkliczewski

From e6d6c7b12e0bf92716c679ea51d1bb6d5cc5d4b2 Mon Sep 17 00:00:00 2001
From: Yevgeny Shnaidman <60741237+yevgeny-shnaidman@users.noreply.github.com>
Date: Mon, 11 Oct 2021 16:22:23 +0300
Subject: [PATCH 41/43] Bug 2010183: LogURL field of the AgentClusterInstall is not filled upon failure (#368)

The LogURL field is filled whenever any instance of the logs was generated.
When a cluster installation fails because the bootstrap node is stuck, the
logs are generated by the 2 masters at the end of the installation process
(writing to disk). In that case the code brings up the logs_sender command
directly via podman, so the infra-env parameter must be passed there as well.
---
 src/ops/ops.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/ops/ops.go b/src/ops/ops.go
index 03abde21b..2d013a406 100644
--- a/src/ops/ops.go
+++ b/src/ops/ops.go
@@ -463,12 +463,16 @@ func (o *ops) GetMCSLogs() (string, error) {
 	return string(logs), nil
 }
 
+// This function actually runs a container that implements the logs_sender
+// command. Any change to the assisted-service API that is used by the
+// logs_sender command (for example UploadLogs) must be reflected here
+// (input parameters, etc.), if needed.
 func (o *ops) UploadInstallationLogs(isBootstrap bool) (string, error) {
 	command := "podman"
 	args := []string{"run", "--rm", "--privileged", "--net=host", "--pid=host",
 		"-v", "/run/systemd/journal/socket:/run/systemd/journal/socket", "-v", "/var/log:/var/log",
 		config.GlobalConfig.AgentImage, "logs_sender", "-cluster-id", config.GlobalConfig.ClusterID, "-url", config.GlobalConfig.URL,
-		"-host-id", config.GlobalConfig.HostID,
+		"-host-id", config.GlobalConfig.HostID, "-infra-env-id", config.GlobalConfig.InfraEnvID,
 		"-pull-secret-token", config.GlobalConfig.PullSecretToken,
 		fmt.Sprintf("-insecure=%s", strconv.FormatBool(config.GlobalConfig.SkipCertVerification)),
 		fmt.Sprintf("-bootstrap=%s", strconv.FormatBool(isBootstrap)),

From 149411cf81ec722e7d9ccaae4f3e4f430cd034cd Mon Sep 17 00:00:00 2001
From: Omer Tuchfeld
Date: Tue, 12 Oct 2021 12:26:05 +0200
Subject: [PATCH 42/43] NO-ISSUE: Remove user yuvigold (#370)

---
 OWNERS_ALIASES | 1 -
 1 file changed, 1 deletion(-)

diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index 9523d878f..4851193a7 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -12,7 +12,6 @@ aliases:
     - romfreiman
     - tsorya
     - yevgeny-shnaidman
-    - yuvigold
     - nmagnezi
     - carbonin
     - rollandf

From 87aec67009198b7b3a838984d61a0a9ff616201f Mon Sep 17 00:00:00 2001
From: Igal Tsoiref
Date: Mon, 28 Jun 2021 17:55:12 +0300
Subject: [PATCH 43/43] NO-ISSUE: moving images to be multiplatform

---
 Dockerfile.assisted-installer            | 12 +++++++++---
 Dockerfile.assisted-installer-controller | 12 ++++++++----
 Makefile                                 | 10 +++++-----
 3 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/Dockerfile.assisted-installer b/Dockerfile.assisted-installer
index 37f667ce6..0f05aea40 100644
--- a/Dockerfile.assisted-installer
+++ b/Dockerfile.assisted-installer
@@ -1,4 +1,9 @@
-FROM registry.ci.openshift.org/openshift/release:golang-1.16 AS builder
+FROM --platform=$BUILDPLATFORM golang:1.16 AS builder + +ARG TARGETPLATFORM + +WORKDIR /go/src/github.com/openshift/assisted-installer-agent + ENV GOFLAGS=-mod=mod WORKDIR /go/src/github.com/openshift/assisted-installer @@ -9,9 +14,10 @@ COPY go.sum go.sum RUN go mod download COPY . . -RUN make installer -FROM quay.io/centos/centos:stream8 +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then GO_BUILD_ENV_PARAMS="GOOS=linux GOARCH=arm64" make installer;else make installer; fi + +FROM --platform=$TARGETPLATFORM quay.io/centos/centos:centos8 COPY --from=builder /go/src/github.com/openshift/assisted-installer/build/installer /usr/bin/installer COPY --from=builder /go/src/github.com/openshift/assisted-installer/deploy/assisted-installer-controller /assisted-installer-controller/deploy diff --git a/Dockerfile.assisted-installer-controller b/Dockerfile.assisted-installer-controller index 2e10c906b..367db08c3 100644 --- a/Dockerfile.assisted-installer-controller +++ b/Dockerfile.assisted-installer-controller @@ -1,7 +1,10 @@ -FROM quay.io/openshift/origin-cli:4.9.0 AS cli +FROM --platform=$BUILDPLATFORM quay.io/openshift/origin-cli:4.9.0 AS cli -FROM registry.ci.openshift.org/openshift/release:golang-1.16 AS builder +FROM --platform=$BUILDPLATFORM golang:1.16 AS builder + +ARG TARGETPLATFORM ENV GOFLAGS=-mod=mod + WORKDIR /go/src/github.com/openshift/assisted-installer # Bring in the go dependencies before anything else so we can take @@ -11,9 +14,10 @@ COPY go.sum go.sum RUN go mod download COPY . . -RUN make controller -FROM quay.io/centos/centos:stream8 +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then GO_BUILD_ENV_PARAMS="GOOS=linux GOARCH=arm64" make controller;else make controller; fi + +FROM --platform=$TARGETPLATFORM quay.io/centos/centos:centos8 RUN yum -y install make gcc unzip wget curl rsync && yum clean all COPY --from=builder /go/src/github.com/openshift/assisted-installer/build/assisted-installer-controller /usr/bin/assisted-installer-controller diff --git a/Makefile b/Makefile index b4940cd21..641ba3042 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ NAMESPACE := $(or ${NAMESPACE},assisted-installer) GIT_REVISION := $(shell git rev-parse HEAD) PUBLISH_TAG := $(or ${GIT_REVISION}) -CONTAINER_BUILD_PARAMS = --network=host --label git_revision=${GIT_REVISION} +CONTAINER_BUILD_PARAMS = --label git_revision=${GIT_REVISION} REPORTS ?= $(ROOT_DIR)/reports CI ?= false @@ -53,18 +53,18 @@ endif build: installer controller installer: - CGO_ENABLED=0 go build -o build/installer src/main/main.go + $(GO_BUILD_ENV_PARAMS) CGO_ENABLED=0 go build -o build/installer src/main/main.go controller: - CGO_ENABLED=0 go build -o build/assisted-installer-controller src/main/assisted-installer-controller/assisted_installer_main.go + $(GO_BUILD_ENV_PARAMS) CGO_ENABLED=0 go build -o build/assisted-installer-controller src/main/assisted-installer-controller/assisted_installer_main.go build-images: installer-image controller-image installer-image: - $(CONTAINER_COMMAND) build $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer . -t $(INSTALLER) + docker buildx build --platform=linux/arm64,linux/amd64 $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer . -t $(INSTALLER) --push controller-image: - $(CONTAINER_COMMAND) build $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer-controller . -t $(CONTROLLER) + docker buildx build --platform=linux/arm64,linux/amd64 $(CONTAINER_BUILD_PARAMS) -f Dockerfile.assisted-installer-controller . 
-t $(CONTROLLER) --push push-installer: installer-image $(CONTAINER_COMMAND) push $(INSTALLER)
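One operational note on the buildx-based image targets above: building and
pushing a linux/arm64,linux/amd64 manifest from a single host needs a
docker-container builder and, on an amd64 host, QEMU emulation for the arm64
build stages. A sketch of the one-time host setup, assuming a stock Docker
installation with buildx available (the builder name and image tag below are
arbitrary examples):

    # Register QEMU binfmt handlers so arm64 build stages can run on amd64
    docker run --privileged --rm tonistiigi/binfmt --install all

    # The default "docker" driver cannot push multi-platform manifests;
    # create and bootstrap a docker-container builder instead
    docker buildx create --name multiarch --driver docker-container --use
    docker buildx inspect --bootstrap

    # After that the Makefile targets run as written, e.g.
    # make installer-image INSTALLER=quay.io/example/assisted-installer:test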