-
Notifications
You must be signed in to change notification settings - Fork 319
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
OSASINFRA-3492: openstack: leverage ORC to handle RHCOS image #5139
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -54,7 +54,7 @@ func defaultImage(releaseImage *releaseinfo.ReleaseImage) (string, string, error | |
return containerImage, split[1], nil | ||
} | ||
|
||
func unsupportedOpenstackDefaultImage(releaseImage *releaseinfo.ReleaseImage) (string, string, error) { | ||
func UnsupportedOpenstackDefaultImage(releaseImage *releaseinfo.ReleaseImage) (string, string, error) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We probably want to move that function to utils or something |
||
arch, foundArch := releaseImage.StreamMetadata.Architectures["x86_64"] | ||
if !foundArch { | ||
return "", "", fmt.Errorf("couldn't find OS metadata for architecture %q", "x86_64") | ||
|
@@ -101,7 +101,7 @@ func GetImage(nodePool *hyperv1.NodePool, releaseImage *releaseinfo.ReleaseImage | |
|
||
imageName, imageHash, err := defaultImage(releaseImage) | ||
if err != nil && allowUnsupportedRHCOSVariants(nodePool) { | ||
imageName, imageHash, err = unsupportedOpenstackDefaultImage(releaseImage) | ||
imageName, imageHash, err = UnsupportedOpenstackDefaultImage(releaseImage) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,11 +1,19 @@ | ||
package openstack | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
|
||
hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
|
||
"sigs.k8s.io/controller-runtime/pkg/client" | ||
|
||
orc "github.com/k-orc/openstack-resource-controller/api/v1alpha1" | ||
nodepoolkubevirt "github.com/openshift/hypershift/hypershift-operator/controllers/nodepool/kubevirt" | ||
"github.com/openshift/hypershift/support/openstackutil" | ||
"github.com/openshift/hypershift/support/releaseinfo" | ||
"github.com/openshift/hypershift/support/upsert" | ||
"k8s.io/utils/ptr" | ||
capiopenstackv1beta1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" | ||
) | ||
|
@@ -20,12 +28,9 @@ func MachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.Node | |
Name: ptr.To(nodePool.Spec.Platform.OpenStack.ImageName), | ||
} | ||
} else { | ||
// TODO(emilien): Add support for using the image from the release payload. | ||
// This will be possible when CAPO supports managing images in the OpenStack cluster: | ||
// https://github.com/kubernetes-sigs/cluster-api-provider-openstack/pull/2130 | ||
// For 4.17 we might leave this as is and let the user provide the image name as | ||
// we plan to deliver the OpenStack provider as a dev preview. | ||
return nil, fmt.Errorf("image name is required") | ||
openStackMachineTemplate.Template.Spec.Image.ImageRef = &capiopenstackv1beta1.ResourceReference{ | ||
Name: "rhcos-" + hcluster.Name, | ||
} | ||
} | ||
|
||
// TODO: add support for BYO network/subnet | ||
|
@@ -72,3 +77,54 @@ func MachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.Node | |
} | ||
return openStackMachineTemplate, nil | ||
} | ||
|
||
func ReconcileOpenStackImageCR(ctx context.Context, client client.Client, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, controlPlaneNamespace string, release *releaseinfo.ReleaseImage) error { | ||
openStackImage := orc.Image{ | ||
ObjectMeta: metav1.ObjectMeta{ | ||
Name: "rhcos-" + hcluster.Name, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. TODO: name |
||
Namespace: controlPlaneNamespace, | ||
}, | ||
Spec: orc.ImageSpec{}, | ||
} | ||
|
||
if _, err := createOrUpdate(ctx, client, &openStackImage, func() error { | ||
err := reconcileOpenStackImageSpec(ctx, client, hcluster, &openStackImage.Spec, release) | ||
if err != nil { | ||
return err | ||
} | ||
return nil | ||
}); err != nil { | ||
return err | ||
} | ||
return nil | ||
} | ||
|
||
func reconcileOpenStackImageSpec(ctx context.Context, client client.Client, hcluster *hyperv1.HostedCluster, openStackImageSpec *orc.ImageSpec, release *releaseinfo.ReleaseImage) error { | ||
imageURL, imageHash, err := nodepoolkubevirt.UnsupportedOpenstackDefaultImage(release) | ||
if err != nil { | ||
return fmt.Errorf("failed to lookup RHCOS image: %w", err) | ||
} | ||
|
||
openStackImageSpec.CloudCredentialsRef = orc.CloudCredentialsReference{ | ||
SecretName: hcluster.Spec.Platform.OpenStack.IdentityRef.Name, | ||
CloudName: hcluster.Spec.Platform.OpenStack.IdentityRef.CloudName, | ||
} | ||
|
||
openStackImageSpec.Resource = &orc.ImageResourceSpec{ | ||
// THIS IS NOT GOOD, NEEDS TO BE FIXED (something related to the image itself should be used, like version) | ||
Name: "rhcos-" + hcluster.Name, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. TODO: name |
||
Content: &orc.ImageContent{ | ||
DiskFormat: "qcow2", | ||
Download: &orc.ImageContentSourceDownload{ | ||
URL: imageURL, | ||
Decompress: ptr.To(orc.ImageCompressionGZ), | ||
Hash: &orc.ImageHash{ | ||
Algorithm: "sha256", | ||
Value: imageHash, | ||
}, | ||
}, | ||
}, | ||
} | ||
|
||
return nil | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,124 @@ | ||
//go:build e2e | ||
// +build e2e | ||
|
||
package e2e | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"testing" | ||
|
||
"github.com/google/go-cmp/cmp" | ||
hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" | ||
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests" | ||
e2eutil "github.com/openshift/hypershift/test/e2e/util" | ||
corev1 "k8s.io/api/core/v1" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/utils/ptr" | ||
capiopenstackv1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" | ||
capiopenstackv1beta1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" | ||
capiv1 "sigs.k8s.io/cluster-api/api/v1beta1" | ||
"sigs.k8s.io/cluster-api/util" | ||
crclient "sigs.k8s.io/controller-runtime/pkg/client" | ||
) | ||
|
||
type OpenStackImageTest struct { | ||
DummyInfraSetup | ||
ctx context.Context | ||
managementClient crclient.Client | ||
hostedCluster *hyperv1.HostedCluster | ||
hostedControlPlaneNamespace string | ||
} | ||
|
||
func NewOpenStackImageTest(ctx context.Context, mgmtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) *OpenStackImageTest { | ||
return &OpenStackImageTest{ | ||
ctx: ctx, | ||
hostedCluster: hostedCluster, | ||
managementClient: mgmtClient, | ||
hostedControlPlaneNamespace: manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name), | ||
} | ||
} | ||
|
||
func (o OpenStackImageTest) Setup(t *testing.T) { | ||
t.Log("Starting test OpenStackImageTest") | ||
|
||
if globalOpts.Platform != hyperv1.OpenStackPlatform { | ||
t.Skip("test only supported on platform OpenStack") | ||
} | ||
|
||
if globalOpts.configurableClusterOptions.OpenStackNodeImageName == "" { | ||
t.Skip("OpenStack image name not provided, skipping test") | ||
} | ||
} | ||
|
||
func (o OpenStackImageTest) Run(t *testing.T, nodePool hyperv1.NodePool, _ []corev1.Node) { | ||
np := &hyperv1.NodePool{} | ||
e2eutil.EventuallyObject(t, o.ctx, "NodePool to have image configured", | ||
func(ctx context.Context) (*hyperv1.NodePool, error) { | ||
err := o.managementClient.Get(ctx, util.ObjectKey(&nodePool), np) | ||
return np, err | ||
}, | ||
[]e2eutil.Predicate[*hyperv1.NodePool]{ | ||
func(nodePool *hyperv1.NodePool) (done bool, reasons string, err error) { | ||
want, got := hyperv1.OpenStackPlatform, nodePool.Spec.Platform.Type | ||
return want == got, fmt.Sprintf("expected NodePool to have platform %s, got %s", want, got), nil | ||
}, | ||
func(pool *hyperv1.NodePool) (done bool, reasons string, err error) { | ||
diff := cmp.Diff(globalOpts.configurableClusterOptions.OpenStackNodeImageName, ptr.Deref(np.Spec.Platform.OpenStack, hyperv1.OpenStackNodePoolPlatform{}).ImageName) | ||
return diff == "", fmt.Sprintf("incorrect image name: %v", diff), nil | ||
}, | ||
}, | ||
) | ||
|
||
e2eutil.EventuallyObjects(t, o.ctx, "OpenStackServers to be created with the correct image", | ||
func(ctx context.Context) ([]*capiopenstackv1beta1.OpenStackMachine, error) { | ||
list := &capiopenstackv1beta1.OpenStackMachineList{} | ||
err := o.managementClient.List(ctx, list, crclient.InNamespace(o.hostedControlPlaneNamespace), crclient.MatchingLabels{capiv1.MachineDeploymentNameLabel: nodePool.Name}) | ||
oms := make([]*capiopenstackv1beta1.OpenStackMachine, len(list.Items)) | ||
for i := range list.Items { | ||
oms[i] = &list.Items[i] | ||
} | ||
return oms, err | ||
}, | ||
[]e2eutil.Predicate[[]*capiopenstackv1beta1.OpenStackMachine]{ | ||
func(machines []*capiopenstackv1beta1.OpenStackMachine) (done bool, reasons string, err error) { | ||
return len(machines) == int(*nodePool.Spec.Replicas), fmt.Sprintf("expected %d OpenStackMachines, got %d", *nodePool.Spec.Replicas, len(machines)), nil | ||
}, | ||
}, | ||
[]e2eutil.Predicate[*capiopenstackv1beta1.OpenStackMachine]{ | ||
func(machine *capiopenstackv1beta1.OpenStackMachine) (done bool, reasons string, err error) { | ||
server := &capiopenstackv1alpha1.OpenStackServer{} | ||
err = o.managementClient.Get(o.ctx, crclient.ObjectKey{Name: machine.Name, Namespace: o.hostedControlPlaneNamespace}, server) | ||
if err != nil { | ||
return false, "", err | ||
} | ||
if server.Spec.Image.Filter != nil && *server.Spec.Image.Filter.Name != globalOpts.configurableClusterOptions.OpenStackNodeImageName { | ||
return false, fmt.Sprintf("expected image name %s, got %s", globalOpts.configurableClusterOptions.OpenStackNodeImageName, *server.Spec.Image.Filter.Name), nil | ||
} | ||
return true, "", nil | ||
}, | ||
}, | ||
) | ||
} | ||
|
||
func (o OpenStackImageTest) BuildNodePoolManifest(defaultNodepool hyperv1.NodePool) (*hyperv1.NodePool, error) { | ||
nodePool := &hyperv1.NodePool{ | ||
ObjectMeta: metav1.ObjectMeta{ | ||
Name: o.hostedCluster.Name + "-" + "test-osp-image", | ||
Namespace: o.hostedCluster.Namespace, | ||
}, | ||
} | ||
defaultNodepool.Spec.DeepCopyInto(&nodePool.Spec) | ||
|
||
nodePool.Spec.Replicas = &oneReplicas | ||
nodePool.Spec.Platform.OpenStack.ImageName = globalOpts.configurableClusterOptions.OpenStackNodeImageName | ||
return nodePool, nil | ||
} | ||
|
||
func (o OpenStackImageTest) SetupInfra(t *testing.T) error { | ||
return nil | ||
} | ||
|
||
func (o OpenStackImageTest) TeardownInfra(t *testing.T) error { | ||
return nil | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't know if this pattern will be accepted. Suggestions are welcome!
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@bryan-cox I realized that we probably need to handle the OpenStack RHCOS image per NodePool, since the release image can be overridden per NodePool.