diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index acbd89a4..c7cea94d 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -16,7 +16,7 @@ concurrency:
   cancel-in-progress: true
 env:
-  VCLUSTER_VERSION: v0.20.0-alpha.2
+  VCLUSTER_VERSION: v0.20.0-alpha.4
   VCLUSTER_SUFFIX: vcluster
   VCLUSTER_NAME: vcluster
   VCLUSTER_NAMESPACE: vcluster
diff --git a/e2e/plugin/plugin.go b/e2e/plugin/plugin.go
index 6159814f..a03db5ae 100644
--- a/e2e/plugin/plugin.go
+++ b/e2e/plugin/plugin.go
@@ -192,4 +192,14 @@ var _ = ginkgo.Describe("Plugin test", func() {
 			WithTimeout(pollingDurationLong).
 			Should(gomega.BeTrue())
 	})
+
+	ginkgo.It("check the interceptor", func() {
+		// get a pod through the vcluster client; the request should hit the plugin interceptor
+		vPod := &corev1.Pod{}
+		err := f.VclusterCRClient.Get(f.Context, types.NamespacedName{Name: "stuff", Namespace: "test"}, vPod)
+		framework.ExpectNoError(err)
+
+		// check that the interceptor rewrote the response
+		framework.ExpectEqual(vPod.Name, "definitelynotstuff")
+	})
 })
diff --git a/e2e/test_plugin/main.go b/e2e/test_plugin/main.go
index 3d2bb504..10c9e0f2 100644
--- a/e2e/test_plugin/main.go
+++ b/e2e/test_plugin/main.go
@@ -31,6 +31,9 @@ func main() {
 	plugin.MustRegister(syncers.NewMyDeploymentSyncer(ctx))
 	plugin.MustRegister(syncers.NewCarSyncer(ctx))
 	plugin.MustRegister(syncers.NewImportSecrets(ctx))
+	plugin.MustRegister(syncers.DummyInterceptor{})
+
+	klog.Info("finished registering the plugins")

 	plugin.MustStart()
 }
diff --git a/e2e/test_plugin/syncers/interceptor.go b/e2e/test_plugin/syncers/interceptor.go
new file mode 100644
index 00000000..15bf3d00
--- /dev/null
+++ b/e2e/test_plugin/syncers/interceptor.go
@@ -0,0 +1,60 @@
+package syncers
+
+import (
+	"net/http"
+
+	"github.com/loft-sh/vcluster-sdk/plugin"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
+	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+
+	v2 "github.com/loft-sh/vcluster/pkg/plugin/v2"
+	corev1 "k8s.io/api/core/v1"
+)
+
+var _ plugin.Interceptor = DummyInterceptor{}
+
+// DummyInterceptor answers intercepted pod get requests with a static pod named
+// "definitelynotstuff", so the e2e test can verify that the request was handled
+// by the plugin rather than the virtual cluster API server.
+type DummyInterceptor struct {
+}
+
+func (d DummyInterceptor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	scheme := runtime.NewScheme()
+	_ = clientgoscheme.AddToScheme(scheme)
+
+	s := serializer.NewCodecFactory(scheme)
+	responsewriters.WriteObjectNegotiated(
+		s,
+		negotiation.DefaultEndpointRestrictions,
+		schema.GroupVersion{
+			Group:   "",
+			Version: "v1"},
+		w,
+		r,
+		http.StatusOK,
+		&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "definitelynotstuff"}},
+		false)
+}
+
+func (d DummyInterceptor) Name() string {
+	return "testinterceptor"
+}
+
+// InterceptionRules tells vCluster which requests to delegate to this
+// interceptor: here, all get requests for pods.
+func (d DummyInterceptor) InterceptionRules() []v2.InterceptorRule {
+	return []v2.InterceptorRule{
+		{
+			APIGroups:     []string{"*"},
+			Resources:     []string{"pods"},
+			ResourceNames: []string{"*"},
+			Verbs:         []string{"get"},
+		},
+	}
+}
diff --git a/go.mod b/go.mod
index 368f0754..cf30855a 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
 	github.com/ghodss/yaml v1.0.0
 	github.com/hashicorp/go-plugin v1.6.0
 	github.com/loft-sh/log v0.0.0-20230824104949-bd516c25712a
-	github.com/loft-sh/vcluster v0.20.0-alpha.2.0.20240403130844-8bb987ed97b4
+	github.com/loft-sh/vcluster v0.20.0-alpha.3.0.20240409111424-27cde82f6544
 	github.com/onsi/ginkgo/v2 v2.14.0
 	github.com/onsi/gomega v1.30.0
 	github.com/pkg/errors v0.9.1
diff --git a/go.sum b/go.sum
index c28776ad..16363bdc 100644
--- a/go.sum
+++ b/go.sum
@@ -630,6 +630,10 @@ github.com/loft-sh/utils v0.0.29 h1:P/MObccXToAZy2QoJSQDJ+OJx1qHitpFHEVj3QBSNJs=
 github.com/loft-sh/utils v0.0.29/go.mod h1:9hlX9cGpWHg3mNi/oBlv3X4ePGDMK66k8MbOZGFMDTI=
 github.com/loft-sh/vcluster v0.20.0-alpha.2.0.20240403130844-8bb987ed97b4 h1:D486kUE5ZQSx2DDQQ5kf/gowKZG/dNw7As4vg9WR7dw=
 github.com/loft-sh/vcluster v0.20.0-alpha.2.0.20240403130844-8bb987ed97b4/go.mod h1:Ty7km/e/U7wVF9kIqROOR51/XezgHEtS/za0QVPhKkU=
+github.com/loft-sh/vcluster v0.20.0-alpha.3.0.20240409074019-5b5358d2f54d h1:nTQLvvJS4b9tEBMk4s2yQkxnW/9MW5rZl3eCDqeKarE=
+github.com/loft-sh/vcluster v0.20.0-alpha.3.0.20240409074019-5b5358d2f54d/go.mod h1:Ty7km/e/U7wVF9kIqROOR51/XezgHEtS/za0QVPhKkU=
+github.com/loft-sh/vcluster v0.20.0-alpha.3.0.20240409111424-27cde82f6544 h1:/UxPl3HXlCwT8whH57NanvcInguAWgubal2QxvSGhoE=
+github.com/loft-sh/vcluster v0.20.0-alpha.3.0.20240409111424-27cde82f6544/go.mod h1:Ty7km/e/U7wVF9kIqROOR51/XezgHEtS/za0QVPhKkU=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
diff --git a/plugin/manager.go b/plugin/manager.go
index c2fc266b..26b9bb51 100644
--- a/plugin/manager.go
+++ b/plugin/manager.go
@@ -4,7 +4,9 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"net/http"
 	"os"
+	"strconv"
 	"sync"

 	"github.com/ghodss/yaml"
@@ -22,6 +24,7 @@ import (
 	"github.com/loft-sh/vcluster/pkg/util/clienthelper"
 	contextutil "github.com/loft-sh/vcluster/pkg/util/context"
 	"github.com/pkg/errors"
+	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/klog/v2"
@@ -30,7 +33,9 @@ import (
 )

 func newManager() Manager {
-	return &manager{}
+	return &manager{
+		interceptorsHandlers: make(map[string]http.Handler),
+	}
 }

 type manager struct {
@@ -47,6 +52,10 @@ type manager struct {
 	syncers []syncertypes.Base

+	interceptorsHandlers map[string]http.Handler
+	interceptors         []Interceptor
+	interceptorsPort     int
+
 	proConfig v2.InitConfigPro

 	options Options
@@ -106,6 +115,7 @@
 	if err != nil {
 		return nil, fmt.Errorf("error decoding init config %s: %w", initRequest.Config, err)
 	}
+	m.interceptorsPort = initConfig.Port

 	// try to change working dir
 	if initConfig.WorkingDir != "" {
@@ -177,7 +187,15 @@ func (m *manager) Register(syncer syncertypes.Base) error {
 	m.m.Lock()
 	defer m.m.Unlock()

-	m.syncers = append(m.syncers, syncer)
+	if interceptor, ok := syncer.(Interceptor); ok {
+		if _, ok := m.interceptorsHandlers[interceptor.Name()]; ok {
+			return fmt.Errorf("could not add the interceptor %s because its name is already in use", interceptor.Name())
+		}
+		m.interceptorsHandlers[interceptor.Name()] = interceptor
+		m.interceptors = append(m.interceptors, interceptor)
+	} else {
+		m.syncers = append(m.syncers, syncer)
+	}
 	return nil
 }
@@ -191,6 +209,24 @@ func (m *manager) Start() error {
 	return nil
 }

+func (m *manager) startInterceptors() error {
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		handlerName := r.Header.Get("VCluster-Plugin-Handler-Name")
+		if handlerName == "" {
+			responsewriters.InternalError(w, r, errors.New("header VCluster-Plugin-Handler-Name wasn't set"))
+			return
+		}
+		interceptorHandler, ok := m.interceptorsHandlers[handlerName]
+		if !ok {
+			responsewriters.InternalError(w, r, errors.New("header VCluster-Plugin-Handler-Name had no match"))
+			return
+		}
+		interceptorHandler.ServeHTTP(w, r)
+	})
+
+	return http.ListenAndServe("127.0.0.1:"+strconv.Itoa(m.interceptorsPort), handler)
+}
+
 func (m *manager) start() error {
 	m.m.Lock()
 	defer m.m.Unlock()
@@ -206,9 +242,23 @@
 		return fmt.Errorf("find all hooks: %w", err)
 	}

+	// find the interceptors
+	interceptors := m.findAllInterceptors()
+
 	// signal we are ready
-	m.pluginServer.SetReady(hooks)
+	m.pluginServer.SetReady(hooks, interceptors, m.interceptorsPort)
+	if len(m.interceptors) > 0 {
+		go func() {
+			// we need to start them regardless of being the leader, since the traffic is
+			// directed to all replicas
+			err := m.startInterceptors()
+			if err != nil {
+				klog.Errorf("error while running the http interceptors: %v", err)
+				os.Exit(1)
+			}
+		}()
+	}

 	// wait until we are leader to continue
 	<-m.pluginServer.IsLeader()
@@ -305,6 +355,11 @@ func (m *manager) start() error {
 	return nil
 }

+func (m *manager) findAllInterceptors() []Interceptor {
+	klog.Infof("found %d registered interceptors", len(m.interceptors))
+	return m.interceptors
+}
+
 func (m *manager) findAllHooks() (map[types.VersionKindType][]ClientHook, error) {
 	// gather all hooks
 	hooks := map[types.VersionKindType][]ClientHook{}
diff --git a/plugin/server.go b/plugin/server.go
index d5bc5329..9564bfd7 100644
--- a/plugin/server.go
+++ b/plugin/server.go
@@ -21,7 +21,7 @@ type server interface {
 	Serve()

 	// SetReady signals the plugin server the plugin is ready to start
-	SetReady(hooks map[types.VersionKindType][]ClientHook)
+	SetReady(hooks map[types.VersionKindType][]ClientHook, interceptors []Interceptor, port int)

 	// Initialized retrieves the initialize request
 	Initialized() <-chan *pluginv2.Initialize_Request
@@ -43,7 +43,9 @@ func newPluginServer() (server, error) {
 type pluginServer struct {
 	pluginv2.UnimplementedPluginServer

-	hooks map[types.VersionKindType][]ClientHook
+	hooks            map[types.VersionKindType][]ClientHook
+	interceptors     []Interceptor
+	interceptorsPort int

 	initialized chan *pluginv2.Initialize_Request
 	isReady     chan struct{}
@@ -88,8 +90,10 @@ func (p *pluginServer) IsLeader() <-chan struct{} {
 	return p.isLeader
 }

-func (p *pluginServer) SetReady(hooks map[types.VersionKindType][]ClientHook) {
+func (p *pluginServer) SetReady(hooks map[types.VersionKindType][]ClientHook, interceptors []Interceptor, port int) {
 	p.hooks = hooks
+	p.interceptors = interceptors
+	p.interceptorsPort = port
 	close(p.isReady)
 }

@@ -216,9 +220,11 @@ func (p *pluginServer) GetPluginConfig(context.Context, *pluginv2.GetPluginConfi
 		return nil, err
 	}

+	interceptorConfig := p.getInterceptorConfig()
 	// build plugin config
 	pluginConfig := &v2.PluginConfig{
-		ClientHooks: clientHooks,
+		ClientHooks:  clientHooks,
+		Interceptors: interceptorConfig,
 	}

 	// marshal plugin config
@@ -265,6 +271,15 @@ func (p *pluginServer) getClientHooks() ([]*v2.ClientHook, error) {
 	return registeredHooks, nil
 }

+func (p *pluginServer) getInterceptorConfig() map[string][]v2.InterceptorRule {
+	res := make(map[string][]v2.InterceptorRule)
+	for _, interceptor := range p.interceptors {
+		res[interceptor.Name()] = interceptor.InterceptionRules()
+	}
+
+	return res
+}
+
 var _ plugin.Plugin = &pluginServer{}

 // Server always returns an error; we're only implementing the GRPCPlugin
diff --git a/plugin/types.go b/plugin/types.go
index e169e96e..696ae96c 100644
--- a/plugin/types.go
+++ b/plugin/types.go
@@ -2,6 +2,7 @@ package plugin

 import (
"context" + "net/http" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" v2 "github.com/loft-sh/vcluster/pkg/plugin/v2" @@ -58,6 +59,16 @@ type ClientHook interface { Resource() client.Object } +type Interceptor interface { + syncertypes.Base + + // Handler is the handler that will handle the requests delegated by the syncer + http.Handler + + // InterceptionRules returns an rbac style struct which defines what to intercept + InterceptionRules() []v2.InterceptorRule +} + type MutateCreateVirtual interface { MutateCreateVirtual(ctx context.Context, obj client.Object) (client.Object, error) } diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt b/vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt index 82e7ce0d..ade5fef6 100644 --- a/vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt @@ -1,22 +1,22 @@ -Copyright (c) 2014 Takashi Kokubun - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Copyright (c) 2014 Takashi Kokubun + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create/pro.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create/pro.go deleted file mode 100644 index 1993d0d8..00000000 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create/pro.go +++ /dev/null @@ -1,621 +0,0 @@ -package create - -import ( - "context" - "fmt" - "os" - "strconv" - "strings" - "time" - - "github.com/ghodss/yaml" - "github.com/go-logr/logr" - clusterv1 "github.com/loft-sh/agentapi/v3/pkg/apis/loft/cluster/v1" - agentstoragev1 "github.com/loft-sh/agentapi/v3/pkg/apis/loft/storage/v1" - managementv1 "github.com/loft-sh/api/v3/pkg/apis/management/v1" - storagev1 "github.com/loft-sh/api/v3/pkg/apis/storage/v1" - "github.com/loft-sh/loftctl/v3/cmd/loftctl/cmd/create" - proclient "github.com/loft-sh/loftctl/v3/pkg/client" - "github.com/loft-sh/loftctl/v3/pkg/client/helper" - "github.com/loft-sh/loftctl/v3/pkg/client/naming" - "github.com/loft-sh/loftctl/v3/pkg/config" - "github.com/loft-sh/loftctl/v3/pkg/vcluster" - "github.com/loft-sh/log" - vclusterconfig "github.com/loft-sh/vcluster/config" - "github.com/loft-sh/vcluster/pkg/procli" - "github.com/loft-sh/vcluster/pkg/strvals" - "github.com/loft-sh/vcluster/pkg/telemetry" - "github.com/loft-sh/vcluster/pkg/upgrade" - "github.com/loft-sh/vcluster/pkg/util" - "github.com/loft-sh/vcluster/pkg/util/cliconfig" - "golang.org/x/mod/semver" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const LoftChartRepo = "https://charts.loft.sh" - -var AllowedDistros = []string{"k3s", "k0s", "k8s", "eks"} - -func DeployProCluster(ctx context.Context, options *Options, proClient procli.Client, virtualClusterName, targetNamespace string, log log.Logger) error { - // determine project & cluster name - var err error - options.Cluster, options.Project, err = helper.SelectProjectOrCluster(proClient, options.Cluster, options.Project, false, log) - if err != nil { - return err - } - - virtualClusterNamespace := naming.ProjectNamespace(options.Project) - managementClient, err := proClient.Management() - if err != nil { - return err - } - - // make sure there is not existing virtual cluster - var virtualClusterInstance *managementv1.VirtualClusterInstance - virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterNamespace).Get(ctx, virtualClusterName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return fmt.Errorf("couldn't retrieve virtual cluster instance: %w", err) - } else if err == nil && !virtualClusterInstance.DeletionTimestamp.IsZero() { - log.Infof("Waiting until virtual cluster is deleted...") - - // wait until the virtual cluster instance is deleted - waitErr := wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), false, func(ctx context.Context) (done bool, err error) { - virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterNamespace).Get(ctx, virtualClusterName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return false, err - } else if err == nil && virtualClusterInstance.DeletionTimestamp != nil { - return false, nil - } - - return true, nil - }) - if waitErr != nil { - return fmt.Errorf("get virtual cluster instance: %w", err) - } - - virtualClusterInstance = nil - } else if kerrors.IsNotFound(err) { - virtualClusterInstance = nil - } - - // 
if the virtual cluster already exists and flag is not set, we terminate - if !options.Upgrade && virtualClusterInstance != nil { - return fmt.Errorf("virtual cluster %s already exists in project %s", virtualClusterName, options.Project) - } - - // should create via template - useTemplate, err := shouldCreateWithTemplate(ctx, proClient, options, virtualClusterInstance) - if err != nil { - return fmt.Errorf("should use template: %w", err) - } - - // create virtual cluster if necessary - if useTemplate { - if virtualClusterInstance == nil { - // create via template - virtualClusterInstance, err = createWithTemplate(ctx, proClient, options, virtualClusterName, log) - if err != nil { - return err - } - } else { - // upgrade via template - virtualClusterInstance, err = upgradeWithTemplate(ctx, proClient, options, virtualClusterInstance, log) - if err != nil { - return err - } - } - } else { - if virtualClusterInstance == nil { - // create without template - virtualClusterInstance, err = createWithoutTemplate(ctx, proClient, options, virtualClusterName, targetNamespace, log) - if err != nil { - return err - } - } else { - // upgrade via template - virtualClusterInstance, err = upgradeWithoutTemplate(ctx, proClient, options, virtualClusterInstance, log) - if err != nil { - return err - } - } - } - - // wait until virtual cluster is ready - virtualClusterInstance, err = vcluster.WaitForVirtualClusterInstance(ctx, managementClient, virtualClusterInstance.Namespace, virtualClusterInstance.Name, true, log) - if err != nil { - return err - } - log.Donef("Successfully created the virtual cluster %s in project %s", virtualClusterName, options.Project) - - return nil -} - -func createWithoutTemplate(ctx context.Context, proClient procli.Client, options *Options, virtualClusterName, targetNamespace string, log log.Logger) (*managementv1.VirtualClusterInstance, error) { - err := validateNoTemplateOptions(options) - if err != nil { - return nil, err - } - - // merge values - helmValues, err := mergeValues(proClient, options, log) - if err != nil { - return nil, err - } - - // create virtual cluster instance - zone, offset := time.Now().Zone() - virtualClusterInstance := &managementv1.VirtualClusterInstance{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: naming.ProjectNamespace(options.Project), - Name: virtualClusterName, - Annotations: map[string]string{ - clusterv1.SleepModeTimezoneAnnotation: zone + "#" + strconv.Itoa(offset), - }, - }, - Spec: managementv1.VirtualClusterInstanceSpec{ - VirtualClusterInstanceSpec: storagev1.VirtualClusterInstanceSpec{ - Template: &storagev1.VirtualClusterTemplateDefinition{ - VirtualClusterCommonSpec: agentstoragev1.VirtualClusterCommonSpec{ - HelmRelease: agentstoragev1.VirtualClusterHelmRelease{ - Chart: agentstoragev1.VirtualClusterHelmChart{ - Name: options.ChartName, - Repo: options.ChartRepo, - Version: options.ChartVersion, - }, - Values: helmValues, - }, - ForwardToken: true, - Pro: agentstoragev1.VirtualClusterProSpec{ - Enabled: true, - }, - }, - }, - ClusterRef: storagev1.VirtualClusterClusterRef{ - ClusterRef: storagev1.ClusterRef{ - Cluster: options.Cluster, - Namespace: targetNamespace, - }, - }, - }, - }, - } - - // set links - create.SetCustomLinksAnnotation(virtualClusterInstance, options.Links) - - // set labels - _, err = create.UpdateLabels(virtualClusterInstance, options.Labels) - if err != nil { - return nil, err - } - - // set annotations - _, err = create.UpdateAnnotations(virtualClusterInstance, options.Annotations) - if err != nil { - return nil, 
err - } - - // get management client - managementClient, err := proClient.Management() - if err != nil { - return nil, err - } - - // create virtualclusterinstance - log.Infof("Creating virtual cluster %s in project %s...", virtualClusterName, options.Project) - virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterInstance.Namespace).Create(ctx, virtualClusterInstance, metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("create virtual cluster: %w", err) - } - - return virtualClusterInstance, nil -} - -func upgradeWithoutTemplate(ctx context.Context, proClient procli.Client, options *Options, virtualClusterInstance *managementv1.VirtualClusterInstance, log log.Logger) (*managementv1.VirtualClusterInstance, error) { - err := validateNoTemplateOptions(options) - if err != nil { - return nil, err - } - - // merge values - helmValues, err := mergeValues(proClient, options, log) - if err != nil { - return nil, err - } - - // update virtual cluster instance - if virtualClusterInstance.Spec.Template == nil { - return nil, fmt.Errorf("virtual cluster instance uses a template, cannot update virtual cluster") - } - - oldVirtualCluster := virtualClusterInstance.DeepCopy() - chartNameChanged := virtualClusterInstance.Spec.Template.HelmRelease.Chart.Name != options.ChartName - if chartNameChanged { - return nil, fmt.Errorf("cannot change chart name from '%s' to '%s', this operation is not allowed", virtualClusterInstance.Spec.Template.HelmRelease.Chart.Name, options.ChartName) - } - - chartRepoChanged := virtualClusterInstance.Spec.Template.HelmRelease.Chart.Repo != options.ChartRepo - chartVersionChanged := virtualClusterInstance.Spec.Template.HelmRelease.Chart.Version != options.ChartVersion - valuesChanged := virtualClusterInstance.Spec.Template.HelmRelease.Values != helmValues - - // set links - linksChanged := create.SetCustomLinksAnnotation(virtualClusterInstance, options.Links) - - // set labels - labelsChanged, err := create.UpdateLabels(virtualClusterInstance, options.Labels) - if err != nil { - return nil, err - } - - // set annotations - annotationsChanged, err := create.UpdateAnnotations(virtualClusterInstance, options.Annotations) - if err != nil { - return nil, err - } - - // check if update is needed - if chartRepoChanged || chartVersionChanged || valuesChanged || linksChanged || labelsChanged || annotationsChanged { - virtualClusterInstance.Spec.Template.HelmRelease.Chart.Repo = options.ChartRepo - virtualClusterInstance.Spec.Template.HelmRelease.Chart.Version = options.ChartVersion - virtualClusterInstance.Spec.Template.HelmRelease.Values = helmValues - - // get management client - managementClient, err := proClient.Management() - if err != nil { - return nil, err - } - - patch := client.MergeFrom(oldVirtualCluster) - patchData, err := patch.Data(virtualClusterInstance) - if err != nil { - return nil, fmt.Errorf("calculate update patch: %w", err) - } - log.Infof("Updating virtual cluster %s in project %s...", virtualClusterInstance.Name, options.Project) - virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterInstance.Namespace).Patch(ctx, virtualClusterInstance.Name, patch.Type(), patchData, metav1.PatchOptions{}) - if err != nil { - return nil, fmt.Errorf("patch virtual cluster: %w", err) - } - } else { - log.Infof("Skip updating virtual cluster...") - } - - return virtualClusterInstance, nil -} - -func shouldCreateWithTemplate(ctx context.Context, proClient 
proclient.Client, options *Options, virtualClusterInstance *managementv1.VirtualClusterInstance) (bool, error) { - virtualClusterInstanceHasTemplate := virtualClusterInstance != nil && virtualClusterInstance.Spec.TemplateRef != nil - virtualClusterInstanceHasNoTemplate := virtualClusterInstance != nil && virtualClusterInstance.Spec.TemplateRef == nil - if virtualClusterInstanceHasTemplate || options.Template != "" { - return true, nil - } else if virtualClusterInstanceHasNoTemplate { - return false, nil - } - - managementClient, err := proClient.Management() - if err != nil { - return false, err - } - - project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, options.Project, metav1.GetOptions{}) - if err != nil { - return false, fmt.Errorf("get vCluster project: %w", err) - } - - // check if there is a default template - for _, template := range project.Spec.AllowedTemplates { - if template.Kind == "VirtualClusterTemplate" && template.IsDefault { - return true, nil - } - } - - // check if we can create without - if project.Spec.RequireTemplate.Disabled { - return false, nil - } - - return true, nil -} - -func createWithTemplate(ctx context.Context, proClient proclient.Client, options *Options, virtualClusterName string, log log.Logger) (*managementv1.VirtualClusterInstance, error) { - err := validateTemplateOptions(options) - if err != nil { - return nil, err - } - - // resolve template - virtualClusterTemplate, resolvedParameters, err := create.ResolveTemplate( - proClient, - options.Project, - options.Template, - options.TemplateVersion, - options.SetParams, - options.Params, - log, - ) - if err != nil { - return nil, err - } - - // create virtual cluster instance - zone, offset := time.Now().Zone() - virtualClusterInstance := &managementv1.VirtualClusterInstance{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: naming.ProjectNamespace(options.Project), - Name: virtualClusterName, - Annotations: map[string]string{ - clusterv1.SleepModeTimezoneAnnotation: zone + "#" + strconv.Itoa(offset), - }, - }, - Spec: managementv1.VirtualClusterInstanceSpec{ - VirtualClusterInstanceSpec: storagev1.VirtualClusterInstanceSpec{ - TemplateRef: &storagev1.TemplateRef{ - Name: virtualClusterTemplate.Name, - Version: options.TemplateVersion, - }, - ClusterRef: storagev1.VirtualClusterClusterRef{ - ClusterRef: storagev1.ClusterRef{ - Cluster: options.Cluster, - }, - }, - Parameters: resolvedParameters, - }, - }, - } - - // set links - create.SetCustomLinksAnnotation(virtualClusterInstance, options.Links) - - // set labels - _, err = create.UpdateLabels(virtualClusterInstance, options.Labels) - if err != nil { - return nil, err - } - - // set annotations - _, err = create.UpdateAnnotations(virtualClusterInstance, options.Annotations) - if err != nil { - return nil, err - } - - // get management client - managementClient, err := proClient.Management() - if err != nil { - return nil, err - } - - // create virtual cluster instance - log.Infof("Creating virtual cluster %s in project %s with template %s...", virtualClusterName, options.Project, virtualClusterTemplate.Name) - virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterInstance.Namespace).Create(ctx, virtualClusterInstance, metav1.CreateOptions{}) - if err != nil { - return nil, fmt.Errorf("create virtual cluster: %w", err) - } - - return virtualClusterInstance, nil -} - -func upgradeWithTemplate(ctx context.Context, proClient proclient.Client, options *Options, virtualClusterInstance 
*managementv1.VirtualClusterInstance, log log.Logger) (*managementv1.VirtualClusterInstance, error) { - err := validateTemplateOptions(options) - if err != nil { - return nil, err - } - - // resolve template - virtualClusterTemplate, resolvedParameters, err := create.ResolveTemplate( - proClient, - options.Project, - options.Template, - options.TemplateVersion, - options.SetParams, - options.Params, - log, - ) - if err != nil { - return nil, err - } - - // update virtual cluster instance - if virtualClusterInstance.Spec.TemplateRef == nil { - return nil, fmt.Errorf("virtual cluster instance doesn't use a template, cannot update virtual cluster") - } - - oldVirtualCluster := virtualClusterInstance.DeepCopy() - templateRefChanged := virtualClusterInstance.Spec.TemplateRef.Name != virtualClusterTemplate.Name - paramsChanged := virtualClusterInstance.Spec.Parameters != resolvedParameters - versionChanged := (options.TemplateVersion != "" && virtualClusterInstance.Spec.TemplateRef.Version != options.TemplateVersion) - linksChanged := create.SetCustomLinksAnnotation(virtualClusterInstance, options.Links) - - // set labels - labelsChanged, err := create.UpdateLabels(virtualClusterInstance, options.Labels) - if err != nil { - return nil, err - } - - // set annotations - annotationsChanged, err := create.UpdateAnnotations(virtualClusterInstance, options.Annotations) - if err != nil { - return nil, err - } - - // check if update is needed - if templateRefChanged || paramsChanged || versionChanged || linksChanged || labelsChanged || annotationsChanged { - virtualClusterInstance.Spec.TemplateRef.Name = virtualClusterTemplate.Name - virtualClusterInstance.Spec.TemplateRef.Version = options.TemplateVersion - virtualClusterInstance.Spec.Parameters = resolvedParameters - - // get management client - managementClient, err := proClient.Management() - if err != nil { - return nil, err - } - - patch := client.MergeFrom(oldVirtualCluster) - patchData, err := patch.Data(virtualClusterInstance) - if err != nil { - return nil, fmt.Errorf("calculate update patch: %w", err) - } - log.Infof("Updating virtual cluster %s in project %s...", virtualClusterInstance.Name, options.Project) - virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(virtualClusterInstance.Namespace).Patch(ctx, virtualClusterInstance.Name, patch.Type(), patchData, metav1.PatchOptions{}) - if err != nil { - return nil, fmt.Errorf("patch virtual cluster: %w", err) - } - } else { - log.Infof("Skip updating virtual cluster...") - } - - return virtualClusterInstance, nil -} - -func validateNoTemplateOptions(options *Options) error { - if len(options.SetParams) > 0 { - return fmt.Errorf("cannot use --set-param because the vcluster is not using a template. Use --set instead") - } - if options.Params != "" { - return fmt.Errorf("cannot use --params because the vcluster is not using a template. Use --values instead") - } - if options.Template != "" { - return fmt.Errorf("cannot use --template because the vcluster is not using a template") - } - if options.TemplateVersion != "" { - return fmt.Errorf("cannot use --template-version because the vcluster is not using a template") - } - - return nil -} - -func validateTemplateOptions(options *Options) error { - if len(options.SetValues) > 0 { - return fmt.Errorf("cannot use --set because the vcluster is using a template. Please use --set-param instead") - } - if len(options.Values) > 0 { - return fmt.Errorf("cannot use --values because the vcluster is using a template. 
Please use --params instead") - } - if options.KubernetesVersion != "" { - return fmt.Errorf("cannot use --kubernetes-version because the vcluster is using a template") - } - if options.Distro != "" && options.Distro != "k3s" { - return fmt.Errorf("cannot use --distro because the vcluster is using a template") - } - if options.ChartName != "vcluster" { - return fmt.Errorf("cannot use --chart-name because the vcluster is using a template") - } - if options.ChartRepo != LoftChartRepo { - return fmt.Errorf("cannot use --chart-repo because the vcluster is using a template") - } - if options.ChartVersion != upgrade.GetVersion() { - return fmt.Errorf("cannot use --chart-version because the vcluster is using a template") - } - - return nil -} - -func mergeValues(proClient procli.Client, options *Options, log log.Logger) (string, error) { - // merge values - chartOptions, err := toChartOptions(proClient, options, log) - if err != nil { - return "", err - } - logger := logr.New(log.LogrLogSink()) - chartValues, err := vclusterconfig.GetExtraValues(chartOptions, logger) - if err != nil { - return "", err - } - - // parse into map - outValues, err := parseString(chartValues) - if err != nil { - return "", err - } - - // merge values - for _, valuesFile := range options.Values { - out, err := os.ReadFile(valuesFile) - if err != nil { - return "", fmt.Errorf("reading values file %s: %w", valuesFile, err) - } - - extraValues, err := parseString(string(out)) - if err != nil { - return "", fmt.Errorf("parse values file %s: %w", valuesFile, err) - } - - strvals.MergeMaps(outValues, extraValues) - } - - // merge set - for _, set := range options.SetValues { - err = strvals.ParseIntoString(set, outValues) - if err != nil { - return "", fmt.Errorf("apply --set %s: %w", set, err) - } - } - - // out - out, err := yaml.Marshal(outValues) - if err != nil { - return "", err - } - - return string(out), nil -} - -func parseString(str string) (map[string]interface{}, error) { - out := map[string]interface{}{} - err := yaml.Unmarshal([]byte(str), &out) - if err != nil { - return nil, err - } - - return out, nil -} - -func toChartOptions(proClient procli.Client, options *Options, log log.Logger) (*vclusterconfig.ExtraValuesOptions, error) { - if !util.Contains(options.Distro, AllowedDistros) { - return nil, fmt.Errorf("unsupported distro %s, please select one of: %s", options.Distro, strings.Join(AllowedDistros, ", ")) - } - - if options.ChartName == "vcluster" && options.Distro != "k3s" { - options.ChartName += "-" + options.Distro - } - - version := vclusterconfig.KubernetesVersion{} - if options.KubernetesVersion != "" { - if options.KubernetesVersion[0] != 'v' { - options.KubernetesVersion = "v" + options.KubernetesVersion - } - - if !semver.IsValid(options.KubernetesVersion) { - return nil, fmt.Errorf("please use valid semantic versioning format, e.g. 
vX.X") - } - - majorMinorVer := semver.MajorMinor(options.KubernetesVersion) - if splittedVersion := strings.Split(options.KubernetesVersion, "."); len(splittedVersion) > 2 { - log.Warnf("currently we only support major.minor version (%s) and not the patch version (%s)", majorMinorVer, options.KubernetesVersion) - } - - parsedVersion, err := vclusterconfig.ParseKubernetesVersionInfo(majorMinorVer) - if err != nil { - return nil, err - } - - version.Major = parsedVersion.Major - version.Minor = parsedVersion.Minor - } - - // use default version if its development - if options.ChartVersion == upgrade.DevelopmentVersion { - options.ChartVersion = "" - } - - return &vclusterconfig.ExtraValuesOptions{ - Distro: options.Distro, - KubernetesVersion: version, - DisableTelemetry: cliconfig.GetConfig(log).TelemetryDisabled, - InstanceCreatorType: "vclusterctl", - PlatformInstanceID: telemetry.GetPlatformInstanceID(proClient.Self()), - PlatformUserID: telemetry.GetPlatformUserID(proClient.Self()), - MachineID: telemetry.GetMachineID(log), - }, nil -} diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create/types.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create/types.go deleted file mode 100644 index c8ad09c1..00000000 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create/types.go +++ /dev/null @@ -1,59 +0,0 @@ -package create - -// Options holds the create cmd options -type Options struct { - KubeConfigContextName string - ChartVersion string - ChartName string - ChartRepo string - LocalChartDir string - Distro string - Values []string - SetValues []string - - KubernetesVersion string - - CreateNamespace bool - UpdateCurrent bool - Expose bool - ExposeLocal bool - - Connect bool - Upgrade bool - - // Pro - Project string - Cluster string - Template string - TemplateVersion string - Links []string - Annotations []string - Labels []string - Params string - SetParams []string - DisablePro bool -} - -type Values struct { - Init Init `json:"init" mapstructure:"init"` -} - -type Init struct { - Manifests string `json:"manifests" mapstructure:"manifests"` - Helm []HelmChart `json:"helm" mapstructure:"helm"` -} - -type HelmChart struct { - Bundle string `json:"bundle,omitempty" mapstructure:"bundle,omitempty"` - Name string `json:"name,omitempty" mapstructure:"name"` - Repo string `json:"repo,omitempty" mapstructure:"repo"` - Version string `json:"version,omitempty" mapstructure:"version"` - Namespace string `json:"namespace,omitempty" mapstructure:"namespace"` - Values string `json:"values,omitempty" mapstructure:"values"` - Release HelmRelease `json:"release,omitempty" mapstructure:"release"` -} - -type HelmRelease struct { - Name string `json:"name,omitempty" mapstructure:"name"` - Namespace string `json:"namespace,omitempty" mapstructure:"namespace"` -} diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/completion.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/completion.go index 81276da4..ce4ce306 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/completion.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/completion.go @@ -51,7 +51,7 @@ func wrapCompletionFuncWithTimeout(defaultDirective cobra.ShellCompDirective, co // It takes into account the namespace if specified by the --namespace flag. 
func newValidVClusterNameFunc(globalFlags *flags.GlobalFlags) completionFunc { fn := func(cmd *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - vclusters, _, err := find.ListVClusters(cmd.Context(), nil, globalFlags.Context, "", globalFlags.Namespace, "", log.Default.ErrorStreamOnly()) + vclusters, err := find.ListVClusters(cmd.Context(), globalFlags.Context, "", globalFlags.Namespace, log.Default.ErrorStreamOnly()) if err != nil { return []string{}, cobra.ShellCompDirectiveError | cobra.ShellCompDirectiveNoFileComp } diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/connect.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/connect.go index c770d491..5d2e3afd 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/connect.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/connect.go @@ -13,10 +13,6 @@ import ( "syscall" "time" - "github.com/loft-sh/loftctl/v3/cmd/loftctl/cmd/use" - proclient "github.com/loft-sh/loftctl/v3/pkg/client" - "github.com/loft-sh/loftctl/v3/pkg/vcluster" - "github.com/loft-sh/vcluster/pkg/procli" "github.com/loft-sh/vcluster/pkg/util/clihelper" "github.com/pkg/errors" "github.com/samber/lo" @@ -61,7 +57,6 @@ type ConnectCmd struct { KubeConfigContextName string Server string KubeConfig string - Project string ServiceAccount string LocalPort int ServiceAccountExpiration int @@ -121,9 +116,6 @@ vcluster connect test -n test -- kubectl get ns cobraCmd.Flags().BoolVar(&cmd.Insecure, "insecure", false, "If specified, vcluster will create the kube config with insecure-skip-tls-verify") cobraCmd.Flags().BoolVar(&cmd.BackgroundProxy, "background-proxy", false, "If specified, vcluster will create the background proxy in docker [its mainly used for vclusters with no nodeport service.]") - // pro - cobraCmd.Flags().StringVar(&cmd.Project, "project", "", "[PRO] The pro project the vcluster is in") - return cobraCmd } @@ -134,15 +126,10 @@ func (cmd *ConnectCmd) Run(ctx context.Context, args []string) error { vClusterName = args[0] } - proClient, err := procli.CreateProClient() - if err != nil { - cmd.Log.Debugf("Error creating pro client: %v", err) - } - - return cmd.Connect(ctx, proClient, vClusterName, args[1:]) + return cmd.Connect(ctx, vClusterName, args[1:]) } -func (cmd *ConnectCmd) Connect(ctx context.Context, proClient procli.Client, vClusterName string, command []string) error { +func (cmd *ConnectCmd) Connect(ctx context.Context, vClusterName string, command []string) error { // validate flags err := cmd.validateFlags() if err != nil { @@ -150,11 +137,9 @@ func (cmd *ConnectCmd) Connect(ctx context.Context, proClient procli.Client, vCl } // retrieve the vcluster - vCluster, proVCluster, err := find.GetVCluster(ctx, proClient, cmd.Context, vClusterName, cmd.Namespace, cmd.Project, cmd.Log) + vCluster, err := find.GetVCluster(ctx, cmd.Context, vClusterName, cmd.Namespace, cmd.Log) if err != nil { return err - } else if proVCluster != nil { - return cmd.connectPro(ctx, proClient, proVCluster, command) } return cmd.connectOss(ctx, vCluster, command) @@ -168,58 +153,6 @@ func (cmd *ConnectCmd) validateFlags() error { return nil } -func (cmd *ConnectCmd) connectPro(ctx context.Context, proClient proclient.Client, vCluster *procli.VirtualClusterInstanceProject, command []string) error { - err := cmd.validateProFlags() - if err != nil { - return err - } - - // create management client - managementClient, err := proClient.Management() - if err != nil { - return err - } - - // wait for vCluster 
to become ready - vCluster.VirtualCluster, err = vcluster.WaitForVirtualClusterInstance(ctx, managementClient, vCluster.VirtualCluster.Namespace, vCluster.VirtualCluster.Name, true, cmd.Log) - if err != nil { - return err - } - - // retrieve vCluster kube config - kubeConfig, err := cmd.getVClusterProKubeConfig(ctx, proClient, vCluster) - if err != nil { - return err - } - - // check if we should execute command - if len(command) > 0 { - return cmd.executeCommand(*kubeConfig, command) - } - - return cmd.writeKubeConfig(kubeConfig, vCluster.VirtualCluster.Name) -} - -func (cmd *ConnectCmd) validateProFlags() error { - if cmd.PodName != "" { - return fmt.Errorf("cannot use --pod with a pro vCluster") - } - if cmd.Server != "" { - return fmt.Errorf("cannot use --server with a pro vCluster") - } - if cmd.BackgroundProxy { - return fmt.Errorf("cannot use --background-proxy with a pro vCluster") - } - if cmd.LocalPort != 0 { - return fmt.Errorf("cannot use --local-port with a pro vCluster") - } - if cmd.Address != "" { - return fmt.Errorf("cannot use --address with a pro vCluster") - } - - return nil -} - func (cmd *ConnectCmd) connectOss(ctx context.Context, vCluster *find.VCluster, command []string) error { // prepare clients and find vcluster err := cmd.prepare(ctx, vCluster) @@ -397,76 +330,6 @@ func (cmd *ConnectCmd) prepare(ctx context.Context, vCluster *find.VCluster) err return nil } -func (cmd *ConnectCmd) getVClusterProKubeConfig(ctx context.Context, proClient proclient.Client, vCluster *procli.VirtualClusterInstanceProject) (*clientcmdapi.Config, error) { - contextOptions, err := use.CreateVirtualClusterInstanceOptions(ctx, proClient, "", vCluster.Project.Name, vCluster.VirtualCluster, false, false, cmd.Log) - if err != nil { - return nil, fmt.Errorf("prepare vCluster kube config: %w", err) - } - - // make sure access key is set - if contextOptions.Token == "" && len(contextOptions.ClientCertificateData) == 0 && len(contextOptions.ClientKeyData) == 0 { - contextOptions.Token = proClient.Config().AccessKey - } - - // get current context - rawConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{ - CurrentContext: cmd.Context, - }).RawConfig() - if err != nil { - return nil, err - } - - // make sure kube context name is set - if cmd.KubeConfigContextName == "" { - // use parent context if this is a vcluster context - kubeContext := rawConfig.CurrentContext - _, _, parentContext := find.VClusterProFromContext(kubeContext) - if parentContext == "" { - _, _, parentContext = find.VClusterFromContext(kubeContext) - } - if parentContext != "" { - kubeContext = parentContext - } - cmd.KubeConfigContextName = find.VClusterProContextName(vCluster.VirtualCluster.Name, vCluster.Project.Name, kubeContext) - } - - // set insecure true? 
- if cmd.Insecure { - contextOptions.InsecureSkipTLSVerify = true - } - - // build kube config - kubeConfig, err := clihelper.GetProKubeConfig(contextOptions) - if err != nil { - return nil, err - } - - // we want to use a service account token in the kube config - if cmd.ServiceAccount != "" { - // check if its enabled on the pro vcluster - if !vCluster.VirtualCluster.Status.VirtualCluster.ForwardToken { - return nil, fmt.Errorf("forward token is not enabled on the vCluster and hence you cannot authenticate with a service account token") - } - - // create service account token - token, err := cmd.createServiceAccountToken(ctx, *kubeConfig) - if err != nil { - return nil, err - } - - // set service account token - for k := range kubeConfig.AuthInfos { - kubeConfig.AuthInfos[k] = &clientcmdapi.AuthInfo{ - Token: token, - Extensions: make(map[string]runtime.Object), - ImpersonateUserExtra: make(map[string][]string), - } - } - } - - return kubeConfig, nil -} - func (cmd *ConnectCmd) getVClusterKubeConfig(ctx context.Context, vclusterName string, command []string) (*clientcmdapi.Config, error) { var err error podName := cmd.PodName diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/create.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/create.go index 5d819df2..2af18a0c 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/create.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/create.go @@ -11,14 +11,12 @@ import ( "strings" "time" - "github.com/go-logr/logr" "github.com/loft-sh/log/survey" "github.com/loft-sh/log/terminal" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/localkubernetes" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find" "github.com/loft-sh/vcluster/config" "github.com/loft-sh/vcluster/pkg/embed" - "github.com/loft-sh/vcluster/pkg/procli" "github.com/loft-sh/vcluster/pkg/util/cliconfig" "github.com/loft-sh/vcluster/pkg/util/clihelper" corev1 "k8s.io/api/core/v1" @@ -26,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" loftctlUtil "github.com/loft-sh/loftctl/v3/pkg/util" - "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/loft-sh/vcluster/pkg/util" "golang.org/x/mod/semver" @@ -46,6 +43,56 @@ import ( var CreatedByVClusterAnnotation = "vcluster.loft.sh/created" +const LoftChartRepo = "https://charts.loft.sh" + +var AllowedDistros = []string{"k8s", "k3s", "k0s", "eks"} + +// Options holds the create cmd options +type Options struct { + KubeConfigContextName string + ChartVersion string + ChartName string + ChartRepo string + LocalChartDir string + Distro string + Values []string + SetValues []string + + KubernetesVersion string + + CreateNamespace bool + UpdateCurrent bool + Expose bool + ExposeLocal bool + + Connect bool + Upgrade bool +} + +type Values struct { + Init Init `json:"init" mapstructure:"init"` +} + +type Init struct { + Manifests string `json:"manifests" mapstructure:"manifests"` + Helm []HelmChart `json:"helm" mapstructure:"helm"` +} + +type HelmChart struct { + Bundle string `json:"bundle,omitempty" mapstructure:"bundle,omitempty"` + Name string `json:"name,omitempty" mapstructure:"name"` + Repo string `json:"repo,omitempty" mapstructure:"repo"` + Version string `json:"version,omitempty" mapstructure:"version"` + Namespace string `json:"namespace,omitempty" mapstructure:"namespace"` + Values string `json:"values,omitempty" mapstructure:"values"` + Release HelmRelease `json:"release,omitempty" mapstructure:"release"` +} + 
+type HelmRelease struct { + Name string `json:"name,omitempty" mapstructure:"name"` + Namespace string `json:"namespace,omitempty" mapstructure:"namespace"` +} + // CreateCmd holds the login cmd flags type CreateCmd struct { *flags.GlobalFlags @@ -53,7 +100,7 @@ type CreateCmd struct { log log.Logger kubeClientConfig clientcmd.ClientConfig kubeClient *kubernetes.Clientset - create.Options + Options localCluster bool } @@ -90,8 +137,8 @@ vcluster create test --namespace test cobraCmd.Flags().StringVar(&cmd.KubeConfigContextName, "kube-config-context-name", "", "If set, will override the context name of the generated virtual cluster kube config with this name") cobraCmd.Flags().StringVar(&cmd.ChartVersion, "chart-version", upgrade.GetVersion(), "The virtual cluster chart version to use (e.g. v0.9.1)") cobraCmd.Flags().StringVar(&cmd.ChartName, "chart-name", "vcluster", "The virtual cluster chart name to use") - cobraCmd.Flags().StringVar(&cmd.ChartRepo, "chart-repo", create.LoftChartRepo, "The virtual cluster chart repo to use") - cobraCmd.Flags().StringVar(&cmd.Distro, "distro", "k3s", fmt.Sprintf("Kubernetes distro to use for the virtual cluster. Allowed distros: %s", strings.Join(create.AllowedDistros, ", "))) + cobraCmd.Flags().StringVar(&cmd.ChartRepo, "chart-repo", LoftChartRepo, "The virtual cluster chart repo to use") + cobraCmd.Flags().StringVar(&cmd.Distro, "distro", "k8s", fmt.Sprintf("Kubernetes distro to use for the virtual cluster. Allowed distros: %s", strings.Join(AllowedDistros, ", "))) cobraCmd.Flags().StringVar(&cmd.KubernetesVersion, "kubernetes-version", "", "The kubernetes version to use (e.g. v1.20). Patch versions are not supported") cobraCmd.Flags().StringArrayVarP(&cmd.Values, "values", "f", []string{}, "Path where to load extra helm values from") cobraCmd.Flags().StringArrayVar(&cmd.SetValues, "set", []string{}, "Set values for helm. E.g. --set 'persistence.enabled=true'") @@ -102,18 +149,6 @@ vcluster create test --namespace test cobraCmd.Flags().BoolVar(&cmd.Connect, "connect", true, "If true will run vcluster connect directly after the vcluster was created") cobraCmd.Flags().BoolVar(&cmd.Upgrade, "upgrade", false, "If true will try to upgrade the vcluster instead of failing if it already exists") - cobraCmd.Flags().BoolVar(&cmd.DisablePro, "disable-pro", false, "If true vcluster will not try to create a vCluster.Pro. You can also use 'vcluster logout' to prevent vCluster from creating any pro clusters") - - // pro flags - cobraCmd.Flags().StringVar(&cmd.Project, "project", "", "[PRO] The vCluster.Pro project to use") - cobraCmd.Flags().StringSliceVarP(&cmd.Labels, "labels", "l", []string{}, "[PRO] Comma separated labels to apply to the virtualclusterinstance") - cobraCmd.Flags().StringSliceVar(&cmd.Annotations, "annotations", []string{}, "[PRO] Comma separated annotations to apply to the virtualclusterinstance") - cobraCmd.Flags().StringVar(&cmd.Cluster, "cluster", "", "[PRO] The vCluster.Pro connected cluster to use") - cobraCmd.Flags().StringVar(&cmd.Template, "template", "", "[PRO] The vCluster.Pro template to use") - cobraCmd.Flags().StringVar(&cmd.TemplateVersion, "template-version", "", "[PRO] The vCluster.Pro template version to use") - cobraCmd.Flags().StringArrayVar(&cmd.Links, "link", []string{}, "[PRO] A link to add to the vCluster. E.g. --link 'prod=http://exampleprod.com'") - cobraCmd.Flags().StringVar(&cmd.Params, "params", "", "[PRO] If a template is used, this can be used to use a file for the parameters. E.g. 
--params path/to/my/file.yaml") - cobraCmd.Flags().StringArrayVar(&cmd.SetParams, "set-param", []string{}, "[PRO] If a template is used, this can be used to set a specific parameter. E.g. --set-param 'my-param=my-value'") // hidden / deprecated cobraCmd.Flags().StringVar(&cmd.LocalChartDir, "local-chart-dir", "", "The virtual cluster local chart dir to use") @@ -124,45 +159,8 @@ vcluster create test --namespace test return cobraCmd } -var loginText = "\nPlease run:\n * 'vcluster login' to connect to an existing vCluster.Pro instance\n * 'vcluster pro start' to deploy a new vCluster.Pro instance" - // Run executes the functionality func (cmd *CreateCmd) Run(ctx context.Context, args []string) error { - // check if we should create a pro cluster - if !cmd.DisablePro { - proClient, err := procli.CreateProClient() - if err == nil { - // deploy pro cluster - err = create.DeployProCluster(ctx, &cmd.Options, proClient, args[0], cmd.Namespace, cmd.log) - if err != nil { - return err - } - - // check if we should connect to the vcluster - if cmd.Connect { - connectCmd := &ConnectCmd{ - GlobalFlags: cmd.GlobalFlags, - UpdateCurrent: cmd.UpdateCurrent, - KubeConfigContextName: cmd.KubeConfigContextName, - KubeConfig: "./kubeconfig.yaml", - Project: cmd.Project, - Log: cmd.log, - } - - return connectCmd.Connect(ctx, proClient, args[0], nil) - } - - cmd.log.Donef("Successfully created virtual cluster %s in project %s. \n- Use 'vcluster connect %s --project %s' to access the virtual cluster\n- Use `vcluster connect %s --project %s -- kubectl get ns` to run a command directly within the vcluster", args[0], cmd.Project, args[0], cmd.Project, args[0], cmd.Project) - return nil - } - } - - // validate oss flags - err := cmd.validateOSSFlags() - if err != nil { - return err - } - // make sure we deploy the correct version if cmd.ChartVersion == upgrade.DevelopmentVersion { cmd.ChartVersion = "" @@ -197,8 +195,7 @@ func (cmd *CreateCmd) Run(ctx context.Context, args []string) error { if err != nil { return err } - logger := logr.New(cmd.log.LogrLogSink()) - chartValues, err := config.GetExtraValues(chartOptions, logger) + chartValues, err := config.GetExtraValues(chartOptions) if err != nil { return err } @@ -253,7 +250,7 @@ func (cmd *CreateCmd) Run(ctx context.Context, args []string) error { Log: cmd.log, } - return connectCmd.Connect(ctx, nil, args[0], nil) + return connectCmd.Connect(ctx, args[0], nil) } return fmt.Errorf("vcluster %s already exists in namespace %s\n- Use `vcluster create %s -n %s --upgrade` to upgrade the vcluster\n- Use `vcluster connect %s -n %s` to access the vcluster", args[0], cmd.Namespace, args[0], cmd.Namespace, args[0], cmd.Namespace) @@ -277,7 +274,7 @@ func (cmd *CreateCmd) Run(ctx context.Context, args []string) error { Log: cmd.log, } - return connectCmd.Connect(ctx, nil, args[0], nil) + return connectCmd.Connect(ctx, args[0], nil) } if cmd.localCluster { @@ -289,32 +286,6 @@ func (cmd *CreateCmd) Run(ctx context.Context, args []string) error { return nil } -func (cmd *CreateCmd) validateOSSFlags() error { - if cmd.Project != "" { - return fmt.Errorf("cannot use --project as you are not connected to a vCluster.Pro instance." + loginText) - } - if cmd.Cluster != "" { - return fmt.Errorf("cannot use --cluster as you are not connected to a vCluster.Pro instance." + loginText) - } - if cmd.Template != "" { - return fmt.Errorf("cannot use --template as you are not connected to a vCluster.Pro instance." 
+ loginText) - } - if cmd.TemplateVersion != "" { - return fmt.Errorf("cannot use --template-version as you are not connected to a vCluster.Pro instance." + loginText) - } - if len(cmd.Links) > 0 { - return fmt.Errorf("cannot use --link as you are not connected to a vCluster.Pro instance." + loginText) - } - if cmd.Params != "" { - return fmt.Errorf("cannot use --params as you are not connected to a vCluster.Pro instance." + loginText) - } - if len(cmd.SetParams) > 0 { - return fmt.Errorf("cannot use --set-params as you are not connected to a vCluster.Pro instance." + loginText) - } - - return nil -} - func getBase64DecodedString(values string) (string, error) { strDecoded, err := base64.StdEncoding.DecodeString(values) if err != nil { @@ -364,8 +335,8 @@ func (cmd *CreateCmd) deployChart(ctx context.Context, vClusterName, chartValues // rewrite chart location, this is an optimization to avoid // downloading the whole index.yaml and parsing it - if !chartEmbedded && cmd.ChartRepo == create.LoftChartRepo && cmd.ChartVersion != "" { // specify versioned path to repo url - cmd.LocalChartDir = create.LoftChartRepo + "/charts/" + cmd.ChartName + "-" + strings.TrimPrefix(cmd.ChartVersion, "v") + ".tgz" + if !chartEmbedded && cmd.ChartRepo == LoftChartRepo && cmd.ChartVersion != "" { // specify versioned path to repo url + cmd.LocalChartDir = LoftChartRepo + "/charts/" + cmd.ChartName + "-" + strings.TrimPrefix(cmd.ChartVersion, "v") + ".tgz" } } @@ -394,12 +365,8 @@ func (cmd *CreateCmd) deployChart(ctx context.Context, vClusterName, chartValues } func (cmd *CreateCmd) ToChartOptions(kubernetesVersion *version.Info, log log.Logger) (*config.ExtraValuesOptions, error) { - if !util.Contains(cmd.Distro, create.AllowedDistros) { - return nil, fmt.Errorf("unsupported distro %s, please select one of: %s", cmd.Distro, strings.Join(create.AllowedDistros, ", ")) - } - - if cmd.ChartName == "vcluster" && cmd.Distro != "k3s" { - cmd.ChartName += "-" + cmd.Distro + if !util.Contains(cmd.Distro, AllowedDistros) { + return nil, fmt.Errorf("unsupported distro %s, please select one of: %s", cmd.Distro, strings.Join(AllowedDistros, ", ")) } // check if we should create with node port @@ -441,9 +408,6 @@ func (cmd *CreateCmd) prepare(ctx context.Context, vClusterName string) error { // check if vcluster in vcluster _, _, previousContext := find.VClusterFromContext(rawConfig.CurrentContext) - if previousContext == "" { - _, _, previousContext = find.VClusterProFromContext(rawConfig.CurrentContext) - } if previousContext != "" { if terminal.IsTerminalIn { switchBackOption := "No, switch back to context " + previousContext diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/delete.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/delete.go index cb08edc9..7ee99081 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/delete.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/delete.go @@ -1,17 +1,13 @@ package cmd import ( - "context" "fmt" "os/exec" "time" - proclient "github.com/loft-sh/loftctl/v3/pkg/client" - "github.com/loft-sh/loftctl/v3/pkg/kube" loftctlUtil "github.com/loft-sh/loftctl/v3/pkg/util" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/localkubernetes" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find" - "github.com/loft-sh/vcluster/pkg/procli" "github.com/loft-sh/vcluster/pkg/util/clihelper" "github.com/loft-sh/vcluster/pkg/util/translate" "k8s.io/client-go/rest" @@ -32,11 +28,11 @@ import ( // DeleteCmd holds the delete cmd flags type 
DeleteCmd struct { *flags.GlobalFlags - log log.Logger - rawConfig *clientcmdapi.Config - restConfig *rest.Config - kubeClient *kubernetes.Clientset - Project string + log log.Logger + rawConfig *clientcmdapi.Config + restConfig *rest.Config + kubeClient *kubernetes.Clientset + Wait bool KeepPVC bool DeleteNamespace bool @@ -73,7 +69,6 @@ vcluster delete test --namespace test }, } - cobraCmd.Flags().StringVar(&cmd.Project, "project", "", "[PRO] The pro project the vcluster is in") cobraCmd.Flags().BoolVar(&cmd.Wait, "wait", true, "If enabled, vcluster will wait until the vcluster is deleted") cobraCmd.Flags().BoolVar(&cmd.DeleteConfigMap, "delete-configmap", false, "If enabled, vCluster will delete the ConfigMap of the vCluster") cobraCmd.Flags().BoolVar(&cmd.KeepPVC, "keep-pvc", false, "If enabled, vcluster will not delete the persistent volume claim of the vcluster") @@ -87,15 +82,9 @@ vcluster delete test --namespace test func (cmd *DeleteCmd) Run(cobraCmd *cobra.Command, args []string) error { ctx := cobraCmd.Context() - // get pro client - proClient, err := procli.CreateProClient() - if err != nil { - cmd.log.Debugf("Error creating pro client: %v", err) - } - // find vcluster vClusterName := args[0] - vCluster, proVCluster, err := find.GetVCluster(ctx, proClient, cmd.Context, vClusterName, cmd.Namespace, cmd.Project, cmd.log) + vCluster, err := find.GetVCluster(ctx, cmd.Context, vClusterName, cmd.Namespace, cmd.log) if err != nil { if !cmd.IgnoreNotFound { return err @@ -105,8 +94,6 @@ func (cmd *DeleteCmd) Run(cobraCmd *cobra.Command, args []string) error { return err } return nil - } else if proVCluster != nil { - return cmd.deleteProVCluster(cobraCmd.Context(), proClient, proVCluster) } // prepare client @@ -198,7 +185,7 @@ func (cmd *DeleteCmd) Run(cobraCmd *cobra.Command, args []string) error { } // check if there are any other vclusters in the namespace you are deleting vcluster in. 
- vClusters, _, err := find.ListVClusters(cobraCmd.Context(), nil, cmd.Context, "", cmd.Namespace, "", cmd.log) + vClusters, err := find.ListVClusters(cobraCmd.Context(), cmd.Context, "", cmd.Namespace, cmd.log) if err != nil { return err } @@ -264,44 +251,6 @@ func (cmd *DeleteCmd) Run(cobraCmd *cobra.Command, args []string) error { return nil } -func (cmd *DeleteCmd) deleteProVCluster(ctx context.Context, proClient proclient.Client, vCluster *procli.VirtualClusterInstanceProject) error { - managementClient, err := proClient.Management() - if err != nil { - return err - } - - cmd.log.Infof("Deleting virtual cluster %s in project %s", vCluster.VirtualCluster.Name, vCluster.Project.Name) - - err = managementClient.Loft().ManagementV1().VirtualClusterInstances(vCluster.VirtualCluster.Namespace).Delete(ctx, vCluster.VirtualCluster.Name, metav1.DeleteOptions{}) - if err != nil { - return errors.Wrap(err, "delete virtual cluster") - } - - cmd.log.Donef("Successfully deleted virtual cluster %s in project %s", vCluster.VirtualCluster.Name, vCluster.Project.Name) - - // update kube config - err = deleteProContext(vCluster.VirtualCluster.Name, vCluster.Project.Name) - if err != nil { - return errors.Wrap(err, "delete kube context") - } - - // wait until deleted - if cmd.Wait { - cmd.log.Info("Waiting for virtual cluster to be deleted...") - for isVirtualClusterInstanceStillThere(ctx, managementClient, vCluster.VirtualCluster.Namespace, vCluster.VirtualCluster.Name) { - time.Sleep(time.Second) - } - cmd.log.Done("Virtual Cluster is deleted") - } - - return nil -} - -func isVirtualClusterInstanceStillThere(ctx context.Context, managementClient kube.Interface, namespace, name string) bool { - _, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(namespace).Get(ctx, name, metav1.GetOptions{}) - return err == nil -} - func (cmd *DeleteCmd) prepare(vCluster *find.VCluster) error { // load the raw config rawConfig, err := vCluster.ClientFactory.RawConfig() @@ -340,29 +289,6 @@ func (cmd *DeleteCmd) prepare(vCluster *find.VCluster) error { return nil } -func deleteProContext(vClusterName, projectName string) error { - kubeClientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}) - kubeConfig, err := kubeClientConfig.RawConfig() - if err != nil { - return fmt.Errorf("load kube config: %w", err) - } - - // remove matching contexts - for contextName := range kubeConfig.Contexts { - name, project, previousContext := find.VClusterProFromContext(contextName) - if vClusterName != name || projectName != project { - continue - } - - err := deleteContext(&kubeConfig, contextName, previousContext) - if err != nil { - return err - } - } - - return nil -} - func deleteContext(kubeConfig *clientcmdapi.Config, kubeContext string, otherContext string) error { // Get context contextRaw, ok := kubeConfig.Contexts[kubeContext] diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/disconnect.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/disconnect.go index d4e7dd92..3c41c383 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/disconnect.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/disconnect.go @@ -66,13 +66,8 @@ func (cmd *DisconnectCmd) Run() error { // get vcluster info from context vClusterName, _, otherContext := find.VClusterFromContext(cmd.Context) if vClusterName == "" { - // get vcluster-pro info from context - vClusterName, _, otherContext = 
find.VClusterProFromContext(cmd.Context) - if vClusterName == "" { - return fmt.Errorf("current selected context \"%s\" is not a vcluster context. If you've used a custom context name you will need to switch manually using kubectl", otherContext) - } + return fmt.Errorf("current selected context \"%s\" is not a vcluster context. If you've used a custom context name you will need to switch manually using kubectl", otherContext) } - if otherContext == "" { otherContext, err = cmd.selectContext(&rawConfig, otherContext) if err != nil { diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find/find.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find/find.go index 247c3816..0b27d075 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find/find.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find/find.go @@ -6,11 +6,9 @@ import ( "strings" "time" - loftclientset "github.com/loft-sh/agentapi/v3/pkg/client/loft/clientset_generated/clientset" "github.com/loft-sh/log" "github.com/loft-sh/log/survey" "github.com/loft-sh/log/terminal" - "github.com/loft-sh/vcluster/pkg/procli" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/loft-sh/vcluster/pkg/constants" @@ -67,61 +65,52 @@ func CurrentContext() (string, *clientcmdapi.Config, error) { return rawConfig.CurrentContext, &rawConfig, nil } -func GetVCluster(ctx context.Context, proClient procli.Client, context, name, namespace, project string, log log.Logger) (*VCluster, *procli.VirtualClusterInstanceProject, error) { +func GetVCluster(ctx context.Context, context, name, namespace string, log log.Logger) (*VCluster, error) { if name == "" { - return nil, nil, fmt.Errorf("please specify a name") + return nil, fmt.Errorf("please specify a name") } // list vclusters - ossVClusters, proVClusters, err := ListVClusters(ctx, proClient, context, name, namespace, project, log) + ossVClusters, err := ListVClusters(ctx, context, name, namespace, log) if err != nil { - return nil, nil, err + return nil, err } // figure out what we want to return - if len(ossVClusters) == 0 && len(proVClusters) == 0 { - return nil, nil, &VclusterNotFoundError{Name: name} - } else if len(ossVClusters) == 1 && len(proVClusters) == 0 { - return &ossVClusters[0], nil, nil - } else if len(proVClusters) == 1 && len(ossVClusters) == 0 { - return nil, &proVClusters[0], nil + if len(ossVClusters) == 0 { + return nil, &VclusterNotFoundError{Name: name} + } else if len(ossVClusters) == 1 { + return &ossVClusters[0], nil } // check if terminal if !terminal.IsTerminalIn { - return nil, nil, fmt.Errorf("multiple vclusters with name %s found, please specify a project via --project or a namespace via --namespace to select the correct one", name) + return nil, fmt.Errorf("multiple vclusters with name %s found, please specify a namespace via --namespace to select the correct one", name) } // ask a question questionOptionsUnformatted := [][]string{} for _, vCluster := range ossVClusters { - questionOptionsUnformatted = append(questionOptionsUnformatted, []string{name, vCluster.Namespace, "false"}) - } - for _, vCluster := range proVClusters { - questionOptionsUnformatted = append(questionOptionsUnformatted, []string{name, vCluster.Project.Name, "true"}) + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{name, vCluster.Namespace}) } - questionOptions := FormatOptions("Name: %s | Namespace / Project: %s | Pro: %s ", questionOptionsUnformatted) + questionOptions := FormatOptions("Name: %s | Namespace: %s", 
questionOptionsUnformatted) selectedVCluster, err := log.Question(&survey.QuestionOptions{ Question: "Please choose a virtual cluster to use", DefaultValue: questionOptions[0], Options: questionOptions, }) if err != nil { - return nil, nil, err + return nil, err } // match answer for idx, s := range questionOptions { if s == selectedVCluster { - if idx < len(ossVClusters) { - return &ossVClusters[idx], nil, nil - } - - return nil, &proVClusters[idx-len(ossVClusters)], nil + return &ossVClusters[idx], nil } } - return nil, nil, fmt.Errorf("unexpected error searching for selected vcluster") + return nil, fmt.Errorf("unexpected error searching for selected vcluster") } func FormatOptions(format string, options [][]string) []string { @@ -156,47 +145,31 @@ func FormatOptions(format string, options [][]string) []string { return retOptions } -func ListVClusters(ctx context.Context, proClient procli.Client, context, name, namespace, project string, log log.Logger) ([]VCluster, []procli.VirtualClusterInstanceProject, error) { +func ListVClusters(ctx context.Context, context, name, namespace string, log log.Logger) ([]VCluster, error) { var err error if context == "" { var err error context, _, err = CurrentContext() if err != nil { - return nil, nil, err + return nil, err } } - var ossVClusters []VCluster - if project == "" { - ossVClusters, err = ListOSSVClusters(ctx, context, name, namespace) - if err != nil { - log.Warnf("Error retrieving vclusters: %v", err) - } - } - - var proVClusters []procli.VirtualClusterInstanceProject - if proClient != nil { - proVClusters, err = procli.ListVClusters(ctx, proClient, name, project) - if err != nil { - log.Warnf("Error retrieving pro vclusters: %v", err) - } + ossVClusters, err := ListOSSVClusters(ctx, context, name, namespace) + if err != nil { + log.Warnf("Error retrieving vclusters: %v", err) } - return ossVClusters, proVClusters, nil + return ossVClusters, nil } func ListOSSVClusters(ctx context.Context, context, name, namespace string) ([]VCluster, error) { var err error timeout := time.Minute - vClusterName, _, vClusterContext := VClusterProFromContext(context) - if vClusterContext != "" { - timeout = time.Second * 10 - } else { - vClusterName, _, vClusterContext = VClusterFromContext(context) - if vClusterName != "" { - timeout = time.Second * 5 - } + vClusterName, _, vClusterContext := VClusterFromContext(context) + if vClusterName != "" { + timeout = time.Second * 5 } vclusters, err := findInContext(ctx, context, name, namespace, timeout, false) @@ -216,10 +189,6 @@ func ListOSSVClusters(ctx context.Context, context, name, namespace string) ([]V return vclusters, nil } -func VClusterProContextName(vClusterName string, projectName string, currentContext string) string { - return "vcluster-pro_" + vClusterName + "_" + projectName + "_" + currentContext -} - func VClusterContextName(vClusterName string, vClusterNamespace string, currentContext string) string { return "vcluster_" + vClusterName + "_" + vClusterNamespace + "_" + currentContext } @@ -228,21 +197,6 @@ func VClusterConnectBackgroundProxyName(vClusterName string, vClusterNamespace s return VClusterContextName(vClusterName, vClusterNamespace, currentContext) + "_background_proxy" } -func VClusterProFromContext(originalContext string) (name string, project string, context string) { - if !strings.HasPrefix(originalContext, "vcluster-pro_") { - return "", "", "" - } - - splitted := strings.Split(originalContext, "_") - // vcluster-pro___ - if len(splitted) >= 4 { - return splitted[1], 
splitted[2], strings.Join(splitted[3:], "_") - } - - // we don't know for sure, but most likely specified custom vcluster context name - return originalContext, "", "" -} - func VClusterFromContext(originalContext string) (name string, namespace string, context string) { if !strings.HasPrefix(originalContext, "vcluster_") { return "", "", "" @@ -277,10 +231,6 @@ func findInContext(ctx context.Context, context, name, namespace string, timeout if err != nil { return nil, errors.Wrap(err, "create kube client") } - loftClient, err := loftclientset.NewForConfig(restConfig) - if err != nil { - return nil, errors.Wrap(err, "create new loft api client") - } // statefulset based vclusters statefulSets, err := getStatefulSets(ctx, kubeClient, namespace, kubeClientConfig, timeout) @@ -309,12 +259,6 @@ func findInContext(ctx context.Context, context, name, namespace string, timeout continue } - // skip pro clusters - virtualCluster, err := loftClient.StorageV1().VirtualClusters(p.Namespace).Get(ctx, p.Name, metav1.GetOptions{}) - if err == nil && (virtualCluster.Annotations == nil || virtualCluster.Annotations["loft.sh/skip-helm-deploy"] != "true") { - continue - } - vCluster, err := getVCluster(ctx, &p, context, release, kubeClient, kubeClientConfig) if err != nil { return nil, err @@ -335,12 +279,6 @@ func findInContext(ctx context.Context, context, name, namespace string, timeout continue } - // skip pro clusters - virtualCluster, err := loftClient.StorageV1().VirtualClusters(p.Namespace).Get(ctx, p.Name, metav1.GetOptions{}) - if err == nil && (virtualCluster.Annotations == nil || virtualCluster.Annotations["loft.sh/skip-helm-deploy"] != "true") { - continue - } - vCluster, err2 := getVCluster(ctx, &p, context, release, kubeClient, kubeClientConfig) if err2 != nil { return nil, err2 diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/list.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/list.go index 092584c4..8d24e8de 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/list.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/list.go @@ -6,7 +6,6 @@ import ( "time" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find" - "github.com/loft-sh/vcluster/pkg/procli" "github.com/sirupsen/logrus" "k8s.io/client-go/tools/clientcmd" @@ -23,13 +22,10 @@ type VCluster struct { Created time.Time Name string Namespace string - Context string - Cluster string Version string Status string AgeSeconds int Connected bool - Pro bool } // ListCmd holds the login cmd flags @@ -91,19 +87,13 @@ func (cmd *ListCmd) Run(cobraCmd *cobra.Command, _ []string) error { namespace = cmd.Namespace } - proClient, err := procli.CreateProClient() - if err != nil { - cmd.log.Debugf("Error creating pro client: %v", err) - } - - vClusters, proVClusters, err := find.ListVClusters(cobraCmd.Context(), proClient, cmd.Context, "", namespace, "", cmd.log.ErrorStreamOnly()) + vClusters, err := find.ListVClusters(cobraCmd.Context(), cmd.Context, "", namespace, cmd.log.ErrorStreamOnly()) if err != nil { return err } var output []VCluster output = append(output, ossToVClusters(vClusters, currentContext)...) - output = append(output, proToVClusters(proVClusters, currentContext)...) 
if cmd.output == "json" { bytes, err := json.MarshalIndent(output, "", " ") @@ -112,7 +102,7 @@ func (cmd *ListCmd) Run(cobraCmd *cobra.Command, _ []string) error { } cmd.log.WriteString(logrus.InfoLevel, string(bytes)+"\n") } else { - header := []string{"NAME", "CLUSTER", "NAMESPACE", "STATUS", "VERSION", "CONNECTED", "CREATED", "AGE", "DISTRO"} + header := []string{"NAME", "NAMESPACE", "STATUS", "VERSION", "CONNECTED", "AGE"} values := toValues(output) table.PrintTable(cmd.log, header, values) if strings.HasPrefix(cmd.Context, "vcluster_") || strings.HasPrefix(cmd.Context, "vcluster-pro_") { @@ -132,9 +122,7 @@ func ossToVClusters(vClusters []find.VCluster, currentContext string) []VCluster Created: vCluster.Created.Time, Version: vCluster.Version, AgeSeconds: int(time.Since(vCluster.Created.Time).Round(time.Second).Seconds()), - Cluster: vCluster.Context, Status: string(vCluster.Status), - Pro: false, } vClusterOutput.Connected = currentContext == find.VClusterContextName( vCluster.Name, @@ -146,33 +134,6 @@ func ossToVClusters(vClusters []find.VCluster, currentContext string) []VCluster return output } -func proToVClusters(vClusters []procli.VirtualClusterInstanceProject, currentContext string) []VCluster { - var output []VCluster - for _, vCluster := range vClusters { - status := string(vCluster.VirtualCluster.Status.Phase) - if vCluster.VirtualCluster.DeletionTimestamp != nil { - status = "Terminating" - } else if status == "" { - status = "Pending" - } - - connected := strings.HasPrefix(currentContext, "vcluster-pro_"+vCluster.VirtualCluster.Name+"_"+vCluster.Project.Name) - vClusterOutput := VCluster{ - Name: vCluster.VirtualCluster.Spec.ClusterRef.VirtualCluster, - Namespace: vCluster.VirtualCluster.Spec.ClusterRef.Namespace, - Cluster: vCluster.VirtualCluster.Spec.ClusterRef.Cluster, - Connected: connected, - Created: vCluster.VirtualCluster.CreationTimestamp.Time, - AgeSeconds: int(time.Since(vCluster.VirtualCluster.CreationTimestamp.Time).Round(time.Second).Seconds()), - Status: status, - Pro: true, - Version: vCluster.VirtualCluster.Status.VirtualCluster.HelmRelease.Chart.Version, - } - output = append(output, vClusterOutput) - } - return output -} - func toValues(vClusters []VCluster) [][]string { var values [][]string for _, vCluster := range vClusters { @@ -181,21 +142,13 @@ func toValues(vClusters []VCluster) [][]string { isConnected = "True" } - distro := "OSS" - if vCluster.Pro { - distro = "Pro" - } - values = append(values, []string{ vCluster.Name, - vCluster.Cluster, vCluster.Namespace, vCluster.Status, vCluster.Version, isConnected, - vCluster.Created.String(), time.Since(vCluster.Created).Round(1 * time.Second).String(), - distro, }) } return values diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pause.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pause.go index 72870415..13a764ce 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pause.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pause.go @@ -3,35 +3,23 @@ package cmd import ( "context" "fmt" - "strconv" - "time" "github.com/pkg/errors" "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - clusterv1 "github.com/loft-sh/agentapi/v3/pkg/apis/loft/cluster/v1" - storagev1 "github.com/loft-sh/api/v3/pkg/apis/storage/v1" - "github.com/loft-sh/api/v3/pkg/product" - proclient "github.com/loft-sh/loftctl/v3/pkg/client" - "github.com/loft-sh/loftctl/v3/pkg/config" loftctlUtil "github.com/loft-sh/loftctl/v3/pkg/util" 
"github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find" "github.com/loft-sh/vcluster/cmd/vclusterctl/flags" "github.com/loft-sh/vcluster/pkg/lifecycle" - "github.com/loft-sh/vcluster/pkg/procli" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PauseCmd holds the cmd flags type PauseCmd struct { *flags.GlobalFlags - Log log.Logger - kubeClient *kubernetes.Clientset - Project string - ForceDuration int64 + Log log.Logger + kubeClient *kubernetes.Clientset } // NewPauseCmd creates a new command @@ -68,26 +56,16 @@ vcluster pause test --namespace test }, } - cobraCmd.Flags().StringVar(&cmd.Project, "project", "", "[PRO] The pro project the vcluster is in") - cobraCmd.Flags().Int64Var(&cmd.ForceDuration, "prevent-wakeup", -1, product.Replace("[PRO] The amount of seconds this vcluster should sleep until it can be woken up again (use 0 for infinite sleeping). During this time the space can only be woken up by `vcluster resume vcluster`, manually deleting the annotation on the namespace or through the loft UI")) return cobraCmd } // Run executes the functionality func (cmd *PauseCmd) Run(ctx context.Context, args []string) error { - // get pro client - proClient, err := procli.CreateProClient() - if err != nil { - cmd.Log.Debugf("Error creating pro client: %v", err) - } - // find vcluster vClusterName := args[0] - vCluster, proVCluster, err := find.GetVCluster(ctx, proClient, cmd.Context, vClusterName, cmd.Namespace, cmd.Project, cmd.Log) + vCluster, err := find.GetVCluster(ctx, cmd.Context, vClusterName, cmd.Namespace, cmd.Log) if err != nil { return err - } else if proVCluster != nil { - return cmd.pauseProVCluster(ctx, proClient, proVCluster) } err = cmd.prepare(vCluster) @@ -114,50 +92,6 @@ func (cmd *PauseCmd) Run(ctx context.Context, args []string) error { return nil } -func (cmd *PauseCmd) pauseProVCluster(ctx context.Context, proClient proclient.Client, vCluster *procli.VirtualClusterInstanceProject) error { - managementClient, err := proClient.Management() - if err != nil { - return err - } - - cmd.Log.Infof("Putting virtual cluster %s in project %s to sleep", vCluster.VirtualCluster.Name, vCluster.Project.Name) - - virtualClusterInstance, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(vCluster.VirtualCluster.Namespace).Get(ctx, vCluster.VirtualCluster.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if virtualClusterInstance.Annotations == nil { - virtualClusterInstance.Annotations = map[string]string{} - } - virtualClusterInstance.Annotations[clusterv1.SleepModeForceAnnotation] = "true" - if cmd.ForceDuration >= 0 { - virtualClusterInstance.Annotations[clusterv1.SleepModeForceDurationAnnotation] = strconv.FormatInt(cmd.ForceDuration, 10) - } - - _, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(vCluster.VirtualCluster.Namespace).Update(ctx, virtualClusterInstance, metav1.UpdateOptions{}) - if err != nil { - return err - } - - // wait for sleeping - cmd.Log.Info("Wait until virtual cluster is sleeping...") - err = wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), false, func(ctx context.Context) (done bool, err error) { - virtualClusterInstance, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(vCluster.VirtualCluster.Namespace).Get(ctx, vCluster.VirtualCluster.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - - return virtualClusterInstance.Status.Phase == storagev1.InstanceSleeping, nil - }) - if err != nil { - return fmt.Errorf("error 
waiting for vcluster to start sleeping: %w", err) - } - - cmd.Log.Donef("Successfully put vcluster %s to sleep", vCluster.VirtualCluster.Name) - return nil -} - func (cmd *PauseCmd) prepare(vCluster *find.VCluster) error { // load the rest config kubeConfig, err := vCluster.ClientFactory.ClientConfig() diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pro/start.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pro/start.go index 7126b129..7037eef2 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pro/start.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/pro/start.go @@ -102,9 +102,6 @@ func (cmd *StartCmd) Run(ctx context.Context) error { // check if vcluster in vcluster _, _, previousContext := find.VClusterFromContext(rawConfig.CurrentContext) - if previousContext == "" { - _, _, previousContext = find.VClusterProFromContext(rawConfig.CurrentContext) - } if previousContext != "" { if terminal.IsTerminalIn { switchBackOption := "No, switch back to context " + previousContext diff --git a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/resume.go b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/resume.go index 54fb706b..40b93cdf 100644 --- a/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/resume.go +++ b/vendor/github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/resume.go @@ -7,14 +7,11 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" - proclient "github.com/loft-sh/loftctl/v3/pkg/client" loftctlUtil "github.com/loft-sh/loftctl/v3/pkg/util" - "github.com/loft-sh/loftctl/v3/pkg/vcluster" "github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find" "github.com/loft-sh/vcluster/cmd/vclusterctl/flags" "github.com/loft-sh/vcluster/pkg/lifecycle" - "github.com/loft-sh/vcluster/pkg/procli" ) // ResumeCmd holds the cmd flags @@ -61,19 +58,11 @@ vcluster resume test --namespace test // Run executes the functionality func (cmd *ResumeCmd) Run(ctx context.Context, args []string) error { - // get pro client - proClient, err := procli.CreateProClient() - if err != nil { - cmd.Log.Debugf("Error creating pro client: %v", err) - } - // find vcluster vClusterName := args[0] - vCluster, proVCluster, err := find.GetVCluster(ctx, proClient, cmd.Context, vClusterName, cmd.Namespace, cmd.Project, cmd.Log) + vCluster, err := find.GetVCluster(ctx, cmd.Context, vClusterName, cmd.Namespace, cmd.Log) if err != nil { return err - } else if proVCluster != nil { - return cmd.resumeProVCluster(ctx, proClient, proVCluster) } err = cmd.prepare(vCluster) @@ -90,23 +79,6 @@ func (cmd *ResumeCmd) Run(ctx context.Context, args []string) error { return nil } -func (cmd *ResumeCmd) resumeProVCluster(ctx context.Context, proClient proclient.Client, vCluster *procli.VirtualClusterInstanceProject) error { - managementClient, err := proClient.Management() - if err != nil { - return err - } - - cmd.Log.Infof("Waking up virtual cluster %s in project %s", vCluster.VirtualCluster.Name, vCluster.Project.Name) - - _, err = vcluster.WaitForVirtualClusterInstance(ctx, managementClient, vCluster.VirtualCluster.Namespace, vCluster.VirtualCluster.Name, true, cmd.Log) - if err != nil { - return err - } - - cmd.Log.Donef("Successfully woke up vcluster %s", vCluster.VirtualCluster.Name) - return nil -} - func (cmd *ResumeCmd) prepare(vCluster *find.VCluster) error { // load the rest config kubeConfig, err := vCluster.ClientFactory.ClientConfig() diff --git a/vendor/github.com/loft-sh/vcluster/config/config.go 
b/vendor/github.com/loft-sh/vcluster/config/config.go index 092bb160..128ac0af 100644 --- a/vendor/github.com/loft-sh/vcluster/config/config.go +++ b/vendor/github.com/loft-sh/vcluster/config/config.go @@ -23,6 +23,9 @@ func NewDefaultConfig() (*Config, error) { // Config is the vCluster config. This struct describes valid Helm values for vCluster as well as configuration used by the vCluster binary itself. type Config struct { + // Global values shared across all (sub)charts + Global interface{} `json:"global,omitempty"` + // ExportKubeConfig describes how vCluster should export the vCluster kubeConfig file. ExportKubeConfig ExportKubeConfig `json:"exportKubeConfig,omitempty"` @@ -424,13 +427,13 @@ type RBACPolicyRule struct { } type ControlPlane struct { - // Distro holds virtual cluster related distro options. + // Distro holds virtual cluster related distro options. A distro cannot be changed after vCluster is deployed. Distro Distro `json:"distro,omitempty"` - // BackingStore defines which backing store to use for virtual cluster. If not defined will fallback to the default distro backing store. + // BackingStore defines which backing store to use for virtual cluster. If not defined will use embedded database as a default backing store. BackingStore BackingStore `json:"backingStore,omitempty"` - // CoreDNS defines everything coredns related. + // CoreDNS defines everything related to the coredns that is deployed and used within the vCluster. CoreDNS CoreDNS `json:"coredns,omitempty"` // Proxy defines options for the virtual cluster control plane proxy that is used to do authentication and intercept requests. @@ -476,11 +479,11 @@ type ControlPlaneStatefulSet struct { LabelsAndAnnotations `json:",inline"` - // Pods are additional labels or annotations for the statefulSet pod. + // Additional labels or annotations for the statefulSet pods. Pods LabelsAndAnnotations `json:"pods,omitempty"` // Image is the image for the controlPlane statefulSet container - Image Image `json:"image,omitempty"` + Image StatefulSetImage `json:"image,omitempty"` // ImagePullPolicy is the policy how to pull the image. ImagePullPolicy string `json:"imagePullPolicy,omitempty"` @@ -499,15 +502,15 @@ type ControlPlaneStatefulSet struct { } type Distro struct { + // K8S holds K8s relevant configuration. + K8S DistroK8s `json:"k8s,omitempty"` + // K3S holds K3s relevant configuration. K3S DistroK3s `json:"k3s,omitempty"` // K0S holds k0s relevant configuration. K0S DistroK0s `json:"k0s,omitempty"` - // K8S holds K8s relevant configuration. - K8S DistroK8s `json:"k8s,omitempty"` - // EKS holds eks relevant configuration. EKS DistroK8s `json:"eks,omitempty"` } @@ -530,10 +533,10 @@ type DistroK8s struct { // APIServer holds configuration specific to starting the api server. APIServer DistroContainerEnabled `json:"apiServer,omitempty"` - // ControllerManager holds configuration specific to starting the scheduler. + // ControllerManager holds configuration specific to starting the controller manager. ControllerManager DistroContainerEnabled `json:"controllerManager,omitempty"` - // Scheduler holds configuration specific to starting the scheduler. Enable this via controlPlane.virtualScheduler.enabled + // Scheduler holds configuration specific to starting the scheduler. 
Enable this via controlPlane.advanced.virtualScheduler.enabled Scheduler DistroContainer `json:"scheduler,omitempty"` DistroCommon `json:",inline"` @@ -551,13 +554,13 @@ type DistroK0s struct { } type DistroCommon struct { - // Env are extra environment variables to use for the main container. + // Env are extra environment variables to use for the main container and NOT the init container. Env []map[string]interface{} `json:"env,omitempty"` - // Resources are the resources for the distro init container + // Resources for the distro init container Resources map[string]interface{} `json:"resources,omitempty"` - // SecurityContext can be used for the distro init container + // Security options can be used for the distro init container SecurityContext map[string]interface{} `json:"securityContext,omitempty"` } @@ -592,6 +595,16 @@ type DistroContainerEnabled struct { ExtraArgs []string `json:"extraArgs,omitempty"` } +type StatefulSetImage struct { + // Configure the registry and repository of the container image, e.g. my-registry.com/my-repo/my-image. + // It defaults to the vCluster pro repository that includes the optional pro modules that are turned off by default. + // If you still want to use the pure OSS build, use 'ghcr.io/loft-sh/vcluster-oss' instead. + Repository string `json:"repository,omitempty"` + + // Tag is the tag of the container image, e.g. latest + Tag string `json:"tag,omitempty"` +} + type Image struct { // Repository is the registry and repository of the container image, e.g. my-registry.com/my-repo/my-image Repository string `json:"repository,omitempty"` @@ -600,11 +613,8 @@ type Image struct { Tag string `json:"tag,omitempty"` } -// LocalObjectReference contains enough information to let you locate the -// referenced object inside the same namespace. -type LocalObjectReference struct { - // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +type ImagePullSecretName struct { + // Name of the image pull secret to use. Name string `json:"name,omitempty"` } @@ -629,7 +639,7 @@ type BackingStore struct { // Etcd defines that etcd should be used as the backend for the virtual cluster Etcd Etcd `json:"etcd,omitempty"` - // Database defines that a database backend should be used as the backend for the virtual cluster + // Database defines that a database backend should be used as the backend for the virtual cluster. This uses a project called kine under the hood which is a shim for bridging Kubernetes and relational databases. Database Database `json:"database,omitempty"` } @@ -645,7 +655,10 @@ type DatabaseKine struct { // Enabled defines if the database should be used. Enabled bool `json:"enabled,omitempty"` - // DataSource is the kine dataSource to use for the database. This depends on the database format. This is optional for the embedded database. + // DataSource is the kine dataSource to use for the database. This depends on the database format. + // This is optional for the embedded database. Examples: + // * mysql: mysql://username:password@tcp(hostname:3306)/k3s + // * postgres: postgres://username:password@hostname:5432/k3s DataSource string `json:"dataSource,omitempty"` // KeyFile is the key file to use for the database. This is optional. 
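For reference, a minimal illustrative values sketch (not part of this diff) for the external database backing store described by the DataSource comment above — assuming the chart values mirror the controlPlane.backingStore.database keys shown later in config/values.yaml — might look like:

controlPlane:
  backingStore:
    database:
      external:
        enabled: true
        # kine data source; the format depends on the database, e.g. the mysql/postgres DSNs listed in the comment above
        dataSource: "postgres://username:password@hostname:5432/k3s"
        # optional TLS files for the database connection
        certFile: ""
        keyFile: ""
        caFile: ""

The embedded variant uses the same DatabaseKine shape under database.embedded, where dataSource is optional.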
@@ -766,7 +779,7 @@ type CoreDNS struct { // Enabled defines if coredns is enabled Enabled bool `json:"enabled,omitempty"` - // Embedded defines if vCluster will start the embedded coredns service + // Embedded defines if vCluster will start the embedded coredns service within the control-plane and not as a separate deployment. This is a PRO feature. Embedded bool `json:"embedded,omitempty" product:"pro"` // Service holds extra options for the coredns service deployed within the virtual cluster @@ -812,7 +825,7 @@ type ControlPlaneProxy struct { // BindAddress under which vCluster will expose the proxy. BindAddress string `json:"bindAddress,omitempty"` - // Port under which vCluster will expose the proxy. + // Port under which vCluster will expose the proxy. Changing port is currently not supported. Port int `json:"port,omitempty"` // ExtraSANs are extra hostnames to sign the vCluster proxy certificate for. @@ -995,7 +1008,7 @@ type ControlPlaneScheduling struct { Affinity map[string]interface{} `json:"affinity,omitempty"` // Tolerations are the tolerations to apply to the pod. - Tolerations []interface{} `json:"tolerations,omitempty"` + Tolerations []map[string]interface{} `json:"tolerations,omitempty"` // PriorityClassName is the priority class name for the the pod. PriorityClassName string `json:"priorityClassName,omitempty"` @@ -1015,7 +1028,7 @@ type ControlPlaneServiceAccount struct { Name string `json:"name,omitempty"` // ImagePullSecrets defines extra image pull secrets for the service account. - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + ImagePullSecrets []ImagePullSecretName `json:"imagePullSecrets,omitempty"` // Annotations are extra annotations for this resource. Annotations map[string]string `json:"annotations,omitempty"` @@ -1032,7 +1045,7 @@ type ControlPlaneWorkloadServiceAccount struct { Name string `json:"name,omitempty"` // ImagePullSecrets defines extra image pull secrets for the workload service account. - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + ImagePullSecrets []ImagePullSecretName `json:"imagePullSecrets,omitempty"` // Annotations are extra annotations for this resource. Annotations map[string]string `json:"annotations,omitempty"` @@ -1098,7 +1111,7 @@ type ResourceQuota struct { Quota map[string]interface{} `json:"quota,omitempty"` // ScopeSelector is the resource quota scope selector - ScopeSelector ScopeSelector `json:"scopeSelector,omitempty"` + ScopeSelector map[string]interface{} `json:"scopeSelector,omitempty"` // Scopes are the resource quota scopes Scopes []string `json:"scopes,omitempty"` @@ -1106,10 +1119,6 @@ type ResourceQuota struct { LabelsAndAnnotations `json:",inline"` } -type ScopeSelector struct { - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty"` -} - type LabelSelectorRequirement struct { // key is the label key that the selector applies to. Key string `json:"key"` @@ -1426,11 +1435,11 @@ type ExperimentalSyncSettings struct { // SyncLabels are labels that should get not rewritten when syncing from the virtual cluster. 
SyncLabels []string `json:"syncLabels,omitempty"` - // LocalManagerMetricsBindAddress is the bind address for the local manager - LocalManagerMetricsBindAddress string `json:"localManagerMetricsBindAddress,omitempty"` + // HostMetricsBindAddress is the bind address for the local manager + HostMetricsBindAddress string `json:"hostMetricsBindAddress,omitempty"` - // VirtualManagerMetricsBindAddress is the bind address for the virtual manager - VirtualManagerMetricsBindAddress string `json:"virtualManagerMetricsBindAddress,omitempty"` + // VirtualMetricsBindAddress is the bind address for the virtual manager + VirtualMetricsBindAddress string `json:"virtualMetricsBindAddress,omitempty"` } type ExperimentalDeploy struct { @@ -1679,74 +1688,38 @@ type PatchSync struct { type DenyRule struct { // The name of the check. - // +optional Name string `json:"name,omitempty"` // Namespace describe a list of namespaces that will be affected by the check. // An empty list means that all namespaces will be affected. // In case of ClusterScoped rules, only the Namespace resource is affected. - // +optional Namespaces []string `json:"namespaces,omitempty"` // Rules describes on which verbs and on what resources/subresources the webhook is enforced. // The webhook is enforced if it matches any Rule. // The version of the request must match the rule version exactly. Equivalent matching is not supported. - // +optional Rules []RuleWithVerbs `json:"rules,omitempty"` // ExcludedUsers describe a list of users for which the checks will be skipped. // Impersonation attempts on these users will still be subjected to the checks. - // +optional ExcludedUsers []string `json:"excludedUsers,omitempty"` } type RuleWithVerbs struct { // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - // +listType=atomic APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. - // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. - // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". - // - // +optional + // Scope specifies the scope of this rule. 
Scope *string `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` // Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch. // For non-resource requests, this is the lowercase http verb. // If '*' is present, the length of the slice must be one. - // Required. - // +listType=atomic Verbs []string `json:"operations,omitempty"` } diff --git a/vendor/github.com/loft-sh/vcluster/config/default_extra_values.go b/vendor/github.com/loft-sh/vcluster/config/default_extra_values.go index c02461f7..73e866ea 100644 --- a/vendor/github.com/loft-sh/vcluster/config/default_extra_values.go +++ b/vendor/github.com/loft-sh/vcluster/config/default_extra_values.go @@ -87,22 +87,6 @@ var EKSSchedulerVersionMap = map[string]string{ "1.25": "public.ecr.aws/eks-distro/kubernetes/kube-scheduler:v1.25.14-eks-1-25-23", } -// EKSEtcdVersionMap holds the supported eks etcd -var EKSEtcdVersionMap = map[string]string{ - "1.28": "public.ecr.aws/eks-distro/etcd-io/etcd:v3.5.9-eks-1-28-6", - "1.27": "public.ecr.aws/eks-distro/etcd-io/etcd:v3.5.8-eks-1-27-13", - "1.26": "public.ecr.aws/eks-distro/etcd-io/etcd:v3.5.8-eks-1-26-19", - "1.25": "public.ecr.aws/eks-distro/etcd-io/etcd:v3.5.8-eks-1-25-23", -} - -// EKSCoreDNSVersionMap holds the supported eks core dns -var EKSCoreDNSVersionMap = map[string]string{ - "1.28": "public.ecr.aws/eks-distro/coredns/coredns:v1.10.1-eks-1-28-6", - "1.27": "public.ecr.aws/eks-distro/coredns/coredns:v1.10.1-eks-1-27-13", - "1.26": "public.ecr.aws/eks-distro/coredns/coredns:v1.9.3-eks-1-26-19", - "1.25": "public.ecr.aws/eks-distro/coredns/coredns:v1.9.3-eks-1-25-23", -} - // ExtraValuesOptions holds the chart options type ExtraValuesOptions struct { Distro string @@ -119,22 +103,18 @@ type ExtraValuesOptions struct { PlatformUserID string } -type Logger interface { - Info(msg string, keysAndValues ...any) -} - type KubernetesVersion struct { Major string Minor string } -func GetExtraValues(options *ExtraValuesOptions, log Logger) (string, error) { +func GetExtraValues(options *ExtraValuesOptions) (string, error) { fromConfig, err := NewDefaultConfig() if err != nil { return "", err } - toConfig, err := getExtraValues(options, log) + toConfig, err := getExtraValues(options) if err != nil { return "", fmt.Errorf("get extra values: %w", err) } @@ -142,97 +122,93 @@ func GetExtraValues(options *ExtraValuesOptions, log Logger) (string, error) { return Diff(fromConfig, toConfig) } -func getExtraValues(options *ExtraValuesOptions, log Logger) (*Config, error) { +func getExtraValues(options *ExtraValuesOptions) (*Config, error) { vConfig, err := NewDefaultConfig() if err != nil { return nil, err } - switch options.Distro { - case K3SDistro: - return getK3SExtraValues(vConfig, options, log) - case K0SDistro: - return getK0SExtraValues(vConfig, options, log) - case K8SDistro: - return getK8SExtraValues(vConfig, options, log) - case EKSDistro: - return getEKSExtraValues(vConfig, options, log) + // apply k3s values + err = applyK3SExtraValues(vConfig, options) + if err != nil { + return nil, err } + // apply k0s values + err = applyK0SExtraValues(vConfig, options) + if err != nil { + return nil, err + } + + // apply k8s values + err = applyK8SExtraValues(vConfig, options) + if err != nil { + return nil, err + } + + // apply eks values + err = applyEKSExtraValues(vConfig, options) + if err != nil { + return nil, err + } + + // add common release values + addCommonReleaseValues(vConfig, options) return vConfig, nil } var 
replaceRegEx = regexp.MustCompile("[^0-9]+") -func getK3SExtraValues(vConfig *Config, options *ExtraValuesOptions, log Logger) (*Config, error) { +func applyK3SExtraValues(vConfig *Config, options *ExtraValuesOptions) error { // get k3s image - image, err := getImageByVersion(options.KubernetesVersion, K3SVersionMap, log) + image, err := getImageByVersion(options.KubernetesVersion, K3SVersionMap) if err != nil { - return nil, err + return err } // build values - vConfig.ControlPlane.Distro.K3S.Enabled = true if image != "" { vConfig.ControlPlane.Distro.K3S.Image = parseImage(image) } - // add common release values - addCommonReleaseValues(vConfig, options) - return vConfig, nil + return nil } -func getK0SExtraValues(vConfig *Config, options *ExtraValuesOptions, log Logger) (*Config, error) { +func applyK0SExtraValues(vConfig *Config, options *ExtraValuesOptions) error { // get k0s image - image, err := getImageByVersion(options.KubernetesVersion, K0SVersionMap, log) + image, err := getImageByVersion(options.KubernetesVersion, K0SVersionMap) if err != nil { - return nil, err + return err } // build values - vConfig.ControlPlane.Distro.K0S.Enabled = true if image != "" { vConfig.ControlPlane.Distro.K0S.Image = parseImage(image) } - // add common release values - addCommonReleaseValues(vConfig, options) - return vConfig, nil + return nil } -func getEKSExtraValues(vConfig *Config, options *ExtraValuesOptions, log Logger) (*Config, error) { +func applyEKSExtraValues(vConfig *Config, options *ExtraValuesOptions) error { // get api server image - apiImage, err := getImageByVersion(options.KubernetesVersion, EKSAPIVersionMap, log) + apiImage, err := getImageByVersion(options.KubernetesVersion, EKSAPIVersionMap) if err != nil { - return nil, err + return err } // get controller image - controllerImage, err := getImageByVersion(options.KubernetesVersion, EKSControllerVersionMap, log) + controllerImage, err := getImageByVersion(options.KubernetesVersion, EKSControllerVersionMap) if err != nil { - return nil, err + return err } // get scheduler image - schedulerImage, err := getImageByVersion(options.KubernetesVersion, EKSSchedulerVersionMap, log) - if err != nil { - return nil, err - } - - // get etcd image - etcdImage, err := getImageByVersion(options.KubernetesVersion, EKSEtcdVersionMap, log) + schedulerImage, err := getImageByVersion(options.KubernetesVersion, EKSSchedulerVersionMap) if err != nil { - return nil, err - } - - // get coredns image - coreDNSImage, err := getImageByVersion(options.KubernetesVersion, EKSCoreDNSVersionMap, log) - if err != nil { - return nil, err + return err } // build values - vConfig.ControlPlane.Distro.EKS.Enabled = true if apiImage != "" { vConfig.ControlPlane.Distro.EKS.APIServer.Image = parseImage(apiImage) } @@ -242,40 +218,33 @@ func getEKSExtraValues(vConfig *Config, options *ExtraValuesOptions, log Logger) if schedulerImage != "" { vConfig.ControlPlane.Distro.EKS.Scheduler.Image = parseImage(schedulerImage) } - if etcdImage != "" { - vConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Image = parseImage(etcdImage) - } - if coreDNSImage != "" { - vConfig.ControlPlane.CoreDNS.Deployment.Image = coreDNSImage - } - addCommonReleaseValues(vConfig, options) - return vConfig, nil + return nil } -func getK8SExtraValues(vConfig *Config, options *ExtraValuesOptions, log Logger) (*Config, error) { +func applyK8SExtraValues(vConfig *Config, options *ExtraValuesOptions) error { // get api server image - apiImage, err := getImageByVersion(options.KubernetesVersion, 
K8SAPIVersionMap, log) + apiImage, err := getImageByVersion(options.KubernetesVersion, K8SAPIVersionMap) if err != nil { - return nil, err + return err } // get controller image - controllerImage, err := getImageByVersion(options.KubernetesVersion, K8SControllerVersionMap, log) + controllerImage, err := getImageByVersion(options.KubernetesVersion, K8SControllerVersionMap) if err != nil { - return nil, err + return err } // get scheduler image - schedulerImage, err := getImageByVersion(options.KubernetesVersion, K8SSchedulerVersionMap, log) + schedulerImage, err := getImageByVersion(options.KubernetesVersion, K8SSchedulerVersionMap) if err != nil { - return nil, err + return err } // get etcd image - etcdImage, err := getImageByVersion(options.KubernetesVersion, K8SEtcdVersionMap, log) + etcdImage, err := getImageByVersion(options.KubernetesVersion, K8SEtcdVersionMap) if err != nil { - return nil, err + return err } // build values @@ -292,8 +261,7 @@ func getK8SExtraValues(vConfig *Config, options *ExtraValuesOptions, log Logger) vConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Image = parseImage(etcdImage) } - addCommonReleaseValues(vConfig, options) - return vConfig, nil + return nil } func parseImage(image string) Image { @@ -308,7 +276,7 @@ func parseImage(image string) Image { return Image{} } -func getImageByVersion(kubernetesVersion KubernetesVersion, versionImageMap map[string]string, log Logger) (string, error) { +func getImageByVersion(kubernetesVersion KubernetesVersion, versionImageMap map[string]string) (string, error) { // check if there is a minor and major version if kubernetesVersion.Minor == "" || kubernetesVersion.Major == "" { return "", nil @@ -347,10 +315,8 @@ func getImageByVersion(kubernetesVersion KubernetesVersion, versionImageMap map[ image, ok := versionImageMap[serverVersionString] if !ok { if serverMinorInt > highestMinorVersion { - log.Info(fmt.Sprintf("officially unsupported host server version %s, will fallback to virtual cluster version v1.%d", serverVersionString, highestMinorVersion)) image = versionImageMap["1."+strconv.Itoa(highestMinorVersion)] } else { - log.Info(fmt.Sprintf("officially unsupported host server version %s, will fallback to virtual cluster version v1.%d", serverVersionString, lowestMinorVersion)) image = versionImageMap["1."+strconv.Itoa(lowestMinorVersion)] } } @@ -385,6 +351,18 @@ func addCommonReleaseValues(config *Config, options *ExtraValuesOptions) { config.Telemetry.PlatformInstanceID = options.PlatformInstanceID config.Telemetry.MachineID = options.MachineID } + + if options.Distro != "" { + switch options.Distro { + case K3SDistro: + config.ControlPlane.Distro.K3S.Enabled = true + case K0SDistro: + config.ControlPlane.Distro.K0S.Enabled = true + case EKSDistro: + config.ControlPlane.Distro.EKS.Enabled = true + case K8SDistro: + } + } } func getKubernetesVersion(serverVersion KubernetesVersion) string { diff --git a/vendor/github.com/loft-sh/vcluster/config/diff.go b/vendor/github.com/loft-sh/vcluster/config/diff.go index e2b6e098..0127e537 100644 --- a/vendor/github.com/loft-sh/vcluster/config/diff.go +++ b/vendor/github.com/loft-sh/vcluster/config/diff.go @@ -50,7 +50,7 @@ func diff(from, to any) any { case map[string]interface{}: toMap, ok := to.(map[string]interface{}) if !ok { - return to + return prune(to) } retMap := map[string]interface{}{} @@ -88,9 +88,37 @@ func diff(from, to any) any { } } - return retMap + return prune(retMap) default: - return to + return prune(to) + } +} + +func prune(in interface{}) 
interface{} { + switch inType := in.(type) { + case []interface{}: + for i, v := range inType { + inType[i] = prune(v) + } + return in + case map[string]interface{}: + if len(inType) == 0 { + return nil + } + + for k, v := range inType { + inType[k] = prune(v) + if inType[k] == nil { + delete(inType, k) + } + } + + if len(inType) == 0 { + return nil + } + return inType + default: + return in } } diff --git a/vendor/github.com/loft-sh/vcluster/config/values.yaml b/vendor/github.com/loft-sh/vcluster/config/values.yaml index 4a3d606a..944daa15 100644 --- a/vendor/github.com/loft-sh/vcluster/config/values.yaml +++ b/vendor/github.com/loft-sh/vcluster/config/values.yaml @@ -1,4 +1,6 @@ -# Sync Options +# DO NOT ADD ANY COMMENTS TO THIS FILE. +# Comments are added automatically in the hack/schema/main.go script according to the type defined in config.go +# If you want to change or add any comment, please change/add it in the config.go and rerun hack/schema/main.go sync: toHost: services: @@ -59,45 +61,8 @@ sync: all: false labels: {} -# Control Plane Options controlPlane: - # What distro to use for vCluster, if none is specified, k3s is used distro: - k3s: - enabled: false - command: [] - extraArgs: [] - imagePullPolicy: "" - image: - repository: "rancher/k3s" - tag: "v1.29.0-k3s1" - securityContext: {} - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 40m - memory: 64Mi - - k0s: - enabled: false - config: "" - command: [] - extraArgs: [] - imagePullPolicy: "" - image: - repository: "k0sproject/k0s" - tag: "v1.29.1-k0s.0" - securityContext: {} - resources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 40m - memory: 64Mi - k8s: enabled: false apiServer: @@ -133,6 +98,41 @@ controlPlane: cpu: 40m memory: 64Mi + k3s: + enabled: false + command: [] + extraArgs: [] + imagePullPolicy: "" + image: + repository: "rancher/k3s" + tag: "v1.29.0-k3s1" + securityContext: {} + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 40m + memory: 64Mi + + k0s: + enabled: false + config: "" + command: [] + extraArgs: [] + imagePullPolicy: "" + image: + repository: "k0sproject/k0s" + tag: "v1.29.1-k0s.0" + securityContext: {} + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 40m + memory: 64Mi + eks: enabled: false apiServer: @@ -172,15 +172,12 @@ controlPlane: database: embedded: enabled: false - # Optional sqlite dataSource - # dataSource: "" external: enabled: false dataSource: "" - # Optional database certificates - # certFile: "" - # keyFile: "" - # caFile: "" + certFile: "" + keyFile: "" + caFile: "" etcd: embedded: enabled: false @@ -219,7 +216,6 @@ controlPlane: persistence: volumeClaim: enabled: true - # Defines if the PVC should get automatically deleted when the StatefulSet is deleted. Can be either Delete or Retain retentionPolicy: Retain size: 5Gi storageClass: "" @@ -244,13 +240,11 @@ controlPlane: embedded: false overwriteManifests: "" overwriteConfig: "" - service: annotations: {} labels: {} spec: type: ClusterIP - deployment: annotations: {} labels: {} @@ -294,7 +288,7 @@ controlPlane: annotations: {} imagePullPolicy: "" image: - repository: "ghcr.io/loft-sh/vcluster" + repository: "ghcr.io/loft-sh/vcluster-pro" tag: "" workingDir: "" @@ -331,7 +325,6 @@ controlPlane: persistence: volumeClaim: enabled: auto - # Defines if the PVC should get automatically deleted when the StatefulSet is deleted. 
Can be either Delete or Retain retentionPolicy: Retain size: 5Gi storageClass: "" @@ -405,14 +398,13 @@ observability: pods: false networking: - # Embedded CoreDNS plugin config replicateServices: toHost: [] fromHost: [] resolveDNS: [] advanced: clusterDomain: "cluster.local" - fallbackHostCluster: true + fallbackHostCluster: false proxyKubelets: byHostname: true byIP: true @@ -474,7 +466,6 @@ policies: validatingWebhooks: [] mutatingWebhooks: [] -# Export vCluster Kube Config exportKubeConfig: context: "" server: "" @@ -482,10 +473,8 @@ exportKubeConfig: name: "" namespace: "" -# What plugins should get used plugins: {} -# Functionality that is likely to change, use with caution! experimental: multiNamespaceMode: enabled: false diff --git a/vendor/github.com/loft-sh/vcluster/pkg/config/config.go b/vendor/github.com/loft-sh/vcluster/pkg/config/config.go index 637c8f74..b556f2cd 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/config/config.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/config/config.go @@ -4,6 +4,7 @@ import ( "strings" "github.com/loft-sh/vcluster/config" + "github.com/loft-sh/vcluster/pkg/config/legacyconfig" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/discovery" @@ -12,6 +13,10 @@ import ( "k8s.io/klog/v2" ) +const ( + DefaultHostsRewriteImage = "library/alpine:3.13.1" +) + // VirtualClusterConfig wraps the config and adds extra info such as name, serviceName and targetNamespace type VirtualClusterConfig struct { // Holds the vCluster config @@ -116,7 +121,7 @@ func (v VirtualClusterConfig) VirtualClusterKubeConfig() config.VirtualClusterKu } // LegacyOptions converts the config to the legacy cluster options -func (v VirtualClusterConfig) LegacyOptions() (*LegacyVirtualClusterOptions, error) { +func (v VirtualClusterConfig) LegacyOptions() (*legacyconfig.LegacyVirtualClusterOptions, error) { legacyPlugins := []string{} for pluginName, plugin := range v.Plugin { if plugin.Version != "" && !plugin.Optional { @@ -136,8 +141,8 @@ func (v VirtualClusterConfig) LegacyOptions() (*LegacyVirtualClusterOptions, err nodeSelector = strings.Join(selectors, ",") } - return &LegacyVirtualClusterOptions{ - ProOptions: LegacyVirtualClusterProOptions{ + return &legacyconfig.LegacyVirtualClusterOptions{ + ProOptions: legacyconfig.LegacyVirtualClusterProOptions{ RemoteKubeConfig: v.Experimental.IsolatedControlPlane.KubeConfig, RemoteNamespace: v.Experimental.IsolatedControlPlane.Namespace, RemoteServiceName: v.Experimental.IsolatedControlPlane.Service, diff --git a/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/config.go b/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/config.go new file mode 100644 index 00000000..c5a730a9 --- /dev/null +++ b/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/config.go @@ -0,0 +1,433 @@ +package legacyconfig + +import "github.com/loft-sh/vcluster/config" + +type LegacyK0sAndK3s struct { + BaseHelm + AutoDeletePersistentVolumeClaims bool `json:"autoDeletePersistentVolumeClaims,omitempty"` + K3sToken string `json:"k3sToken,omitempty"` + VCluster VClusterValues `json:"vcluster,omitempty"` + EmbeddedEtcd EmbeddedEtcdValues `json:"embeddedEtcd,omitempty"` + Syncer SyncerValues `json:"syncer,omitempty"` + Storage Storage `json:"storage,omitempty"` +} + +type LegacyK8s struct { + BaseHelm + Syncer K8sSyncerValues `json:"syncer,omitempty"` + API APIServerValues `json:"api,omitempty"` + Controller ControllerValues `json:"controller,omitempty"` + Scheduler 
SchedulerValues `json:"scheduler,omitempty"` + Etcd EtcdValues `json:"etcd,omitempty"` + EmbeddedEtcd EmbeddedEtcdValues `json:"embeddedEtcd,omitempty"` + Storage Storage `json:"storage,omitempty"` +} + +type K8sSyncerValues struct { + SyncerValues + CommonValues + SecurityContext map[string]interface{} `json:"securityContext,omitempty"` + PodSecurityContext map[string]interface{} `json:"podSecurityContext,omitempty"` +} + +type APIServerValues struct { + SyncerExORCommonValues + ControlPlaneCommonValues +} + +type ControllerValues struct { + SyncerExORCommonValues + ControlPlaneCommonValues +} + +type SchedulerValues struct { + SyncerExORCommonValues + ControlPlaneCommonValues + Disabled bool `json:"disabled,omitempty"` +} + +type EtcdValues struct { + // Disabled is allowed for k8s & eks + Disabled bool `json:"disabled,omitempty"` + CommonValues + SyncerExORCommonValues + ControlPlaneCommonValues + SecurityContext map[string]interface{} `json:"securityContext,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + AutoDeletePersistentVolumeClaims bool `json:"autoDeletePersistentVolumeClaims,omitempty"` + Replicas int `json:"replicas,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Storage Storage `json:"storage,omitempty"` +} + +type MonitoringValues struct { + ServiceMonitor ServiceMonitor `json:"serviceMonitor,omitempty"` +} + +type ServiceMonitor struct { + Enabled bool `json:"enabled,omitempty"` +} + +type EmbeddedEtcdValues struct { + Enabled bool `json:"enabled,omitempty"` + MigrateFromEtcd bool `json:"migrateFromEtcd,omitempty"` +} + +type Storage struct { + Persistence *bool `json:"persistence,omitempty"` + Size string `json:"size,omitempty"` + ClassName string `json:"className,omitempty"` +} + +type BaseHelm struct { + GlobalAnnotations map[string]string `json:"globalAnnotations,omitempty"` + Pro bool `json:"pro,omitempty"` + ProLicenseSecret string `json:"proLicenseSecret,omitempty"` + Headless bool `json:"headless,omitempty"` + DefaultImageRegistry string `json:"defaultImageRegistry,omitempty"` + Plugin map[string]interface{} `json:"plugin,omitempty"` + Sync SyncValues `json:"sync,omitempty"` + FallbackHostDNS bool `json:"fallbackHostDns,omitempty"` + MapServices MapServices `json:"mapServices,omitempty"` + Proxy ProxyValues `json:"proxy,omitempty"` + Volumes []map[string]interface{} `json:"volumes,omitempty"` + ServiceAccount struct { + Create *bool `json:"create,omitempty"` + Name string `json:"name,omitempty"` + ImagePullSecrets []config.ImagePullSecretName `json:"imagePullSecrets"` + } `json:"serviceAccount,omitempty"` + WorkloadServiceAccount struct { + Annotations map[string]string `json:"annotations,omitempty"` + } `json:"workloadServiceAccount,omitempty"` + Rbac RBACValues `json:"rbac,omitempty"` + NodeSelector map[string]interface{} `json:"nodeSelector,omitempty"` + Affinity map[string]interface{} `json:"affinity,omitempty"` + PriorityClassName string `json:"priorityClassName,omitempty"` + Tolerations []map[string]interface{} `json:"tolerations,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + PodLabels map[string]string `json:"podLabels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + PodDisruptionBudget PDBValues `json:"podDisruptionBudget,omitempty"` + Service ServiceValues `json:"service,omitempty"` + Ingress IngressValues 
`json:"ingress,omitempty"` + + SecurityContext map[string]interface{} `json:"securityContext,omitempty"` + PodSecurityContext map[string]interface{} `json:"podSecurityContext,omitempty"` + Openshift struct { + Enable bool `json:"enable,omitempty"` + } `json:"openshift,omitempty"` + Coredns CoreDNSValues `json:"coredns,omitempty"` + Isolation IsolationValues `json:"isolation,omitempty"` + Init InitValues `json:"init,omitempty"` + MultiNamespaceMode EnabledSwitch `json:"multiNamespaceMode,omitempty"` + Telemetry TelemetryValues `json:"telemetry,omitempty"` + NoopSyncer NoopSyncerValues `json:"noopSyncer,omitempty"` + Monitoring MonitoringValues `json:"monitoring,omitempty"` + CentralAdmission AdmissionValues `json:"centralAdmission,omitempty"` +} + +type SyncerValues struct { + ControlPlaneCommonValues + ExtraArgs []string `json:"extraArgs,omitempty"` + Env []map[string]interface{} `json:"env,omitempty"` + LivenessProbe EnabledSwitch `json:"livenessProbe,omitempty"` + ReadinessProbe EnabledSwitch `json:"readinessProbe,omitempty"` + VolumeMounts []map[string]interface{} `json:"volumeMounts,omitempty"` + ExtraVolumeMounts []config.VolumeMount `json:"extraVolumeMounts,omitempty"` + Resources config.Resources `json:"resources,omitempty"` + KubeConfigContextName string `json:"kubeConfigContextName,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Storage Storage `json:"storage,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +type SyncValues struct { + Services EnabledSwitch `json:"services,omitempty"` + Configmaps SyncConfigMaps `json:"configmaps,omitempty"` + Secrets SyncSecrets `json:"secrets,omitempty"` + Endpoints EnabledSwitch `json:"endpoints,omitempty"` + Pods SyncPods `json:"pods,omitempty"` + Events EnabledSwitch `json:"events,omitempty"` + PersistentVolumeClaims EnabledSwitch `json:"persistentvolumeclaims,omitempty"` + Ingresses EnabledSwitch `json:"ingresses,omitempty"` + Ingressclasses EnabledSwitch `json:"ingressclasses,omitempty"` + FakeNodes EnabledSwitch `json:"fake-nodes,omitempty"` + FakePersistentvolumes EnabledSwitch `json:"fake-persistentvolumes,omitempty"` + Nodes SyncNodes `json:"nodes,omitempty"` + PersistentVolumes EnabledSwitch `json:"persistentvolumes,omitempty"` + StorageClasses EnabledSwitch `json:"storageclasses,omitempty"` + Hoststorageclasses EnabledSwitch `json:"hoststorageclasses,omitempty"` + Priorityclasses EnabledSwitch `json:"priorityclasses,omitempty"` + Networkpolicies EnabledSwitch `json:"networkpolicies,omitempty"` + Volumesnapshots EnabledSwitch `json:"volumesnapshots,omitempty"` + Poddisruptionbudgets EnabledSwitch `json:"poddisruptionbudgets,omitempty"` + Serviceaccounts EnabledSwitch `json:"serviceaccounts,omitempty"` + Generic SyncGeneric `json:"generic,omitempty"` +} + +type SyncConfigMaps struct { + Enabled *bool `json:"enabled,omitempty"` + All bool `json:"all,omitempty"` +} + +type SyncSecrets struct { + Enabled *bool `json:"enabled,omitempty"` + All bool `json:"all,omitempty"` +} + +type SyncPods struct { + Enabled *bool `json:"enabled,omitempty"` + EphemeralContainers *bool `json:"ephemeralContainers,omitempty"` + Status *bool `json:"status,omitempty"` +} + +type SyncNodes struct { + Enabled *bool `json:"enabled,omitempty"` + + FakeKubeletIPs *bool `json:"fakeKubeletIPs,omitempty"` + SyncAllNodes *bool `json:"syncAllNodes,omitempty"` + NodeSelector string 
`json:"nodeSelector,omitempty"` + EnableScheduler *bool `json:"enableScheduler,omitempty"` + SyncNodeChanges *bool `json:"syncNodeChanges,omitempty"` +} + +type SyncGeneric struct { + Config string `json:"config,omitempty"` +} + +type EnabledSwitch struct { + Enabled *bool `json:"enabled,omitempty"` +} + +type MapServices struct { + FromVirtual []config.ServiceMapping `json:"fromVirtual,omitempty"` + FromHost []config.ServiceMapping `json:"fromHost,omitempty"` +} + +type ProxyValues struct { + MetricsServer MetricsProxyServerConfig `json:"metricsServer,omitempty"` +} + +type MetricsProxyServerConfig struct { + Nodes EnabledSwitch `json:"nodes,omitempty"` + Pods EnabledSwitch `json:"pods,omitempty"` +} + +type VClusterValues struct { + Image string `json:"image,omitempty"` + ImagePullPolicy string `json:"imagePullPolicy,omitempty"` + Command []string `json:"command,omitempty"` + BaseArgs []string `json:"baseArgs,omitempty"` + ExtraArgs []string `json:"extraArgs,omitempty"` + ExtraVolumeMounts []config.VolumeMount `json:"extraVolumeMounts,omitempty"` + VolumeMounts []map[string]interface{} `json:"volumeMounts,omitempty"` + Env []map[string]interface{} `json:"env,omitempty"` + Resources map[string]interface{} `json:"resources,omitempty"` + + // this is only provided in context of k0s right now + PriorityClassName string `json:"priorityClassName,omitempty"` +} + +// These should be remove from the chart first as they are deprecated there +type RBACValues struct { + ClusterRole RBACClusterRoleValues `json:"clusterRole,omitempty"` + Role RBACRoleValues `json:"role,omitempty"` +} + +type RBACClusterRoleValues struct { + Create *bool `json:"create,omitempty"` + ExtraRules []map[string]interface{} `json:"extraRules,omitempty"` +} + +type RBACRoleValues struct { + Create *bool `json:"create,omitempty"` + ExtraRules []map[string]interface{} `json:"extraRules,omitempty"` + ExcludedAPIResources []string `json:"excludedApiResources,omitempty"` +} + +type RBACRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. '*' represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. 
+ // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` +} + +type PDBValues struct { + Enabled bool `json:"enabled,omitempty"` + MinAvailable interface{} `json:"minAvailable,omitempty"` + MaxUnavailable interface{} `json:"maxUnavailable,omitempty"` +} + +type ServiceValues struct { + Type string `json:"type,omitempty"` + ExternalIPs []string `json:"externalIPs,omitempty"` + ExternalTrafficPolicy string `json:"externalTrafficPolicy,omitempty"` + LoadBalancerIP string `json:"loadBalancerIP,omitempty"` + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` + LoadBalancerClass string `json:"loadBalancerClass,omitempty"` + LoadBalancerAnnotation map[string]string `json:"loadBalancerAnnotations,omitempty"` +} + +type IngressValues struct { + Enabled bool `json:"enabled,omitempty"` + PathType string `json:"pathType,omitempty"` + IngressClassName string `json:"ingressClassName,omitempty"` + Host string `json:"host,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + TLS []interface{} `json:"tls,omitempty"` +} + +type CoreDNSValues struct { + Enabled *bool `json:"enabled,omitempty"` + Integrated bool `json:"integrated,omitempty"` + Fallback string `json:"fallback,omitempty"` + Plugin CoreDNSPluginValues `json:"plugin,omitempty"` + Replicas int `json:"replicas,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Image string `json:"image,omitempty"` + Config string `json:"config,omitempty"` + Service CoreDNSServiceValues `json:"service,omitempty"` + Resources *config.Resources `json:"resources,omitempty"` + Manifests string `json:"manifests,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + PodLabels map[string]string `json:"podLabels,omitempty"` +} + +type CoreDNSPluginValues struct { + Enabled bool `json:"enabled,omitempty"` + Config []DNSMappings `json:"config,omitempty"` +} + +type DNSMappings struct { + Record Record `json:"record,omitempty"` + Target Target `json:"target,omitempty"` + AllowedOn []FilterSpec `json:"allowedOn,omitempty"` + ExceptOn []FilterSpec `json:"exceptOn,omitempty"` +} + +type Record struct { + RecordType RecordType `json:"recordType,omitempty"` + FQDN *string `json:"fqdn,omitempty"` + Service *string `json:"service,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +type RecordType string +type TargetMode string + +type Target struct { + Mode TargetMode `json:"mode,omitempty"` + VCluster *string `json:"vcluster,omitempty"` + URL *string `json:"url,omitempty"` + Service *string `json:"service,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +type FilterSpec struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Labels []string `json:"labels,omitempty"` +} + +type CoreDNSServiceValues struct { + Type string `json:"type,omitempty"` + ExternalIPs []string `json:"externalIPs,omitempty"` + ExternalTrafficPolicy string `json:"externalTrafficPolicy,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +type IsolationValues struct { + Enabled bool `json:"enabled,omitempty"` + Namespace *string `json:"namespace,omitempty"` + PodSecurityStandard string `json:"podSecurityStandard,omitempty"` + NodeProxyPermission EnabledSwitch `json:"nodeProxyPermission,omitempty"` + + ResourceQuota struct { + Enabled *bool `json:"enabled,omitempty"` + Quota map[string]interface{} `json:"quota,omitempty"` + ScopeSelector 
map[string]interface{} `json:"scopeSelector,omitempty"` + Scopes []string `json:"scopes,omitempty"` + } `json:"resourceQuota,omitempty"` + + LimitRange IsolationLimitRangeValues `json:"limitRange,omitempty"` + NetworkPolicy NetworkPolicyValues `json:"networkPolicy,omitempty"` +} + +type IsolationLimitRangeValues struct { + Enabled *bool `json:"enabled,omitempty"` + Default map[string]interface{} `json:"default,omitempty"` + DefaultRequest map[string]interface{} `json:"defaultRequest,omitempty"` +} + +type NetworkPolicyValues struct { + Enabled *bool `json:"enabled,omitempty"` + OutgoingConnections config.OutgoingConnections `json:"outgoingConnections,omitempty"` +} + +type InitValues struct { + Manifests string `json:"manifests,omitempty"` + ManifestsTemplate string `json:"manifestsTemplate,omitempty"` + Helm []config.ExperimentalDeployHelm `json:"helm,omitempty"` +} + +type TelemetryValues struct { + Disabled config.StrBool `json:"disabled,omitempty"` + InstanceCreator string `json:"instanceCreator,omitempty"` + PlatformUserID string `json:"platformUserID,omitempty"` + PlatformInstanceID string `json:"platformInstanceID,omitempty"` + MachineID string `json:"machineID,omitempty"` +} + +type NoopSyncerValues struct { + Enabled bool `json:"enabled,omitempty"` + Synck8sService bool `json:"synck8sService,omitempty"` + Secret struct { + ServerCaCert string `json:"serverCaCert,omitempty"` + ServerCaKey string `json:"serverCaKey,omitempty"` + ClientCaCert string `json:"clientCaCert,omitempty"` + RequestHeaderCaCert string `json:"requestHeaderCaCert,omitempty"` + KubeConfig string `json:"kubeConfig,omitempty"` + } `json:"secret,omitempty"` +} + +type AdmissionValues struct { + ValidatingWebhooks []config.ValidatingWebhookConfiguration `json:"validatingWebhooks,omitempty"` + MutatingWebhooks []config.MutatingWebhookConfiguration `json:"mutatingWebhooks,omitempty"` +} + +type ControlPlaneCommonValues struct { + Image string `json:"image,omitempty"` + ImagePullPolicy string `json:"imagePullPolicy,omitempty"` +} + +type SyncerExORCommonValues struct { + ExtraArgs []string `json:"extraArgs,omitempty"` + Resources *config.Resources `json:"resources,omitempty"` +} + +type CommonValues struct { + Volumes []map[string]interface{} `json:"volumes,omitempty"` + PriorityClassName string `json:"priorityClassName,omitempty"` + NodeSelector map[string]interface{} `json:"nodeSelector,omitempty"` + Affinity map[string]interface{} `json:"affinity,omitempty"` + Tolerations []map[string]interface{} `json:"tolerations,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + PodLabels map[string]string `json:"podLabels,omitempty"` +} diff --git a/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/migrate.go b/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/migrate.go new file mode 100644 index 00000000..e65f4484 --- /dev/null +++ b/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/migrate.go @@ -0,0 +1,1132 @@ +package legacyconfig + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/ghodss/yaml" + "github.com/loft-sh/vcluster/config" +) + +func MigrateLegacyConfig(distro, oldValues string) (string, error) { + fromConfig, err := config.NewDefaultConfig() + if err != nil { + return "", err + } + toConfig, err := config.NewDefaultConfig() + if err != nil { + return "", err + } + + switch distro { + case config.K0SDistro, config.K3SDistro: + err = migrateK3sAndK0s(distro, oldValues, toConfig) + if err != nil { + return "", fmt.Errorf("migrate 
legacy %s values: %w", distro, err) + } + case config.K8SDistro, config.EKSDistro: + err = migrateK8sAndEKS(distro, oldValues, toConfig) + if err != nil { + return "", fmt.Errorf("migrate legacy %s values: %w", distro, err) + } + default: + return "", fmt.Errorf("migrating distro %s is not supported", distro) + } + + return config.Diff(fromConfig, toConfig) +} + +func migrateK8sAndEKS(distro, oldValues string, newConfig *config.Config) error { + // unmarshal legacy config + oldConfig := &LegacyK8s{} + err := yaml.Unmarshal([]byte(oldValues), oldConfig) + if err != nil { + return fmt.Errorf("unmarshal legacy config: %w", err) + } + + // k8s specific + if distro == config.K8SDistro { + newConfig.ControlPlane.Distro.K8S.Enabled = true + convertAPIValues(oldConfig.API, &newConfig.ControlPlane.Distro.K8S.APIServer) + convertControllerValues(oldConfig.Controller, &newConfig.ControlPlane.Distro.K8S.ControllerManager) + convertSchedulerValues(oldConfig.Scheduler, &newConfig.ControlPlane.Distro.K8S.Scheduler) + } else if distro == config.EKSDistro { + newConfig.ControlPlane.Distro.EKS.Enabled = true + convertAPIValues(oldConfig.API, &newConfig.ControlPlane.Distro.EKS.APIServer) + convertControllerValues(oldConfig.Controller, &newConfig.ControlPlane.Distro.EKS.ControllerManager) + convertSchedulerValues(oldConfig.Scheduler, &newConfig.ControlPlane.Distro.EKS.Scheduler) + } + + // convert etcd + err = convertEtcd(oldConfig.Etcd, newConfig) + if err != nil { + return err + } + + // default ordered ready + newConfig.ControlPlane.StatefulSet.Scheduling.PodManagementPolicy = "OrderedReady" + + // storage config + applyStorage(oldConfig.Storage, newConfig) + + // syncer config + err = convertK8sSyncerConfig(oldConfig.Syncer, newConfig) + if err != nil { + return fmt.Errorf("error converting syncer config: %w", err) + } + + // migrate embedded etcd + convertEmbeddedEtcd(oldConfig.EmbeddedEtcd, newConfig) + + // convert the rest + err = convertBaseValues(oldConfig.BaseHelm, newConfig) + if err != nil { + return err + } + + // make default storage deployed etcd + if !newConfig.ControlPlane.BackingStore.Database.External.Enabled && !newConfig.ControlPlane.BackingStore.Database.Embedded.Enabled && !newConfig.ControlPlane.BackingStore.Etcd.Embedded.Enabled { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.Enabled = true + } + + return nil +} + +func migrateK3sAndK0s(distro, oldValues string, newConfig *config.Config) error { + // unmarshal legacy config + oldConfig := &LegacyK0sAndK3s{} + err := yaml.Unmarshal([]byte(oldValues), oldConfig) + if err != nil { + return fmt.Errorf("unmarshal legacy config: %w", err) + } + + // distro specific + if distro == config.K0SDistro { + newConfig.ControlPlane.Distro.K0S.Enabled = true + + // vcluster config + err = convertVClusterConfig(oldConfig.VCluster, &newConfig.ControlPlane.Distro.K0S.DistroCommon, &newConfig.ControlPlane.Distro.K0S.DistroContainer, newConfig) + if err != nil { + return fmt.Errorf("error converting vcluster config: %w", err) + } + } else if distro == config.K3SDistro { + newConfig.ControlPlane.Distro.K3S.Enabled = true + newConfig.ControlPlane.Distro.K3S.Token = oldConfig.K3sToken + + // vcluster config + err = convertVClusterConfig(oldConfig.VCluster, &newConfig.ControlPlane.Distro.K3S.DistroCommon, &newConfig.ControlPlane.Distro.K3S.DistroContainer, newConfig) + if err != nil { + return fmt.Errorf("error converting vcluster config: %w", err) + } + } + + // general things to update + newConfig.ControlPlane.StatefulSet.Scheduling.PodManagementPolicy 
= "OrderedReady" + if oldConfig.AutoDeletePersistentVolumeClaims { + newConfig.ControlPlane.StatefulSet.Persistence.VolumeClaim.RetentionPolicy = "Delete" + } + + // storage config + applyStorage(oldConfig.Storage, newConfig) + + // syncer config + err = convertSyncerConfig(oldConfig.Syncer, newConfig) + if err != nil { + return fmt.Errorf("error converting syncer config: %w", err) + } + + // migrate embedded etcd + convertEmbeddedEtcd(oldConfig.EmbeddedEtcd, newConfig) + + // convert the rest + return convertBaseValues(oldConfig.BaseHelm, newConfig) +} + +func convertEtcd(oldConfig EtcdValues, newConfig *config.Config) error { + if oldConfig.Disabled { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Enabled = false + newConfig.ControlPlane.BackingStore.Etcd.Deploy.Service.Enabled = false + newConfig.ControlPlane.BackingStore.Etcd.Deploy.HeadlessService.Enabled = false + } + if oldConfig.ImagePullPolicy != "" { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.ImagePullPolicy = oldConfig.ImagePullPolicy + } + if oldConfig.Image != "" { + convertImage(oldConfig.Image, &newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Image) + } + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.ExtraArgs = oldConfig.ExtraArgs + if oldConfig.Resources != nil { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Resources = *oldConfig.Resources + } + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Persistence.AddVolumes = oldConfig.Volumes + if oldConfig.PriorityClassName != "" { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Scheduling.PriorityClassName = oldConfig.PriorityClassName + } + if len(oldConfig.NodeSelector) > 0 { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Scheduling.NodeSelector = oldConfig.NodeSelector + } + if len(oldConfig.Affinity) > 0 { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Scheduling.Affinity = oldConfig.Affinity + } + if len(oldConfig.Tolerations) > 0 { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Scheduling.Tolerations = oldConfig.Tolerations + } + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Pods.Annotations = oldConfig.PodAnnotations + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Pods.Labels = oldConfig.PodLabels + if len(oldConfig.SecurityContext) > 0 { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Security.ContainerSecurityContext = oldConfig.SecurityContext + } + if len(oldConfig.ServiceAnnotations) > 0 { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.Service.Annotations = oldConfig.ServiceAnnotations + } + if oldConfig.AutoDeletePersistentVolumeClaims { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Persistence.VolumeClaim.RetentionPolicy = "Delete" + } + if oldConfig.Replicas > 0 { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.HighAvailability.Replicas = oldConfig.Replicas + } + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Labels = oldConfig.Labels + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Annotations = oldConfig.Annotations + + if oldConfig.Storage.Persistence != nil { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Persistence.VolumeClaim.Enabled = *oldConfig.Storage.Persistence + } + if oldConfig.Storage.Size != "" { + newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Persistence.VolumeClaim.Size = oldConfig.Storage.Size + } + if oldConfig.Storage.ClassName != "" { + 
newConfig.ControlPlane.BackingStore.Etcd.Deploy.StatefulSet.Persistence.VolumeClaim.StorageClass = oldConfig.Storage.ClassName + } + + return nil +} + +func convertAPIValues(oldConfig APIServerValues, newContainer *config.DistroContainerEnabled) { + if oldConfig.ImagePullPolicy != "" { + newContainer.ImagePullPolicy = oldConfig.ImagePullPolicy + } + if oldConfig.Image != "" { + convertImage(oldConfig.Image, &newContainer.Image) + } + newContainer.ExtraArgs = oldConfig.ExtraArgs +} + +func convertControllerValues(oldConfig ControllerValues, newContainer *config.DistroContainerEnabled) { + if oldConfig.ImagePullPolicy != "" { + newContainer.ImagePullPolicy = oldConfig.ImagePullPolicy + } + if oldConfig.Image != "" { + convertImage(oldConfig.Image, &newContainer.Image) + } + newContainer.ExtraArgs = oldConfig.ExtraArgs +} + +func convertSchedulerValues(oldConfig SchedulerValues, newContainer *config.DistroContainer) { + if oldConfig.ImagePullPolicy != "" { + newContainer.ImagePullPolicy = oldConfig.ImagePullPolicy + } + if oldConfig.Image != "" { + convertImage(oldConfig.Image, &newContainer.Image) + } + newContainer.ExtraArgs = oldConfig.ExtraArgs +} + +func convertBaseValues(oldConfig BaseHelm, newConfig *config.Config) error { + newConfig.ControlPlane.Advanced.GlobalMetadata.Annotations = oldConfig.GlobalAnnotations + newConfig.Pro = oldConfig.Pro + if strings.Contains(oldConfig.ProLicenseSecret, "/") { + splitted := strings.Split(oldConfig.ProLicenseSecret, "/") + newConfig.Platform.APIKey.SecretRef.Namespace = splitted[0] + newConfig.Platform.APIKey.SecretRef.Name = splitted[1] + } else { + newConfig.Platform.APIKey.SecretRef.Name = oldConfig.ProLicenseSecret + } + + newConfig.Experimental.IsolatedControlPlane.Headless = oldConfig.Headless + newConfig.ControlPlane.Advanced.DefaultImageRegistry = oldConfig.DefaultImageRegistry + + if len(oldConfig.Plugin) > 0 { + err := convertObject(oldConfig.Plugin, &newConfig.Plugin) + if err != nil { + return err + } + } + + newConfig.Networking.Advanced.FallbackHostCluster = oldConfig.FallbackHostDNS + newConfig.ControlPlane.StatefulSet.Labels = oldConfig.Labels + newConfig.ControlPlane.StatefulSet.Annotations = oldConfig.Annotations + newConfig.ControlPlane.StatefulSet.Pods.Labels = oldConfig.PodLabels + newConfig.ControlPlane.StatefulSet.Pods.Annotations = oldConfig.PodAnnotations + newConfig.ControlPlane.StatefulSet.Scheduling.Tolerations = oldConfig.Tolerations + newConfig.ControlPlane.StatefulSet.Scheduling.NodeSelector = oldConfig.NodeSelector + newConfig.ControlPlane.StatefulSet.Scheduling.Affinity = oldConfig.Affinity + newConfig.ControlPlane.StatefulSet.Scheduling.PriorityClassName = oldConfig.PriorityClassName + + newConfig.Networking.ReplicateServices.FromHost = oldConfig.MapServices.FromHost + newConfig.Networking.ReplicateServices.ToHost = oldConfig.MapServices.FromVirtual + + if oldConfig.Proxy.MetricsServer.Pods.Enabled != nil { + newConfig.Observability.Metrics.Proxy.Pods = *oldConfig.Proxy.MetricsServer.Pods.Enabled + } + if oldConfig.Proxy.MetricsServer.Nodes.Enabled != nil { + newConfig.Observability.Metrics.Proxy.Nodes = *oldConfig.Proxy.MetricsServer.Nodes.Enabled + } + + if len(oldConfig.Volumes) > 0 { + newConfig.ControlPlane.StatefulSet.Persistence.AddVolumes = oldConfig.Volumes + } + + if oldConfig.ServiceAccount.Create != nil { + newConfig.ControlPlane.Advanced.ServiceAccount.Enabled = *oldConfig.ServiceAccount.Create + } + if oldConfig.ServiceAccount.Name != "" { + newConfig.ControlPlane.Advanced.ServiceAccount.Name = 
oldConfig.ServiceAccount.Name + } + if len(oldConfig.ServiceAccount.ImagePullSecrets) > 0 { + newConfig.ControlPlane.Advanced.ServiceAccount.ImagePullSecrets = oldConfig.ServiceAccount.ImagePullSecrets + } + if len(oldConfig.WorkloadServiceAccount.Annotations) > 0 { + newConfig.ControlPlane.Advanced.WorkloadServiceAccount.Annotations = oldConfig.WorkloadServiceAccount.Annotations + } + + newConfig.Policies.CentralAdmission.MutatingWebhooks = oldConfig.CentralAdmission.MutatingWebhooks + newConfig.Policies.CentralAdmission.ValidatingWebhooks = oldConfig.CentralAdmission.ValidatingWebhooks + + if oldConfig.Telemetry.Disabled == "true" { + newConfig.Telemetry.Enabled = false + } + + if oldConfig.MultiNamespaceMode.Enabled != nil { + newConfig.Experimental.MultiNamespaceMode.Enabled = *oldConfig.MultiNamespaceMode.Enabled + } + + if len(oldConfig.SecurityContext) > 0 { + if newConfig.ControlPlane.StatefulSet.Security.ContainerSecurityContext == nil { + newConfig.ControlPlane.StatefulSet.Security.ContainerSecurityContext = map[string]interface{}{} + } + for k, v := range oldConfig.SecurityContext { + newConfig.ControlPlane.StatefulSet.Security.ContainerSecurityContext[k] = v + } + } + if len(oldConfig.PodSecurityContext) > 0 { + if newConfig.ControlPlane.StatefulSet.Security.PodSecurityContext == nil { + newConfig.ControlPlane.StatefulSet.Security.PodSecurityContext = map[string]interface{}{} + } + for k, v := range oldConfig.PodSecurityContext { + newConfig.ControlPlane.StatefulSet.Security.PodSecurityContext[k] = v + } + } + + if oldConfig.Openshift.Enable { + newConfig.RBAC.Role.ExtraRules = append(newConfig.RBAC.Role.ExtraRules, map[string]interface{}{ + "apiGroups": []string{""}, + "resources": []string{"endpoints/restricted"}, + "verbs": []string{"create"}, + }) + } + + newConfig.ControlPlane.ServiceMonitor.Enabled = oldConfig.Monitoring.ServiceMonitor.Enabled + + if len(oldConfig.Rbac.Role.ExtraRules) > 0 { + newConfig.RBAC.Role.ExtraRules = append(newConfig.RBAC.Role.ExtraRules, oldConfig.Rbac.Role.ExtraRules...) + } + if oldConfig.Rbac.Role.Create != nil { + newConfig.RBAC.Role.Enabled = *oldConfig.Rbac.Role.Create + } + if len(oldConfig.Rbac.Role.ExcludedAPIResources) > 0 { + return fmt.Errorf("rbac.role.excludedAPIResources is not supported anymore, please use rbac.role.overwriteRules instead") + } + + if len(oldConfig.Rbac.ClusterRole.ExtraRules) > 0 { + newConfig.RBAC.ClusterRole.ExtraRules = append(newConfig.RBAC.ClusterRole.ExtraRules, oldConfig.Rbac.ClusterRole.ExtraRules...) 
+ } + if oldConfig.Rbac.ClusterRole.Create != nil && *oldConfig.Rbac.ClusterRole.Create { + newConfig.RBAC.ClusterRole.Enabled = "true" + } + + if oldConfig.NoopSyncer.Enabled { + newConfig.Experimental.SyncSettings.DisableSync = true + if oldConfig.NoopSyncer.Secret.KubeConfig != "" { + newConfig.Experimental.VirtualClusterKubeConfig.KubeConfig = oldConfig.NoopSyncer.Secret.KubeConfig + } + if oldConfig.NoopSyncer.Secret.ClientCaCert != "" { + newConfig.Experimental.VirtualClusterKubeConfig.ClientCACert = oldConfig.NoopSyncer.Secret.ClientCaCert + } + if oldConfig.NoopSyncer.Secret.ServerCaKey != "" { + newConfig.Experimental.VirtualClusterKubeConfig.ServerCAKey = oldConfig.NoopSyncer.Secret.ServerCaKey + } + if oldConfig.NoopSyncer.Secret.ServerCaCert != "" { + newConfig.Experimental.VirtualClusterKubeConfig.ServerCACert = oldConfig.NoopSyncer.Secret.ServerCaCert + } + if oldConfig.NoopSyncer.Secret.RequestHeaderCaCert != "" { + newConfig.Experimental.VirtualClusterKubeConfig.RequestHeaderCACert = oldConfig.NoopSyncer.Secret.RequestHeaderCaCert + } + newConfig.Experimental.SyncSettings.RewriteKubernetesService = oldConfig.NoopSyncer.Synck8sService + } + + newConfig.Experimental.Deploy.Manifests = oldConfig.Init.Manifests + newConfig.Experimental.Deploy.ManifestsTemplate = oldConfig.Init.ManifestsTemplate + newConfig.Experimental.Deploy.Helm = oldConfig.Init.Helm + + if oldConfig.Isolation.Enabled { + if oldConfig.Isolation.NetworkPolicy.Enabled != nil { + newConfig.Policies.NetworkPolicy.Enabled = *oldConfig.Isolation.NetworkPolicy.Enabled + } else { + newConfig.Policies.NetworkPolicy.Enabled = true + } + if oldConfig.Isolation.ResourceQuota.Enabled != nil { + newConfig.Policies.ResourceQuota.Enabled = *oldConfig.Isolation.ResourceQuota.Enabled + } else { + newConfig.Policies.ResourceQuota.Enabled = true + } + if oldConfig.Isolation.LimitRange.Enabled != nil { + newConfig.Policies.LimitRange.Enabled = *oldConfig.Isolation.LimitRange.Enabled + } else { + newConfig.Policies.LimitRange.Enabled = true + } + if oldConfig.Isolation.PodSecurityStandard == "" { + newConfig.Policies.PodSecurityStandard = "baseline" + } else { + newConfig.Policies.PodSecurityStandard = oldConfig.Isolation.PodSecurityStandard + } + + if oldConfig.Isolation.NetworkPolicy.OutgoingConnections.IPBlock.CIDR != "" { + newConfig.Policies.NetworkPolicy.OutgoingConnections.IPBlock.CIDR = oldConfig.Isolation.NetworkPolicy.OutgoingConnections.IPBlock.CIDR + } + if len(oldConfig.Isolation.NetworkPolicy.OutgoingConnections.IPBlock.Except) > 0 { + newConfig.Policies.NetworkPolicy.OutgoingConnections.IPBlock.Except = oldConfig.Isolation.NetworkPolicy.OutgoingConnections.IPBlock.Except + } + + if len(oldConfig.Isolation.LimitRange.Default) > 0 { + newConfig.Policies.LimitRange.Default = oldConfig.Isolation.LimitRange.Default + } + if len(oldConfig.Isolation.LimitRange.DefaultRequest) > 0 { + newConfig.Policies.LimitRange.DefaultRequest = oldConfig.Isolation.LimitRange.DefaultRequest + } + if len(oldConfig.Isolation.ResourceQuota.Quota) > 0 { + newConfig.Policies.ResourceQuota.Quota = oldConfig.Isolation.ResourceQuota.Quota + } + if len(oldConfig.Isolation.ResourceQuota.Scopes) > 0 { + newConfig.Policies.ResourceQuota.Scopes = oldConfig.Isolation.ResourceQuota.Scopes + } + if len(oldConfig.Isolation.ResourceQuota.ScopeSelector) > 0 { + newConfig.Policies.ResourceQuota.ScopeSelector = oldConfig.Isolation.ResourceQuota.ScopeSelector + } + + if oldConfig.Isolation.Namespace != nil { + return fmt.Errorf("isolation.namespace is no 
longer supported, use experimental.syncSettings.targetNamespace instead") + } + if oldConfig.Isolation.NodeProxyPermission.Enabled != nil { + return fmt.Errorf("isolation.nodeProxyPermission.enabled is no longer supported, use rbac.clusterRole.overwriteRules instead") + } + } + + if oldConfig.Coredns.Enabled != nil { + newConfig.ControlPlane.CoreDNS.Enabled = *oldConfig.Coredns.Enabled + } + if oldConfig.Coredns.Fallback != "" { + newConfig.Policies.NetworkPolicy.FallbackDNS = oldConfig.Coredns.Fallback + } + + newConfig.ControlPlane.CoreDNS.Embedded = oldConfig.Coredns.Integrated + if oldConfig.Coredns.Replicas > 0 { + newConfig.ControlPlane.CoreDNS.Deployment.Replicas = oldConfig.Coredns.Replicas + } + newConfig.ControlPlane.CoreDNS.Deployment.NodeSelector = oldConfig.Coredns.NodeSelector + if oldConfig.Coredns.Image != "" { + newConfig.ControlPlane.CoreDNS.Deployment.Image = oldConfig.Coredns.Image + } + if oldConfig.Coredns.Config != "" { + newConfig.ControlPlane.CoreDNS.OverwriteConfig = oldConfig.Coredns.Config + } + if oldConfig.Coredns.Manifests != "" { + newConfig.ControlPlane.CoreDNS.OverwriteManifests = oldConfig.Coredns.Manifests + } + newConfig.ControlPlane.CoreDNS.Deployment.Pods.Labels = oldConfig.Coredns.PodLabels + newConfig.ControlPlane.CoreDNS.Deployment.Pods.Annotations = oldConfig.Coredns.PodAnnotations + if oldConfig.Coredns.Resources != nil { + newConfig.ControlPlane.CoreDNS.Deployment.Resources = *oldConfig.Coredns.Resources + } + if oldConfig.Coredns.Plugin.Enabled { + if len(oldConfig.Coredns.Plugin.Config) > 0 { + return fmt.Errorf("please manually upgrade coredns.plugin.config to networking.resolvedDNS") + } + } + + if len(oldConfig.Coredns.Service.Annotations) > 0 { + newConfig.ControlPlane.CoreDNS.Service.Annotations = oldConfig.Coredns.Service.Annotations + } + if oldConfig.Coredns.Service.Type != "" { + if newConfig.ControlPlane.CoreDNS.Service.Spec == nil { + newConfig.ControlPlane.CoreDNS.Service.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.CoreDNS.Service.Spec["type"] = oldConfig.Coredns.Service.Type + } + if oldConfig.Coredns.Service.ExternalTrafficPolicy != "" { + if newConfig.ControlPlane.CoreDNS.Service.Spec == nil { + newConfig.ControlPlane.CoreDNS.Service.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.CoreDNS.Service.Spec["externalTrafficPolicy"] = oldConfig.Coredns.Service.ExternalTrafficPolicy + } + if len(oldConfig.Coredns.Service.ExternalIPs) > 0 { + if newConfig.ControlPlane.CoreDNS.Service.Spec == nil { + newConfig.ControlPlane.CoreDNS.Service.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.CoreDNS.Service.Spec["externalIPs"] = oldConfig.Coredns.Service.ExternalIPs + } + + // ingress + if oldConfig.Ingress.Enabled { + newConfig.ControlPlane.Ingress.Enabled = true + } + if oldConfig.Ingress.PathType != "" { + newConfig.ControlPlane.Ingress.PathType = oldConfig.Ingress.PathType + } + if oldConfig.Ingress.IngressClassName != "" { + if newConfig.ControlPlane.Ingress.Spec == nil { + newConfig.ControlPlane.Ingress.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.Ingress.Spec["ingressClassName"] = oldConfig.Ingress.IngressClassName + } + if oldConfig.Ingress.Host != "" { + newConfig.ControlPlane.Ingress.Host = oldConfig.Ingress.Host + } + if len(oldConfig.Ingress.Annotations) > 0 { + if newConfig.ControlPlane.Ingress.Annotations == nil { + newConfig.ControlPlane.Ingress.Annotations = map[string]string{} + } + for k, v := range oldConfig.Ingress.Annotations { + newConfig.ControlPlane.Ingress.Annotations[k] = v + 
} + } + if len(oldConfig.Ingress.TLS) > 0 { + if newConfig.ControlPlane.Ingress.Spec == nil { + newConfig.ControlPlane.Ingress.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.Ingress.Spec["tls"] = oldConfig.Ingress.TLS + } + + // service + if oldConfig.Service.Type != "" { + if newConfig.ControlPlane.Service.Spec == nil { + newConfig.ControlPlane.Service.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.Service.Spec["type"] = oldConfig.Service.Type + } + if len(oldConfig.Service.ExternalIPs) > 0 { + if newConfig.ControlPlane.Service.Spec == nil { + newConfig.ControlPlane.Service.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.Service.Spec["externalIPs"] = oldConfig.Service.ExternalIPs + } + if oldConfig.Service.ExternalTrafficPolicy != "" { + if newConfig.ControlPlane.Service.Spec == nil { + newConfig.ControlPlane.Service.Spec = map[string]interface{}{} + } + newConfig.ControlPlane.Service.Spec["externalTrafficPolicy"] = oldConfig.Service.ExternalTrafficPolicy + } + + // sync + if oldConfig.Sync.Services.Enabled != nil { + newConfig.Sync.ToHost.Services.Enabled = *oldConfig.Sync.Services.Enabled + } + if oldConfig.Sync.Configmaps.Enabled != nil { + newConfig.Sync.ToHost.ConfigMaps.Enabled = *oldConfig.Sync.Configmaps.Enabled + } + if oldConfig.Sync.Configmaps.All { + newConfig.Sync.ToHost.ConfigMaps.All = oldConfig.Sync.Configmaps.All + } + if oldConfig.Sync.Secrets.Enabled != nil { + newConfig.Sync.ToHost.Secrets.Enabled = *oldConfig.Sync.Secrets.Enabled + } + if oldConfig.Sync.Secrets.All { + newConfig.Sync.ToHost.Secrets.All = oldConfig.Sync.Secrets.All + } + if oldConfig.Sync.Endpoints.Enabled != nil { + newConfig.Sync.ToHost.Endpoints.Enabled = *oldConfig.Sync.Endpoints.Enabled + } + if oldConfig.Sync.Pods.Enabled != nil { + newConfig.Sync.ToHost.Pods.Enabled = *oldConfig.Sync.Pods.Enabled + } + if oldConfig.Sync.Events.Enabled != nil { + newConfig.Sync.FromHost.Events.Enabled = *oldConfig.Sync.Events.Enabled + } + if oldConfig.Sync.PersistentVolumeClaims.Enabled != nil { + newConfig.Sync.ToHost.PersistentVolumeClaims.Enabled = *oldConfig.Sync.PersistentVolumeClaims.Enabled + } + if oldConfig.Sync.Ingresses.Enabled != nil { + newConfig.Sync.ToHost.Ingresses.Enabled = *oldConfig.Sync.Ingresses.Enabled + } + if oldConfig.Sync.Ingressclasses.Enabled != nil { + newConfig.Sync.FromHost.IngressClasses.Enabled = *oldConfig.Sync.Ingressclasses.Enabled + } + if oldConfig.Sync.FakeNodes.Enabled != nil && *oldConfig.Sync.FakeNodes.Enabled { + newConfig.Sync.FromHost.Nodes.Enabled = false + } + if oldConfig.Sync.FakePersistentvolumes.Enabled != nil && *oldConfig.Sync.FakePersistentvolumes.Enabled { + newConfig.Sync.ToHost.PersistentVolumes.Enabled = false + } + if oldConfig.Sync.Nodes.Enabled != nil { + newConfig.Sync.FromHost.Nodes.Enabled = *oldConfig.Sync.Nodes.Enabled + } + if oldConfig.Sync.Nodes.FakeKubeletIPs != nil { + newConfig.Networking.Advanced.ProxyKubelets.ByIP = *oldConfig.Sync.Nodes.FakeKubeletIPs + } + if oldConfig.Sync.Nodes.SyncAllNodes != nil { + newConfig.Sync.FromHost.Nodes.Selector.All = *oldConfig.Sync.Nodes.SyncAllNodes + } + if oldConfig.Sync.Nodes.NodeSelector != "" { + newConfig.Sync.FromHost.Nodes.Selector.Labels = mergeIntoMap(make(map[string]string), strings.Split(oldConfig.Sync.Nodes.NodeSelector, ",")) + } + if oldConfig.Sync.Nodes.EnableScheduler != nil { + newConfig.ControlPlane.Advanced.VirtualScheduler.Enabled = *oldConfig.Sync.Nodes.EnableScheduler + } + if oldConfig.Sync.Nodes.SyncNodeChanges != nil { + 
newConfig.Sync.FromHost.Nodes.SyncBackChanges = *oldConfig.Sync.Nodes.SyncNodeChanges + } + if oldConfig.Sync.PersistentVolumes.Enabled != nil { + newConfig.Sync.ToHost.PersistentVolumes.Enabled = *oldConfig.Sync.PersistentVolumes.Enabled + } + if oldConfig.Sync.StorageClasses.Enabled != nil { + newConfig.Sync.ToHost.StorageClasses.Enabled = *oldConfig.Sync.StorageClasses.Enabled + } + if oldConfig.Sync.Hoststorageclasses.Enabled != nil { + newConfig.Sync.FromHost.StorageClasses.Enabled = *oldConfig.Sync.Hoststorageclasses.Enabled + } + if oldConfig.Sync.Priorityclasses.Enabled != nil { + newConfig.Sync.ToHost.PriorityClasses.Enabled = *oldConfig.Sync.Priorityclasses.Enabled + } + if oldConfig.Sync.Networkpolicies.Enabled != nil { + newConfig.Sync.ToHost.NetworkPolicies.Enabled = *oldConfig.Sync.Networkpolicies.Enabled + } + if oldConfig.Sync.Volumesnapshots.Enabled != nil { + newConfig.Sync.ToHost.VolumeSnapshots.Enabled = *oldConfig.Sync.Volumesnapshots.Enabled + } + if oldConfig.Sync.Poddisruptionbudgets.Enabled != nil { + newConfig.Sync.ToHost.PodDisruptionBudgets.Enabled = *oldConfig.Sync.Poddisruptionbudgets.Enabled + } + if oldConfig.Sync.Serviceaccounts.Enabled != nil { + newConfig.Sync.ToHost.ServiceAccounts.Enabled = *oldConfig.Sync.Serviceaccounts.Enabled + } + if oldConfig.Sync.Generic.Config != "" { + genericSyncConfig := &config.ExperimentalGenericSync{} + err := yaml.Unmarshal([]byte(oldConfig.Sync.Generic.Config), genericSyncConfig) + if err != nil { + return fmt.Errorf("decode sync.generic.config: %w", err) + } + + newConfig.Experimental.GenericSync = *genericSyncConfig + } + + return nil +} + +func convertEmbeddedEtcd(oldConfig EmbeddedEtcdValues, newConfig *config.Config) { + if oldConfig.Enabled { + newConfig.ControlPlane.BackingStore.Etcd.Embedded.Enabled = true + newConfig.ControlPlane.BackingStore.Etcd.Deploy.Enabled = false + newConfig.ControlPlane.BackingStore.Database.Embedded.Enabled = false + newConfig.ControlPlane.BackingStore.Database.External.Enabled = false + } + if oldConfig.MigrateFromEtcd { + newConfig.ControlPlane.BackingStore.Etcd.Embedded.MigrateFromDeployedEtcd = true + } +} + +func convertK8sSyncerConfig(oldConfig K8sSyncerValues, newConfig *config.Config) error { + newConfig.ControlPlane.StatefulSet.Persistence.AddVolumes = oldConfig.Volumes + if oldConfig.PriorityClassName != "" { + newConfig.ControlPlane.StatefulSet.Scheduling.PriorityClassName = oldConfig.PriorityClassName + } + newConfig.ControlPlane.StatefulSet.Scheduling.NodeSelector = oldConfig.NodeSelector + newConfig.ControlPlane.StatefulSet.Scheduling.Affinity = oldConfig.Affinity + if len(oldConfig.Tolerations) > 0 { + newConfig.ControlPlane.StatefulSet.Scheduling.Tolerations = oldConfig.Tolerations + } + newConfig.ControlPlane.StatefulSet.Pods.Annotations = oldConfig.PodAnnotations + newConfig.ControlPlane.StatefulSet.Pods.Labels = oldConfig.PodLabels + if len(oldConfig.PodSecurityContext) > 0 { + newConfig.ControlPlane.StatefulSet.Security.PodSecurityContext = oldConfig.PodSecurityContext + } + if len(oldConfig.SecurityContext) > 0 { + newConfig.ControlPlane.StatefulSet.Security.ContainerSecurityContext = oldConfig.SecurityContext + } + + return convertSyncerConfig(oldConfig.SyncerValues, newConfig) +} + +func convertSyncerConfig(oldConfig SyncerValues, newConfig *config.Config) error { + convertStatefulSetImage(oldConfig.Image, &newConfig.ControlPlane.StatefulSet.Image) + if oldConfig.ImagePullPolicy != "" { + newConfig.ControlPlane.StatefulSet.ImagePullPolicy = 
oldConfig.ImagePullPolicy + } + + newConfig.ControlPlane.StatefulSet.Env = append(newConfig.ControlPlane.StatefulSet.Env, oldConfig.Env...) + + if oldConfig.LivenessProbe.Enabled != nil { + newConfig.ControlPlane.StatefulSet.Probes.LivenessProbe.Enabled = *oldConfig.LivenessProbe.Enabled + } + if oldConfig.ReadinessProbe.Enabled != nil { + newConfig.ControlPlane.StatefulSet.Probes.StartupProbe.Enabled = *oldConfig.ReadinessProbe.Enabled + } + if oldConfig.ReadinessProbe.Enabled != nil { + newConfig.ControlPlane.StatefulSet.Probes.ReadinessProbe.Enabled = *oldConfig.ReadinessProbe.Enabled + } + + newConfig.ControlPlane.StatefulSet.Persistence.AddVolumeMounts = append(newConfig.ControlPlane.StatefulSet.Persistence.AddVolumeMounts, oldConfig.ExtraVolumeMounts...) + + if len(oldConfig.VolumeMounts) > 0 { + return fmt.Errorf("syncer.volumeMounts is not allowed anymore, please remove this field or use syncer.extraVolumeMounts") + } + if len(oldConfig.Resources.Limits) > 0 || len(oldConfig.Resources.Requests) > 0 { + newConfig.ControlPlane.StatefulSet.Resources = oldConfig.Resources + } + + newConfig.ControlPlane.Service.Annotations = oldConfig.ServiceAnnotations + if oldConfig.Replicas > 0 { + newConfig.ControlPlane.StatefulSet.HighAvailability.Replicas = oldConfig.Replicas + } + if oldConfig.KubeConfigContextName != "" { + newConfig.ExportKubeConfig.Context = oldConfig.KubeConfigContextName + } + applyStorage(oldConfig.Storage, newConfig) + + if len(oldConfig.Annotations) > 0 { + newConfig.ControlPlane.StatefulSet.Annotations = oldConfig.Annotations + } + if len(oldConfig.Labels) > 0 { + newConfig.ControlPlane.StatefulSet.Labels = oldConfig.Labels + } + + return convertSyncerExtraArgs(oldConfig.ExtraArgs, newConfig) +} + +func convertSyncerExtraArgs(extraArgs []string, newConfig *config.Config) error { + var err error + var flag, value string + + for { + flag, value, extraArgs, err = nextFlagValue(extraArgs) + if err != nil { + return err + } else if flag == "" { + break + } + + err = migrateFlag(flag, value, newConfig) + if err != nil { + return fmt.Errorf("migrate extra syncer flag --%s: %w", flag, err) + } + } + + return nil +} + +func migrateFlag(key, value string, newConfig *config.Config) error { + switch key { + case "pro-license-secret": + return fmt.Errorf("cannot be used directly, use proLicenseSecret value") + case "remote-kube-config": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.IsolatedControlPlane.Enabled = true + newConfig.Experimental.IsolatedControlPlane.KubeConfig = value + case "remote-namespace": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.IsolatedControlPlane.Namespace = value + case "remote-service-name": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.IsolatedControlPlane.Service = value + case "integrated-coredns": + return fmt.Errorf("cannot be used directly") + case "use-coredns-plugin": + return fmt.Errorf("cannot be used directly") + case "noop-syncer": + return fmt.Errorf("cannot be used directly") + case "sync-k8s-service": + return fmt.Errorf("cannot be used directly") + case "etcd-embedded": + return fmt.Errorf("cannot be used directly") + case "migrate-from": + return fmt.Errorf("cannot be used directly") + case "etcd-replicas": + return fmt.Errorf("cannot be used directly") + case "enforce-validating-hook": + return fmt.Errorf("cannot be used directly") + case "enforce-mutating-hook": + return fmt.Errorf("cannot be used directly") + 
case "kube-config-context-name": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.ExportKubeConfig.Context = value + case "sync": + return fmt.Errorf("cannot be used directly, use the sync.*.enabled options instead") + case "request-header-ca-cert": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.VirtualClusterKubeConfig.RequestHeaderCACert = value + case "client-ca-cert": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.VirtualClusterKubeConfig.ClientCACert = value + case "server-ca-cert": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.VirtualClusterKubeConfig.ServerCACert = value + case "server-ca-key": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.VirtualClusterKubeConfig.ServerCAKey = value + case "kube-config": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.VirtualClusterKubeConfig.KubeConfig = value + case "tls-san": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.ControlPlane.Proxy.ExtraSANs = append(newConfig.ControlPlane.Proxy.ExtraSANs, strings.Split(value, ",")...) + case "out-kube-config-secret": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.ExportKubeConfig.Secret.Name = value + case "out-kube-config-secret-namespace": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.ExportKubeConfig.Secret.Namespace = value + case "out-kube-config-server": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.ExportKubeConfig.Server = value + case "target-namespace": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.Experimental.SyncSettings.TargetNamespace = value + case "service-name": + return fmt.Errorf("this is not supported anymore, the service needs to be the vCluster name") + case "name": + return fmt.Errorf("this is not supported anymore, the name needs to be the helm release name") + case "set-owner": + if value == "false" { + newConfig.Experimental.SyncSettings.SetOwner = false + } + case "bind-address": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.ControlPlane.Proxy.BindAddress = value + case "port": + return fmt.Errorf("this is not supported anymore, the port needs to be 8443") + case "sync-all-nodes": + if value == "" || value == "true" { + newConfig.Sync.FromHost.Nodes.Selector.All = true + } else if value == "false" { + newConfig.Sync.FromHost.Nodes.Selector.All = false + } + case "enable-scheduler": + if value == "" || value == "true" { + newConfig.ControlPlane.Advanced.VirtualScheduler.Enabled = true + } else if value == "false" { + newConfig.ControlPlane.Advanced.VirtualScheduler.Enabled = false + } + case "disable-fake-kubelets": + if value == "" || value == "true" { + newConfig.Networking.Advanced.ProxyKubelets.ByHostname = false + newConfig.Networking.Advanced.ProxyKubelets.ByIP = false + } + case "fake-kubelet-ips": + if value == "" || value == "true" { + newConfig.Networking.Advanced.ProxyKubelets.ByIP = true + } else if value == "false" { + newConfig.Networking.Advanced.ProxyKubelets.ByIP = false + } + case "node-clear-image-status": + if value == "" || value == "true" { + newConfig.Sync.FromHost.Nodes.ClearImageStatus = true + } else if value == "false" { + newConfig.Sync.FromHost.Nodes.ClearImageStatus = false + } + case "translate-image": + if value == "" { + return 
fmt.Errorf("value is missing") + } + + newConfig.Sync.ToHost.Pods.TranslateImage = mergeIntoMap(newConfig.Sync.ToHost.Pods.TranslateImage, strings.Split(value, ",")) + case "enforce-node-selector": + if value == "false" { + return fmt.Errorf("this is not supported anymore, node selector will from now on always be enforced") + } + case "enforce-toleration": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.Sync.ToHost.Pods.EnforceTolerations = append(newConfig.Sync.ToHost.Pods.EnforceTolerations, strings.Split(value, ",")...) + case "node-selector": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.Sync.FromHost.Nodes.Enabled = true + newConfig.Sync.FromHost.Nodes.Selector.Labels = mergeIntoMap(newConfig.Sync.FromHost.Nodes.Selector.Labels, strings.Split(value, ",")) + case "service-account": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.ControlPlane.Advanced.WorkloadServiceAccount.Enabled = false + newConfig.ControlPlane.Advanced.WorkloadServiceAccount.Name = value + case "override-hosts": + if value == "" || value == "true" { + newConfig.Sync.ToHost.Pods.RewriteHosts.Enabled = true + } else if value == "false" { + newConfig.Sync.ToHost.Pods.RewriteHosts.Enabled = false + } + case "override-hosts-container-image": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.Sync.ToHost.Pods.RewriteHosts.InitContainerImage = value + case "cluster-domain": + if value == "" { + return fmt.Errorf("value is missing") + } + + newConfig.Networking.Advanced.ClusterDomain = value + case "leader-elect": + return fmt.Errorf("cannot be used directly") + case "lease-duration": + if value == "" { + return fmt.Errorf("value is missing") + } + i, err := strconv.Atoi(value) + if err != nil { + return err + } + newConfig.ControlPlane.StatefulSet.HighAvailability.LeaseDuration = i + case "renew-deadline": + if value == "" { + return fmt.Errorf("value is missing") + } + i, err := strconv.Atoi(value) + if err != nil { + return err + } + newConfig.ControlPlane.StatefulSet.HighAvailability.RenewDeadline = i + case "retry-period": + if value == "" { + return fmt.Errorf("value is missing") + } + i, err := strconv.Atoi(value) + if err != nil { + return err + } + newConfig.ControlPlane.StatefulSet.HighAvailability.RetryPeriod = i + case "disable-plugins": + return fmt.Errorf("this is not supported anymore") + case "plugin-listen-address": + return fmt.Errorf("this is not supported anymore") + case "default-image-registry": + return fmt.Errorf("shouldn't be used directly, use defaultImageRegistry instead") + case "enforce-pod-security-standard": + return fmt.Errorf("shouldn't be used directly, use isolation.podSecurityStandard instead") + case "plugins": + return fmt.Errorf("shouldn't be used directly") + case "sync-labels": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.SyncSettings.SyncLabels = append(newConfig.Experimental.SyncSettings.SyncLabels, strings.Split(value, ",")...) 
+ case "map-virtual-service": + return fmt.Errorf("shouldn't be used directly") + case "map-host-service": + return fmt.Errorf("shouldn't be used directly") + case "host-metrics-bind-address": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.SyncSettings.HostMetricsBindAddress = value + case "virtual-metrics-bind-address": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.SyncSettings.VirtualMetricsBindAddress = value + case "mount-physical-host-paths": + if value == "" || value == "true" { + newConfig.ControlPlane.HostPathMapper.Enabled = true + } + case "multi-namespace-mode": + if value == "" || value == "true" { + newConfig.Experimental.MultiNamespaceMode.Enabled = true + } + case "namespace-labels": + if value == "" { + return fmt.Errorf("value is missing") + } + newConfig.Experimental.MultiNamespaceMode.NamespaceLabels = mergeIntoMap(newConfig.Experimental.MultiNamespaceMode.NamespaceLabels, strings.Split(value, ",")) + case "sync-all-configmaps": + if value == "" || value == "true" { + newConfig.Sync.ToHost.ConfigMaps.All = true + } + case "sync-all-secrets": + if value == "" || value == "true" { + newConfig.Sync.ToHost.Secrets.All = true + } + case "proxy-metrics-server": + if value == "" || value == "true" { + newConfig.Observability.Metrics.Proxy.Pods = true + newConfig.Observability.Metrics.Proxy.Nodes = true + } + case "service-account-token-secrets": + if value == "" || value == "true" { + newConfig.Sync.ToHost.Pods.UseSecretsForSATokens = true + } + case "sync-node-changes": + if value == "" || value == "true" { + newConfig.Sync.FromHost.Nodes.SyncBackChanges = true + } + default: + return fmt.Errorf("flag %s does not exist", key) + } + + return nil +} + +func applyStorage(oldConfig Storage, newConfig *config.Config) { + if oldConfig.Persistence != nil { + newConfig.ControlPlane.StatefulSet.Persistence.VolumeClaim.Enabled = config.StrBool(strconv.FormatBool(*oldConfig.Persistence)) + } + if oldConfig.Size != "" { + newConfig.ControlPlane.StatefulSet.Persistence.VolumeClaim.Size = oldConfig.Size + } + if oldConfig.ClassName != "" { + newConfig.ControlPlane.StatefulSet.Persistence.VolumeClaim.StorageClass = oldConfig.ClassName + } +} + +func convertVClusterConfig(oldConfig VClusterValues, retDistroCommon *config.DistroCommon, retDistroContainer *config.DistroContainer, newConfig *config.Config) error { + retDistroCommon.Env = oldConfig.Env + convertImage(oldConfig.Image, &retDistroContainer.Image) + if len(oldConfig.Resources) > 0 { + retDistroCommon.Resources = oldConfig.Resources + } + retDistroContainer.ExtraArgs = append(retDistroContainer.ExtraArgs, oldConfig.ExtraArgs...) + if oldConfig.ImagePullPolicy != "" { + retDistroContainer.ImagePullPolicy = oldConfig.ImagePullPolicy + } + + if len(oldConfig.BaseArgs) > 0 { + return fmt.Errorf("vcluster.baseArgs is not supported anymore, please use controlPlane.distro.k3s.command or controlPlane.distro.k3s.extraArgs instead") + } + if len(oldConfig.Command) > 0 { + return fmt.Errorf("vcluster.command is not supported anymore, please use controlPlane.distro.k3s.command or controlPlane.distro.k3s.extraArgs instead") + } + if oldConfig.PriorityClassName != "" { + return fmt.Errorf("vcluster.priorityClassName is not supported anymore, please manually upgrade this field") + } + + newConfig.ControlPlane.StatefulSet.Persistence.AddVolumeMounts = append(newConfig.ControlPlane.StatefulSet.Persistence.AddVolumeMounts, oldConfig.ExtraVolumeMounts...) 
+ newConfig.ControlPlane.StatefulSet.Persistence.AddVolumes = append(newConfig.ControlPlane.StatefulSet.Persistence.AddVolumes, oldConfig.VolumeMounts...) + return nil +} + +func convertStatefulSetImage(image string, into *config.StatefulSetImage) { + if image == "" { + return + } + + imageSplitted := strings.Split(image, ":") + if len(imageSplitted) == 1 { + return + } + + into.Repository = strings.Join(imageSplitted[:len(imageSplitted)-1], ":") + into.Tag = imageSplitted[len(imageSplitted)-1] +} + +func convertImage(image string, into *config.Image) { + if image == "" { + return + } + + imageSplitted := strings.Split(image, ":") + if len(imageSplitted) == 1 { + return + } + + into.Repository = strings.Join(imageSplitted[:len(imageSplitted)-1], ":") + into.Tag = imageSplitted[len(imageSplitted)-1] +} + +func mergeIntoMap(retMap map[string]string, arr []string) map[string]string { + if retMap == nil { + retMap = map[string]string{} + } + + for _, value := range arr { + splitValue := strings.SplitN(strings.TrimSpace(value), "=", 2) + if len(splitValue) != 2 { + continue + } + + retMap[splitValue[0]] = splitValue[1] + } + + return retMap +} + +func nextFlagValue(args []string) (string, string, []string, error) { + if len(args) == 0 { + return "", "", nil, nil + } else if !strings.HasPrefix(args[0], "--") { + return "", "", nil, fmt.Errorf("unexpected extra argument %s", args[0]) + } + + flagName := strings.TrimPrefix(args[0], "--") + args = args[1:] + + // check if flag has value + if strings.Contains(flagName, "=") { + splittedFlag := strings.SplitN(flagName, "=", 2) + return splittedFlag[0], splittedFlag[1], args, nil + } else if len(args) > 0 && !strings.HasPrefix(args[0], "--") { + value := args[0] + args = args[1:] + return flagName, value, args, nil + } + + return flagName, "", args, nil +} + +func convertObject(from, to interface{}) error { + out, err := json.Marshal(from) + if err != nil { + return err + } + + return json.Unmarshal(out, to) +} diff --git a/vendor/github.com/loft-sh/vcluster/pkg/config/legacy_options.go b/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/options.go similarity index 99% rename from vendor/github.com/loft-sh/vcluster/pkg/config/legacy_options.go rename to vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/options.go index 1b1cda63..e229d02d 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/config/legacy_options.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/config/legacyconfig/options.go @@ -1,8 +1,4 @@ -package config - -const ( - DefaultHostsRewriteImage = "library/alpine:3.13.1" -) +package legacyconfig // LegacyVirtualClusterOptions holds the cmd flags type LegacyVirtualClusterOptions struct { diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/deploy/start.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/deploy/start.go index 43c75762..526d1dca 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/deploy/start.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/deploy/start.go @@ -1,6 +1,8 @@ package deploy import ( + "time" + "github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd" "github.com/loft-sh/vcluster/pkg/config" @@ -37,8 +39,8 @@ func RegisterInitManifestsController(controllerCtx *config.ControllerContext) er for { result, err := controller.Apply(controllerCtx.Context, controllerCtx.Config) if err != nil { - klog.Errorf("Error reconciling init_configmap: %v", err) - break + klog.Errorf("Error deploying manifests: %v", err) + time.Sleep(time.Second * 10) } else if 
!result.Requeue { break } diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/syncer.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/syncer.go index cf38c5f4..d8dfbcf5 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/syncer.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/syncer.go @@ -23,32 +23,57 @@ import ( func New(ctx *synccontext.RegisterContext) (syncer.Object, error) { t := translator.NewNamespacedTranslator(ctx, "configmap", &corev1.ConfigMap{}) - t.SetNameTranslator(ConfigMapNameTranslator) + return &configMapSyncer{ NamespacedTranslator: t, - syncAllConfigMaps: ctx.Config.Sync.ToHost.ConfigMaps.All, + syncAllConfigMaps: ctx.Config.Sync.ToHost.ConfigMaps.All, + multiNamespaceMode: ctx.Config.Experimental.MultiNamespaceMode.Enabled, }, nil } type configMapSyncer struct { translator.NamespacedTranslator - syncAllConfigMaps bool + syncAllConfigMaps bool + multiNamespaceMode bool } -func ConfigMapNameTranslator(vNN types.NamespacedName, _ client.Object) string { - name := translate.Default.PhysicalName(vNN.Name, vNN.Namespace) - if name == "kube-root-ca.crt" { - name = translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName) +var _ syncer.IndicesRegisterer = &configMapSyncer{} + +func (s *configMapSyncer) VirtualToHost(ctx context.Context, req types.NamespacedName, vObj client.Object) types.NamespacedName { + if s.multiNamespaceMode && req.Name == "kube-root-ca.crt" { + return types.NamespacedName{ + Name: translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName), + Namespace: s.NamespacedTranslator.VirtualToHost(ctx, req, vObj).Namespace, + } } - return name + + return s.NamespacedTranslator.VirtualToHost(ctx, req, vObj) } -var _ syncer.IndicesRegisterer = &configMapSyncer{} +func (s *configMapSyncer) HostToVirtual(ctx context.Context, req types.NamespacedName, pObj client.Object) types.NamespacedName { + if s.multiNamespaceMode && req.Name == translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName) { + return types.NamespacedName{ + Name: "kube-root-ca.crt", + Namespace: s.NamespacedTranslator.HostToVirtual(ctx, req, pObj).Namespace, + } + } else if s.multiNamespaceMode && req.Name == "kube-root-ca.crt" { + // ignore kube-root-ca.crt from host + return types.NamespacedName{} + } + + return s.NamespacedTranslator.HostToVirtual(ctx, req, pObj) +} func (s *configMapSyncer) RegisterIndices(ctx *synccontext.RegisterContext) error { - err := s.NamespacedTranslator.RegisterIndices(ctx) + err := ctx.VirtualManager.GetFieldIndexer().IndexField(ctx.Context, &corev1.ConfigMap{}, constants.IndexByPhysicalName, func(rawObj client.Object) []string { + if s.multiNamespaceMode && rawObj.GetName() == "kube-root-ca.crt" { + return []string{translate.Default.PhysicalNamespace(rawObj.GetNamespace()) + "/" + translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName)} + } + + return []string{translate.Default.PhysicalNamespace(rawObj.GetNamespace()) + "/" + translate.Default.PhysicalName(rawObj.GetName(), rawObj.GetNamespace())} + }) if err != nil { return err } @@ -56,7 +81,7 @@ func (s *configMapSyncer) RegisterIndices(ctx *synccontext.RegisterContext) erro // index pods by their used config maps return ctx.VirtualManager.GetFieldIndexer().IndexField(ctx.Context, &corev1.Pod{}, constants.IndexByConfigMap, func(rawObj client.Object) []string { pod := 
rawObj.(*corev1.Pod) - return ConfigNamesFromPod(pod) + return configNamesFromPod(pod) }) } @@ -128,7 +153,7 @@ func mapPods(_ context.Context, obj client.Object) []reconcile.Request { } requests := []reconcile.Request{} - names := ConfigNamesFromPod(pod) + names := configNamesFromPod(pod) for _, name := range names { splitted := strings.Split(name, "/") if len(splitted) == 2 { diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/translate.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/translate.go index 2c9090ea..7133d866 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/translate.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/translate.go @@ -6,11 +6,14 @@ import ( "github.com/loft-sh/vcluster/pkg/controllers/syncer/translator" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) func (s *configMapSyncer) translate(ctx context.Context, vObj client.Object) *corev1.ConfigMap { - return s.TranslateMetadata(ctx, vObj).(*corev1.ConfigMap) + pObj := s.TranslateMetadata(ctx, vObj).(*corev1.ConfigMap) + pObj.SetName(s.VirtualToHost(ctx, types.NamespacedName{Name: vObj.GetName(), Namespace: vObj.GetNamespace()}, vObj).Name) + return pObj } func (s *configMapSyncer) translateUpdate(ctx context.Context, pObj, vObj *corev1.ConfigMap) *corev1.ConfigMap { diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/util.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/util.go index 02432dd3..c33e47f7 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/util.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps/util.go @@ -5,16 +5,16 @@ import ( corev1 "k8s.io/api/core/v1" ) -func ConfigNamesFromPod(pod *corev1.Pod) []string { +func configNamesFromPod(pod *corev1.Pod) []string { configMaps := []string{} for _, c := range pod.Spec.Containers { - configMaps = append(configMaps, ConfigNamesFromContainer(pod.Namespace, &c)...) + configMaps = append(configMaps, configNamesFromContainer(pod.Namespace, &c)...) } for _, c := range pod.Spec.InitContainers { - configMaps = append(configMaps, ConfigNamesFromContainer(pod.Namespace, &c)...) + configMaps = append(configMaps, configNamesFromContainer(pod.Namespace, &c)...) } for _, c := range pod.Spec.EphemeralContainers { - configMaps = append(configMaps, ConfigNamesFromEphemeralContainer(pod.Namespace, &c)...) + configMaps = append(configMaps, configNamesFromEphemeralContainer(pod.Namespace, &c)...) 
} for i := range pod.Spec.Volumes { if pod.Spec.Volumes[i].ConfigMap != nil { @@ -31,7 +31,7 @@ func ConfigNamesFromPod(pod *corev1.Pod) []string { return translate.UniqueSlice(configMaps) } -func ConfigNamesFromContainer(namespace string, container *corev1.Container) []string { +func configNamesFromContainer(namespace string, container *corev1.Container) []string { configNames := []string{} for _, env := range container.Env { if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" { @@ -46,7 +46,7 @@ func ConfigNamesFromContainer(namespace string, container *corev1.Container) []s return configNames } -func ConfigNamesFromEphemeralContainer(namespace string, container *corev1.EphemeralContainer) []string { +func configNamesFromEphemeralContainer(namespace string, container *corev1.EphemeralContainer) []string { configNames := []string{} for _, env := range container.Env { if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name != "" { diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/syncer.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/syncer.go index d3e3df5f..f640f519 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/syncer.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/syncer.go @@ -313,7 +313,7 @@ func (s *podSyncer) Sync(ctx *synccontext.SyncContext, pObj client.Object, vObj // translate services to environment variables serviceEnv := translatepods.ServicesToEnvironmentVariables(vPod.Spec.EnableServiceLinks, ptrServiceList, kubeIP) for i := range vPod.Spec.EphemeralContainers { - envVar, envFrom := translatepods.ContainerEnv(vPod.Spec.EphemeralContainers[i].Env, vPod.Spec.EphemeralContainers[i].EnvFrom, vPod, serviceEnv) + envVar, envFrom := s.podTranslator.TranslateContainerEnv(vPod.Spec.EphemeralContainers[i].Env, vPod.Spec.EphemeralContainers[i].EnvFrom, vPod, serviceEnv) vPod.Spec.EphemeralContainers[i].Env = envVar vPod.Spec.EphemeralContainers[i].EnvFrom = envFrom } diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate/translator.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate/translator.go index a4d8899e..b9a424af 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate/translator.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/resources/pods/translate/translator.go @@ -11,7 +11,6 @@ import ( "strconv" "strings" - "github.com/loft-sh/vcluster/pkg/controllers/resources/configmaps" "github.com/loft-sh/vcluster/pkg/controllers/resources/priorityclasses" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" "github.com/loft-sh/vcluster/pkg/util/loghelper" @@ -22,7 +21,6 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" @@ -55,6 +53,8 @@ var ( type Translator interface { Translate(ctx context.Context, vPod *corev1.Pod, services []*corev1.Service, dnsIP string, kubeIP string) (*corev1.Pod, error) Diff(ctx context.Context, vPod, pPod *corev1.Pod) (*corev1.Pod, error) + + TranslateContainerEnv(envVar []corev1.EnvVar, envFrom []corev1.EnvFromSource, vPod *corev1.Pod, serviceEnvMap map[string]string) ([]corev1.EnvVar, []corev1.EnvFromSource) } func 
NewTranslator(ctx *synccontext.RegisterContext, eventRecorder record.EventRecorder) (Translator, error) { @@ -79,6 +79,8 @@ func NewTranslator(ctx *synccontext.RegisterContext, eventRecorder record.EventR defaultImageRegistry: ctx.Config.ControlPlane.Advanced.DefaultImageRegistry, + multiNamespaceMode: ctx.Config.Experimental.MultiNamespaceMode.Enabled, + serviceAccountSecretsEnabled: ctx.Config.Sync.ToHost.Pods.UseSecretsForSATokens, clusterDomain: ctx.Config.Networking.Advanced.ClusterDomain, serviceAccount: ctx.Config.ControlPlane.Advanced.WorkloadServiceAccount.Name, @@ -107,6 +109,8 @@ type translator struct { defaultImageRegistry string + multiNamespaceMode bool + // this is needed for host path mapper (legacy) mountPhysicalHostPaths bool @@ -268,7 +272,7 @@ func (t *translator) Translate(ctx context.Context, vPod *corev1.Pod, services [ // translate containers for i := range pPod.Spec.Containers { - envVar, envFrom := ContainerEnv(pPod.Spec.Containers[i].Env, pPod.Spec.Containers[i].EnvFrom, vPod, serviceEnv) + envVar, envFrom := t.TranslateContainerEnv(pPod.Spec.Containers[i].Env, pPod.Spec.Containers[i].EnvFrom, vPod, serviceEnv) pPod.Spec.Containers[i].Env = envVar pPod.Spec.Containers[i].EnvFrom = envFrom pPod.Spec.Containers[i].Image = t.imageTranslator.Translate(pPod.Spec.Containers[i].Image) @@ -276,7 +280,7 @@ func (t *translator) Translate(ctx context.Context, vPod *corev1.Pod, services [ // translate init containers for i := range pPod.Spec.InitContainers { - envVar, envFrom := ContainerEnv(pPod.Spec.InitContainers[i].Env, pPod.Spec.InitContainers[i].EnvFrom, vPod, serviceEnv) + envVar, envFrom := t.TranslateContainerEnv(pPod.Spec.InitContainers[i].Env, pPod.Spec.InitContainers[i].EnvFrom, vPod, serviceEnv) pPod.Spec.InitContainers[i].Env = envVar pPod.Spec.InitContainers[i].EnvFrom = envFrom pPod.Spec.InitContainers[i].Image = t.imageTranslator.Translate(pPod.Spec.InitContainers[i].Image) @@ -284,7 +288,7 @@ func (t *translator) Translate(ctx context.Context, vPod *corev1.Pod, services [ // translate ephemeral containers for i := range pPod.Spec.EphemeralContainers { - envVar, envFrom := ContainerEnv(pPod.Spec.EphemeralContainers[i].Env, pPod.Spec.EphemeralContainers[i].EnvFrom, vPod, serviceEnv) + envVar, envFrom := t.TranslateContainerEnv(pPod.Spec.EphemeralContainers[i].Env, pPod.Spec.EphemeralContainers[i].EnvFrom, vPod, serviceEnv) pPod.Spec.EphemeralContainers[i].Env = envVar pPod.Spec.EphemeralContainers[i].EnvFrom = envFrom pPod.Spec.EphemeralContainers[i].Image = t.imageTranslator.Translate(pPod.Spec.EphemeralContainers[i].Image) @@ -352,7 +356,11 @@ func (t *translator) translateVolumes(ctx context.Context, pPod *corev1.Pod, vPo for i := range pPod.Spec.Volumes { if pPod.Spec.Volumes[i].ConfigMap != nil { - pPod.Spec.Volumes[i].ConfigMap.Name = configmaps.ConfigMapNameTranslator(types.NamespacedName{Name: pPod.Spec.Volumes[i].ConfigMap.Name, Namespace: vPod.Namespace}, nil) + if t.multiNamespaceMode && pPod.Spec.Volumes[i].ConfigMap.Name == "kube-root-ca.crt" { + pPod.Spec.Volumes[i].ConfigMap.Name = translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName) + } else { + pPod.Spec.Volumes[i].ConfigMap.Name = translate.Default.PhysicalName(pPod.Spec.Volumes[i].ConfigMap.Name, vPod.Namespace) + } } if pPod.Spec.Volumes[i].Secret != nil { pPod.Spec.Volumes[i].Secret.SecretName = translate.Default.PhysicalName(pPod.Spec.Volumes[i].Secret.SecretName, vPod.Namespace) @@ -575,7 +583,7 @@ func translateFieldRef(fieldSelector 
*corev1.ObjectFieldSelector) { } } -func ContainerEnv(envVar []corev1.EnvVar, envFrom []corev1.EnvFromSource, vPod *corev1.Pod, serviceEnvMap map[string]string) ([]corev1.EnvVar, []corev1.EnvFromSource) { +func (t *translator) TranslateContainerEnv(envVar []corev1.EnvVar, envFrom []corev1.EnvFromSource, vPod *corev1.Pod, serviceEnvMap map[string]string) ([]corev1.EnvVar, []corev1.EnvFromSource) { envNameMap := make(map[string]struct{}) for j, env := range envVar { translateDownwardAPI(&envVar[j]) @@ -590,7 +598,11 @@ func ContainerEnv(envVar []corev1.EnvVar, envFrom []corev1.EnvFromSource, vPod * } for j, from := range envFrom { if from.ConfigMapRef != nil && from.ConfigMapRef.Name != "" { - envFrom[j].ConfigMapRef.Name = translate.Default.PhysicalName(from.ConfigMapRef.Name, vPod.Namespace) + if t.multiNamespaceMode && envFrom[j].ConfigMapRef.Name == "kube-root-ca.crt" { + envFrom[j].ConfigMapRef.Name = translate.SafeConcatName("vcluster", "kube-root-ca.crt", "x", translate.VClusterName) + } else { + envFrom[j].ConfigMapRef.Name = translate.Default.PhysicalName(from.ConfigMapRef.Name, vPod.Namespace) + } } if from.SecretRef != nil && from.SecretRef.Name != "" { envFrom[j].SecretRef.Name = translate.Default.PhysicalName(from.SecretRef.Name, vPod.Namespace) diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/syncer.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/syncer.go index f115a2a2..899c1290 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/syncer.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/syncer.go @@ -267,7 +267,7 @@ func (r *SyncController) excludePhysical(ctx context.Context, pObj, vObj client. return false, fmt.Errorf("failed to check if physical object is managed: %w", err) } else if !isManaged { if vObj != nil { - msg := "conflict: cannot sync virtual object as unmanaged physical object exists with desired name" + msg := fmt.Sprintf("conflict: cannot sync virtual object %s/%s as unmanaged physical object %s/%s exists with desired name", vObj.GetNamespace(), vObj.GetName(), pObj.GetNamespace(), pObj.GetName()) r.vEventRecorder.Eventf(vObj, "Warning", "SyncError", msg) return false, fmt.Errorf(msg) } diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/namespaced_translator.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/namespaced_translator.go index de6d817d..38dd7a0e 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/namespaced_translator.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/namespaced_translator.go @@ -34,7 +34,6 @@ func NewNamespacedTranslator(ctx *context.RegisterContext, name string, obj clie type namespacedTranslator struct { name string - nameTranslator translate.PhysicalNamespacedNameTranslator excludedAnnotations []string syncedLabels []string @@ -44,10 +43,6 @@ type namespacedTranslator struct { eventRecorder record.EventRecorder } -func (n *namespacedTranslator) SetNameTranslator(nameTranslator translate.PhysicalNamespacedNameTranslator) { - n.nameTranslator = nameTranslator -} - func (n *namespacedTranslator) EventRecorder() record.EventRecorder { return n.eventRecorder } @@ -103,11 +98,8 @@ func (n *namespacedTranslator) IsManaged(_ context2.Context, pObj client.Object) return translate.Default.IsManaged(pObj), nil } -func (n *namespacedTranslator) VirtualToHost(_ context2.Context, req types.NamespacedName, vObj client.Object) types.NamespacedName { +func (n 
*namespacedTranslator) VirtualToHost(_ context2.Context, req types.NamespacedName, _ client.Object) types.NamespacedName { name := translate.Default.PhysicalName(req.Name, req.Namespace) - if n.nameTranslator != nil { - name = n.nameTranslator(req, vObj) - } return types.NamespacedName{ Namespace: translate.Default.PhysicalNamespace(req.Namespace), diff --git a/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/translator.go b/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/translator.go index 5173ada0..0db6c0a8 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/translator.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/controllers/syncer/translator/translator.go @@ -4,7 +4,6 @@ import ( "context" syncercontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" - "github.com/loft-sh/vcluster/pkg/util/translate" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -56,7 +55,4 @@ type NamespacedTranslator interface { // SyncToHostUpdate updates the given pObj (if not nil) in the target namespace SyncToHostUpdate(ctx *syncercontext.SyncContext, vObj, pObj client.Object) (ctrl.Result, error) - - // SetNameTranslator is a function to override default VirtualToHost name translation - SetNameTranslator(nameTranslator translate.PhysicalNamespacedNameTranslator) } diff --git a/vendor/github.com/loft-sh/vcluster/pkg/plugin/v1/plugin.go b/vendor/github.com/loft-sh/vcluster/pkg/plugin/v1/plugin.go index 36f3904d..12dd2a4d 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/plugin/v1/plugin.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/plugin/v1/plugin.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/loft-sh/vcluster/pkg/config" + "github.com/loft-sh/vcluster/pkg/config/legacyconfig" plugintypes "github.com/loft-sh/vcluster/pkg/plugin/types" "github.com/loft-sh/vcluster/pkg/util/kubeconfig" "github.com/loft-sh/vcluster/pkg/util/loghelper" @@ -166,7 +166,7 @@ func (m *Manager) Start( virtualKubeConfig *rest.Config, physicalKubeConfig *rest.Config, syncerConfig *clientcmdapi.Config, - options *config.LegacyVirtualClusterOptions, + options *legacyconfig.LegacyVirtualClusterOptions, ) error { // set if we have plugins m.hasPlugins.Store(len(options.Plugins) > 0) @@ -239,7 +239,7 @@ func (m *Manager) Start( return m.waitForPlugins(ctx, options) } -func (m *Manager) waitForPlugins(ctx context.Context, options *config.LegacyVirtualClusterOptions) error { +func (m *Manager) waitForPlugins(ctx context.Context, options *legacyconfig.LegacyVirtualClusterOptions) error { for _, plugin := range options.Plugins { klog.Infof("Waiting for plugin %s to register...", plugin) err := wait.PollUntilContextTimeout(ctx, time.Millisecond*100, time.Minute*10, true, func(context.Context) (done bool, err error) { diff --git a/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/config.go b/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/config.go index 9b565809..a55d5352 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/config.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/config.go @@ -30,13 +30,8 @@ type InitConfigPro struct { // PluginConfig is the config the plugin sends back to the syncer type PluginConfig struct { - ClientHooks []*ClientHook `json:"clientHooks,omitempty"` - Interceptors *InterceptorConfig `json:"interceptorConfig,omitempty"` -} - -type InterceptorConfig struct { - Port int `json:"port"` - Interceptors []Interceptor `json:"interceptors"` + 
ClientHooks []*ClientHook `json:"clientHooks,omitempty"` + Interceptors map[string][]InterceptorRule `json:"interceptors,omitempty"` } type ClientHook struct { @@ -45,9 +40,8 @@ type ClientHook struct { Types []string `json:"types,omitempty"` } -type Interceptor struct { +type InterceptorRule struct { APIGroups []string `json:"apiGroups,omitempty"` - HandlerName string `json:"name,omitempty"` Resources []string `json:"resources,omitempty"` ResourceNames []string `json:"resourceNames,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` diff --git a/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/plugin.go b/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/plugin.go index 861b7f1b..410bf5b3 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/plugin.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/plugin/v2/plugin.go @@ -108,7 +108,6 @@ func (m *Manager) Start( for _, vClusterPlugin := range m.Plugins { // build the start request initRequest, err := m.buildInitRequest(filepath.Dir(vClusterPlugin.Path), syncerConfig, vConfig, port) - port++ if err != nil { return fmt.Errorf("build start request: %w", err) @@ -139,14 +138,14 @@ func (m *Manager) Start( } // register Interceptors - if pluginConfig.Interceptors != nil { - err = m.registerInterceptors(*pluginConfig.Interceptors) - if err != nil { - return fmt.Errorf("error adding interceptor for plugin %s: %w", vClusterPlugin.Path, err) - } + err = m.registerInterceptors(pluginConfig.Interceptors, port) + if err != nil { + return fmt.Errorf("error adding interceptor for plugin %s: %w", vClusterPlugin.Path, err) } klog.FromContext(ctx).Info("Successfully loaded plugin", "plugin", vClusterPlugin.Path) + + port++ } return nil @@ -332,32 +331,32 @@ func (m *Manager) HasPlugins() bool { return len(m.Plugins) > 0 } -func validateInterceptor(interceptor Interceptor) error { +func validateInterceptor(interceptor InterceptorRule, name string) error { if len(interceptor.Verbs) == 0 { - return fmt.Errorf("verb is empty in interceptor plugin %s ", interceptor.HandlerName) + return fmt.Errorf("verb is empty in interceptor plugin %s ", name) } // check for wildcards and extra names, which should not be allowed if slices.Contains(interceptor.Resources, "*") && len(interceptor.Resources) > 1 { - return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other resources, or is empty. please either specify * or a list of resource", interceptor.HandlerName) + return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other resources, or is empty. please either specify * or a list of resource", name) } if slices.Contains(interceptor.APIGroups, "*") && len(interceptor.APIGroups) > 1 { - return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other apigroups, or is empty. please either specify * or a list of apigroups", interceptor.HandlerName) + return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other apigroups, or is empty. please either specify * or a list of apigroups", name) } // make sure that if we don't have any nonresourceurl we at least have some group + resource if len(interceptor.NonResourceURLs) == 0 { // check for wildcards and extra names, which should not be allowed if len(interceptor.Resources) == 0 { - return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other resources, or is empty. 
please either specify * or a list of resource", interceptor.HandlerName) + return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other resources, or is empty. please either specify * or a list of resource", name) } if len(interceptor.APIGroups) == 0 { - return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other apigroups, or is empty. please either specify * or a list of apigroups", interceptor.HandlerName) + return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other apigroups, or is empty. please either specify * or a list of apigroups", name) } } if (slices.Contains(interceptor.Verbs, "*") && len(interceptor.Verbs) > 1) || len(interceptor.Verbs) == 0 { - return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other verbs, or is empty. please either specify * or a list of verb", interceptor.HandlerName) + return fmt.Errorf("error while loading the plugins, interceptor for handler %s defines both * and other verbs, or is empty. please either specify * or a list of verb", name) } // having a wildcard char not at the end is forbidden for _, nonResourceURL := range interceptor.NonResourceURLs { @@ -370,31 +369,33 @@ func validateInterceptor(interceptor Interceptor) error { return nil } -func (m *Manager) registerInterceptors(interceptors InterceptorConfig) error { +func (m *Manager) registerInterceptors(interceptors map[string][]InterceptorRule, port int) error { // register the interceptors - for _, interceptorsInfos := range interceptors.Interceptors { + for name, interceptorRules := range interceptors { // make sure that it is valid - if err := validateInterceptor(interceptorsInfos); err != nil { - return err - } + for _, rule := range interceptorRules { + if err := validateInterceptor(rule, name); err != nil { + return err + } - // register resource interceptors for each verb - err := m.registerResourceInterceptor(interceptors.Port, interceptorsInfos) - if err != nil { - return err - } + // register resource interceptors for each verb + err := m.registerResourceInterceptor(port, rule, name) + if err != nil { + return err + } - // register nonresourceurls interceptors for each verb - err = m.registerNonResourceURL(interceptors, interceptorsInfos) - if err != nil { - return err + // register nonresourceurls interceptors for each verb + err = m.registerNonResourceURL(port, rule, name) + if err != nil { + return err + } } } return nil } -func (m *Manager) registerResourceInterceptor(port int, interceptorsInfos Interceptor) error { +func (m *Manager) registerResourceInterceptor(port int, interceptorsInfos InterceptorRule, interceptorName string) error { // add all group/version/verb/resourceName tuples to the map // each group if m.hasConflictWithExistingWildcard(interceptorsInfos.APIGroups, interceptorsInfos.Resources, interceptorsInfos.Verbs, interceptorsInfos.ResourceNames) { @@ -431,10 +432,10 @@ func (m *Manager) registerResourceInterceptor(port int, interceptorsInfos Interc // now add the specific resource names if len(interceptorsInfos.ResourceNames) == 0 { // empty slice means everything is allowed - m.ResourceInterceptorsPorts[apigroup][resource][verb]["*"] = portHandlerName{handlerName: interceptorsInfos.HandlerName, port: port} + m.ResourceInterceptorsPorts[apigroup][resource][verb]["*"] = portHandlerName{handlerName: interceptorName, port: port} } else { for _, name := range interceptorsInfos.ResourceNames { 
- m.ResourceInterceptorsPorts[apigroup][resource][verb][name] = portHandlerName{handlerName: interceptorsInfos.HandlerName, port: port} + m.ResourceInterceptorsPorts[apigroup][resource][verb][name] = portHandlerName{handlerName: interceptorName, port: port} } } } @@ -563,7 +564,7 @@ func hasResourceNameConflit(existing map[string]portHandlerName, resourceName st return ok } -func (m *Manager) registerNonResourceURL(interceptors InterceptorConfig, interceptorsInfos Interceptor) error { +func (m *Manager) registerNonResourceURL(port int, interceptorsInfos InterceptorRule, interceptorName string) error { // register nonresourceurls for each verb for _, nonResourceURL := range interceptorsInfos.NonResourceURLs { // ignore empty resources @@ -575,7 +576,7 @@ func (m *Manager) registerNonResourceURL(interceptors InterceptorConfig, interce return fmt.Errorf("error while loading the plugins, multiple interceptor plugins are registered for the same non resource url %s and verb %s", nonResourceURL, v) } - m.NonResourceInterceptorsPorts[nonResourceURL][v] = portHandlerName{port: interceptors.Port, handlerName: interceptorsInfos.HandlerName} + m.NonResourceInterceptorsPorts[nonResourceURL][v] = portHandlerName{port: port, handlerName: interceptorName} } } return nil @@ -826,7 +827,7 @@ func (m *Manager) WithInterceptors(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { info, ok := request.RequestInfoFrom(r.Context()) if !ok { - klog.V(1).Info("could not determine the infos from the request, serving next handler") + klog.V(1).Infof("could not determine the infos from the request %s, serving next handler", r.URL.Path) next.ServeHTTP(w, r) return } diff --git a/vendor/github.com/loft-sh/vcluster/pkg/procli/client.go b/vendor/github.com/loft-sh/vcluster/pkg/procli/client.go index a720de49..c8243e01 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/procli/client.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/procli/client.go @@ -10,9 +10,6 @@ import ( managementv1 "github.com/loft-sh/api/v3/pkg/apis/management/v1" loftclient "github.com/loft-sh/loftctl/v3/pkg/client" - "github.com/loft-sh/loftctl/v3/pkg/client/helper" - "github.com/loft-sh/loftctl/v3/pkg/client/naming" - kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -26,11 +23,6 @@ type Client interface { Self() *managementv1.Self } -type VirtualClusterInstanceProject struct { - VirtualCluster *managementv1.VirtualClusterInstance - Project *managementv1.Project -} - var ErrConfigNotFound = errors.New("couldn't find vCluster.Pro config") func CreateProClient() (Client, error) { @@ -84,79 +76,3 @@ type client struct { func (c *client) Self() *managementv1.Self { return c.self.DeepCopy() } - -func ListVClusters(ctx context.Context, baseClient Client, virtualClusterName, projectName string) ([]VirtualClusterInstanceProject, error) { - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - // gather projects and virtual cluster instances to access - projects := []*managementv1.Project{} - if projectName != "" { - project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - return nil, fmt.Errorf("couldn't find or access project %s", projectName) - } - - return nil, err - } - - projects = append(projects, project) - } else { - projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, 
metav1.ListOptions{}) - if err != nil || len(projectsList.Items) == 0 { - return nil, err - } - - for _, p := range projectsList.Items { - proj := p - projects = append(projects, &proj) - } - } - - // gather space instances in those projects - virtualClusters := []VirtualClusterInstanceProject{} - for _, p := range projects { - if virtualClusterName != "" { - virtualClusterInstance, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(naming.ProjectNamespace(p.Name)).Get(ctx, virtualClusterName, metav1.GetOptions{}) - if err != nil { - continue - } - - virtualClusters = append(virtualClusters, VirtualClusterInstanceProject{ - VirtualCluster: virtualClusterInstance, - Project: p, - }) - } else { - virtualClusterInstanceList, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(naming.ProjectNamespace(p.Name)).List(ctx, metav1.ListOptions{}) - if err != nil { - continue - } - - for _, virtualClusterInstance := range virtualClusterInstanceList.Items { - s := virtualClusterInstance - virtualClusters = append(virtualClusters, VirtualClusterInstanceProject{ - VirtualCluster: &s, - Project: p, - }) - } - } - } - - // filter out virtual clusters we cannot access - newVirtualClusters := []VirtualClusterInstanceProject{} - for _, virtualCluster := range virtualClusters { - canAccess, err := helper.CanAccessVirtualClusterInstance(managementClient, virtualCluster.VirtualCluster.Namespace, virtualCluster.VirtualCluster.Name) - if err != nil { - return nil, err - } else if !canAccess { - continue - } - - newVirtualClusters = append(newVirtualClusters, virtualCluster) - } - - return newVirtualClusters, nil -} diff --git a/vendor/github.com/loft-sh/vcluster/pkg/server/server.go b/vendor/github.com/loft-sh/vcluster/pkg/server/server.go index 52a41605..57572021 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/server/server.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/server/server.go @@ -353,7 +353,6 @@ func createCachedClient(ctx context.Context, config *rest.Config, namespace stri func (s *Server) buildHandlerChain(serverConfig *server.Config) http.Handler { defaultHandler := DefaultBuildHandlerChain(s.handler, serverConfig) defaultHandler = filters.WithNodeName(defaultHandler, s.currentNamespace, s.fakeKubeletIPs, s.cachedVirtualClient, s.currentNamespaceClient) - defaultHandler = plugin.DefaultManager.WithInterceptors(defaultHandler) return defaultHandler } @@ -424,6 +423,10 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *server.Config) http.Ha handler = genericapifilters.WithTracing(handler, c.TracerProvider) } handler = genericapifilters.WithLatencyTrackers(handler) + + // this is for the plugins to be able to catch the requests with the info in the + // context + handler = plugin.DefaultManager.WithInterceptors(handler) handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver) handler = genericapifilters.WithRequestReceivedTimestamp(handler) diff --git a/vendor/github.com/loft-sh/vcluster/pkg/setup/config.go b/vendor/github.com/loft-sh/vcluster/pkg/setup/config.go index 3c694500..d60f783e 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/setup/config.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/setup/config.go @@ -56,5 +56,13 @@ func InitConfig(vConfig *config.VirtualClusterConfig) error { } } + // check if previously we were using k0s as distro + if vConfig.Distro() != vclusterconfig.K0SDistro { + _, err = os.Stat("/data/k0s") + if err == nil { + return fmt.Errorf("seems like you were using k0s as a distro before and now 
have switched to %s, please make sure to not switch between vCluster distros", vConfig.Distro()) + } + } + return nil } diff --git a/vendor/github.com/loft-sh/vcluster/pkg/setup/controller_context.go b/vendor/github.com/loft-sh/vcluster/pkg/setup/controller_context.go index ab51f581..3e2f13d9 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/setup/controller_context.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/setup/controller_context.go @@ -52,14 +52,14 @@ func NewControllerContext(ctx context.Context, options *config.VirtualClusterCon // local manager bind address localManagerMetrics := "0" - if options.Experimental.SyncSettings.LocalManagerMetricsBindAddress != "" { - localManagerMetrics = options.Experimental.SyncSettings.LocalManagerMetricsBindAddress + if options.Experimental.SyncSettings.HostMetricsBindAddress != "" { + localManagerMetrics = options.Experimental.SyncSettings.HostMetricsBindAddress } // virtual manager bind address virtualManagerMetrics := "0" - if options.Experimental.SyncSettings.VirtualManagerMetricsBindAddress != "" { - virtualManagerMetrics = options.Experimental.SyncSettings.VirtualManagerMetricsBindAddress + if options.Experimental.SyncSettings.VirtualMetricsBindAddress != "" { + virtualManagerMetrics = options.Experimental.SyncSettings.VirtualMetricsBindAddress } // create physical manager diff --git a/vendor/github.com/loft-sh/vcluster/pkg/util/translate/multi_namespace.go b/vendor/github.com/loft-sh/vcluster/pkg/util/translate/multi_namespace.go index b2a8ae64..e36d1959 100644 --- a/vendor/github.com/loft-sh/vcluster/pkg/util/translate/multi_namespace.go +++ b/vendor/github.com/loft-sh/vcluster/pkg/util/translate/multi_namespace.go @@ -66,7 +66,7 @@ func (s *multiNamespace) IsManaged(obj runtime.Object) bool { // If obj is not in the synced namespace OR // If object-name annotation is not set OR // If object-name annotation is different from actual name - if !s.IsTargetedNamespace(metaAccessor.GetNamespace()) || metaAccessor.GetAnnotations() == nil || metaAccessor.GetAnnotations()[NameAnnotation] == "" || metaAccessor.GetAnnotations()[NameAnnotation] != metaAccessor.GetName() { + if !s.IsTargetedNamespace(metaAccessor.GetNamespace()) || metaAccessor.GetAnnotations() == nil || metaAccessor.GetAnnotations()[NameAnnotation] == "" { return false } diff --git a/vendor/github.com/loft-sh/vcluster/test/framework/framework.go b/vendor/github.com/loft-sh/vcluster/test/framework/framework.go index e9a540a5..aef0ebb1 100644 --- a/vendor/github.com/loft-sh/vcluster/test/framework/framework.go +++ b/vendor/github.com/loft-sh/vcluster/test/framework/framework.go @@ -158,7 +158,7 @@ func CreateFramework(ctx context.Context, scheme *runtime.Scheme) error { KubeConfig: vKubeconfigFile.Name(), LocalPort: 14550, // choosing a port that usually should be unused } - err = connectCmd.Connect(ctx, nil, name, nil) + err = connectCmd.Connect(ctx, name, nil) if err != nil { l.Fatalf("failed to connect to the vcluster: %v", err) } diff --git a/vendor/github.com/vmware-labs/yaml-jsonpath/LICENSE b/vendor/github.com/vmware-labs/yaml-jsonpath/LICENSE index bd225ee5..62a56a33 100644 --- a/vendor/github.com/vmware-labs/yaml-jsonpath/LICENSE +++ b/vendor/github.com/vmware-labs/yaml-jsonpath/LICENSE @@ -1,181 +1,181 @@ -yaml-jsonpath -Copyright (c) 2020 VMware, Inc. All rights reserved. - -The Apache 2.0 license (the "License") set forth below applies to all parts of the yaml-jsonpath project. You may not use this file except in compliance with the License. 
+ diff --git a/vendor/modules.txt b/vendor/modules.txt index 8fb77018..2352fe57 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -407,10 +407,9 @@ github.com/loft-sh/utils/pkg/command github.com/loft-sh/utils/pkg/downloader github.com/loft-sh/utils/pkg/downloader/commands github.com/loft-sh/utils/pkg/extract -# github.com/loft-sh/vcluster v0.20.0-alpha.2.0.20240403130844-8bb987ed97b4 +# github.com/loft-sh/vcluster v0.20.0-alpha.3.0.20240409111424-27cde82f6544 ## explicit; go 1.22.0 github.com/loft-sh/vcluster/cmd/vclusterctl/cmd -github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/create github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/localkubernetes github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/app/podprinter github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/find @@ -428,6 +427,7 @@ github.com/loft-sh/vcluster/pkg/authorization/impersonationauthorizer github.com/loft-sh/vcluster/pkg/authorization/kubeletauthorizer github.com/loft-sh/vcluster/pkg/certs github.com/loft-sh/vcluster/pkg/config +github.com/loft-sh/vcluster/pkg/config/legacyconfig github.com/loft-sh/vcluster/pkg/constants github.com/loft-sh/vcluster/pkg/controllers github.com/loft-sh/vcluster/pkg/controllers/coredns