From 6b1c899c67a2aaa16356610c76b257da8c4be5f0 Mon Sep 17 00:00:00 2001 From: Marc Khouzam Date: Thu, 21 Dec 2023 14:51:08 -0500 Subject: [PATCH] Add a Time To Live (TTL) to the inventory cache (#605) Normally, the digest of the plugin inventory is checked every time the DB needs to be read. Although this is faster than downloading the DB each time, there is still between a 2.5s to 4.5s delay in checking the digest. This makes every `plugin search` and `plugin group search` command slower. It also makes some `plugin install --group` commands much slower when all plugins are available in the cache, since the installation of each plugin in the group causes a digest check, even if the plugin binary is already in the cache. This commit provides a type of Time To Live for the DB. This means that when within the TTL, the digest is not checked and the DB is considered valid. The time the digest was last checked is stored as the modification time (mtime) of the digest file. So, whenever the DB needs to be read, if the TTL has not expired since the last time the digest was verified, the DB is directly read from cache; if the TTL has expired, the digest is checked and the DB downloaded if required. On a "plugin source update" or "plugin source init" the TTL is ignored and the digest automatically checked. This is important as either of these commands usually modify the URI of the plugin discovery and therefore invalidates the DB. Note that for any discovery source added through the TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY variable, the digest is checked every time (this TTL feature does not apply); this is because there is no way to know if current cache was downloaded from the same URIs as what is currently in the variable. This is different than for "plugin source update/init" because these two commands can actively force a cache refresh but changing the TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY variable cannot do that. 
The value of the cache TTL is 30 minutes. This means that it can take a CLI up to 30 minutes to notice the publication of new plugins in the central repository. If for some reason a user wants to force a refresh immediately, they can simply do `tanzu plugin source init` (or `tanzu plugin source update default -u ...` if the discovery source is not the default central repository). The TTL value can be overridden using the environment variable TANZU_CLI_PLUGIN_DB_CACHE_TTL_SECONDS. * Store URI in the main plugin inventory digest file To allow the CLI to know if a plugin inventory cache has become invalid because it represents a different URI, we now store the OCI image URI inside the main digest file. Whenever the TTL is checked to know if the digest must be checked, the URI is also checked; if the URI has changed, the digest must be checked. Signed-off-by: Marc Khouzam --- docs/dev/centralized_plugin_discovery.md | 16 ++ pkg/command/discovery_source.go | 31 ++- pkg/command/discovery_source_test.go | 74 +++++- pkg/constants/env_variables.go | 3 + pkg/discovery/interface.go | 13 + pkg/discovery/oci.go | 3 + pkg/discovery/oci_dbbacked.go | 116 ++++++++- pkg/discovery/oci_dbbacked_test.go | 224 +++++++++++++++- test/e2e/Makefile | 1 - test/e2e/airgapped/airgapped_test.go | 51 +++- .../plugin_lifecycle/plugin_lifecycle_test.go | 246 +++++++++++++++++- 11 files changed, 753 insertions(+), 25 deletions(-) diff --git a/docs/dev/centralized_plugin_discovery.md b/docs/dev/centralized_plugin_discovery.md index 4c94050a4..9ae746bf1 100644 --- a/docs/dev/centralized_plugin_discovery.md +++ b/docs/dev/centralized_plugin_discovery.md @@ -34,6 +34,22 @@ The CLI then uses SQLite queries to obtain the desired information about one or more plugins. Using that information, the CLI can either list plugins to the user, or proceed to install plugins. +### Plugin Inventory Cache + +Pulling the plugin inventory OCI image takes a noticeable amount of time for the +CLI. 
Therefore, to keep the CLI quick and responsive, the plugin inventory DB is +cached under `$HOME/.cache/tanzu/plugin_inventory/default` and the CLI uses this +cache whenever it needs to query the DB. This cache has a time-to-live of 30 minutes, +which means that a CLI could at most require 30 minutes to become aware of new +plugins published to the central repository of plugins. + +If for some reason a user wants to force an immediate refresh of the plugin +inventory cache, they can run the `tanzu plugin source init` command. +To refresh the plugin inventory DB, the CLI first compares the digest of the +remote OCI image with the digest stored in the cache; if the digests match, +the DB need not be downloaded and is considered to have been refreshed, which +resets the TTL. + ### Plugin Groups Plugin groups define a list of plugin/version combinations that are applicable diff --git a/pkg/command/discovery_source.go b/pkg/command/discovery_source.go index 4cd69a0cb..3f81e3ae4 100644 --- a/pkg/command/discovery_source.go +++ b/pkg/command/discovery_source.go @@ -80,6 +80,7 @@ func newUpdateDiscoverySourceCmd() *cobra.Command { var updateDiscoverySourceCmd = &cobra.Command{ Use: "update SOURCE_NAME --uri ", Short: "Update a discovery source configuration", + Long: "Update a discovery source configuration and refresh the plugin inventory local cache", // We already include the only flag in the use text, // we therefore don't show '[flags]' in the usage text. DisableFlagsInUseLine: true, @@ -101,6 +102,18 @@ func newUpdateDiscoverySourceCmd() *cobra.Command { return err } + // Check the discovery source *before* we save it in the configuration + // file. This way, if the discovery source is invalid, we don't save it. 
+ // NOTE: We cannot first save and then revert the change if the discovery + // source is invalid because it is possible that the check of the discovery + // will fail with a call to log.Fatal(), which will exit the program before + // we can revert the change; this happens when the discovery source is + // not properly signed. + err = checkDiscoverySource(newDiscoverySource) + if err != nil { + return err + } + err = configlib.SetCLIDiscoverySource(newDiscoverySource) if err != nil { return err @@ -155,6 +168,7 @@ func newInitDiscoverySourceCmd() *cobra.Command { var initDiscoverySourceCmd = &cobra.Command{ Use: "init", Short: "Initialize the discovery source to its default value", + Long: "Initialize the discovery source to its default value and refresh the plugin inventory local cache", Args: cobra.MaximumNArgs(0), // There are no flags DisableFlagsInUseLine: true, @@ -165,7 +179,10 @@ func newInitDiscoverySourceCmd() *cobra.Command { return err } - // Refresh the inventory DB + // Refresh the inventory DB as the URI may have changed. + // It is also useful to refresh the DB even if the URI has not changed; + // this way, a user can force a refresh of the DB by running this command + // without waiting for the TTL to expire. if discoverySource, err := configlib.GetCLIDiscoverySource(config.DefaultStandaloneDiscoveryName); err == nil { // Ignore any failures since the real operation // the user is trying to do is set the config @@ -192,14 +209,18 @@ func createDiscoverySource(dsName, uri string) (configtypes.PluginDiscovery, err Name: dsName, Image: uri, }} - err := checkDiscoverySource(pluginDiscoverySource) - return pluginDiscoverySource, err + return pluginDiscoverySource, nil } // checkDiscoverySource attempts to access the content of the discovery to -// confirm it is valid +// confirm it is valid; this implies refreshing the DB. 
func checkDiscoverySource(source configtypes.PluginDiscovery) error { - discObject, err := discovery.CreateDiscoveryFromV1alpha1(source) + // If the URI has changed, the cache will be refreshed automatically. However, if the URI has not changed, + // normally the TTL would be respected and the cache would not be refreshed. However, we choose to pass + // the WithForceRefresh() option to ensure we refresh the DB no matter if the TTL has expired or not. + // This provides a way for the user to force a refresh of the DB by running "tanzu plugin source init/update" + // without waiting for the TTL to expire. + discObject, err := discovery.CreateDiscoveryFromV1alpha1(source, discovery.WithForceRefresh()) if err != nil { return err } diff --git a/pkg/command/discovery_source_test.go b/pkg/command/discovery_source_test.go index f9de55e85..38db5955b 100644 --- a/pkg/command/discovery_source_test.go +++ b/pkg/command/discovery_source_test.go @@ -10,12 +10,14 @@ import ( "path/filepath" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/vmware-tanzu/tanzu-cli/pkg/common" "github.com/vmware-tanzu/tanzu-cli/pkg/config" "github.com/vmware-tanzu/tanzu-cli/pkg/constants" + "github.com/vmware-tanzu/tanzu-cli/pkg/plugininventory" configlib "github.com/vmware-tanzu/tanzu-plugin-runtime/config" configtypes "github.com/vmware-tanzu/tanzu-plugin-runtime/config/types" "github.com/vmware-tanzu/tanzu-plugin-runtime/log" @@ -29,13 +31,69 @@ func Test_createDiscoverySource(t *testing.T) { assert.NotNil(err) assert.Equal(err.Error(), "discovery source name cannot be empty") - // With an invalid image + // With an invalid image, no error are thrown, as there is no verification in createDiscoverySource() pd, err := createDiscoverySource("fake-oci-discovery-name", "test.registry.com/test-image:v1.0.0") - assert.NotNil(err) - assert.Contains(err.Error(), "unable to fetch the inventory of discovery 'fake-oci-discovery-name' for plugins") + assert.Nil(err) 
assert.NotNil(pd.OCI) assert.Equal(pd.OCI.Name, "fake-oci-discovery-name") assert.Equal(pd.OCI.Image, "test.registry.com/test-image:v1.0.0") + + // With a valid image + pd, err = createDiscoverySource(config.DefaultStandaloneDiscoveryName, constants.TanzuCLIDefaultCentralPluginDiscoveryImage) + assert.Nil(err) + assert.NotNil(pd.OCI) + assert.Equal(pd.OCI.Name, config.DefaultStandaloneDiscoveryName) + assert.Equal(pd.OCI.Image, constants.TanzuCLIDefaultCentralPluginDiscoveryImage) +} + +// test that checkDiscoverySource() will download the DB and digest file +// and that the digest file is updated even though the TTL has not expired. +func Test_checkDiscoverySource(t *testing.T) { + assert := assert.New(t) + + dir, err := os.MkdirTemp("", "test-source") + assert.Nil(err) + defer os.RemoveAll(dir) + + common.DefaultCacheDir = dir + + // Setup a valid image + pd, err := createDiscoverySource(config.DefaultStandaloneDiscoveryName, constants.TanzuCLIDefaultCentralPluginDiscoveryImage) + assert.Nil(err) + assert.NotNil(pd.OCI) + assert.Equal(pd.OCI.Name, config.DefaultStandaloneDiscoveryName) + assert.Equal(pd.OCI.Image, constants.TanzuCLIDefaultCentralPluginDiscoveryImage) + + // Test that when we check the discovery image, the DB gets filled + err = checkDiscoverySource(pd) + assert.Nil(err) + + // Check that the digest file was immediately created + pluginDataDir := filepath.Join(common.DefaultCacheDir, common.PluginInventoryDirName, config.DefaultStandaloneDiscoveryName) + matches, _ := filepath.Glob(filepath.Join(pluginDataDir, "digest.*")) + assert.Equal(1, len(matches)) + + // Get the timestamp of the digest file + digestFile := matches[0] + originalDigestFileStat, err := os.Stat(digestFile) + assert.Nil(err) + + // Check that the DB was downloaded + dbFile := filepath.Join(pluginDataDir, plugininventory.SQliteDBFileName) + _, err = os.Stat(dbFile) + assert.Nil(err) + + // check the discovery source again and make sure the digest file is immediately updated + // 
even though the TTL has not expired. + // sleep for 1 seconds to make sure the timestamp of the digest file is different + time.Sleep(1 * time.Second) + err = checkDiscoverySource(pd) + assert.Nil(err) + newDigestFileStat, err := os.Stat(digestFile) + assert.Nil(err) + + // Check that the digest file was updated + assert.True(newDigestFileStat.ModTime().After(originalDigestFileStat.ModTime())) } // Test_createAndListDiscoverySources test 'tanzu plugin source list' when TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY has set test only discovery sources @@ -294,6 +352,16 @@ func Test_updateDiscoverySources(t *testing.T) { if spec.expectedFailure { // Check we got the correct error assert.Contains(err.Error(), spec.expected) + + // Check the original discovery source was not updated + discoverySources, err := configlib.GetCLIDiscoverySources() + assert.Nil(err) + assert.Equal(1, len(discoverySources)) + + ds := discoverySources[0] + assert.NotNil(ds.OCI) + assert.Equal(config.DefaultStandaloneDiscoveryName, ds.OCI.Name) + assert.Equal("test/uri", ds.OCI.Image) } else { got, err := io.ReadAll(b) assert.Nil(err) diff --git a/pkg/constants/env_variables.go b/pkg/constants/env_variables.go index 33497b2f1..11358791b 100644 --- a/pkg/constants/env_variables.go +++ b/pkg/constants/env_variables.go @@ -42,4 +42,7 @@ const ( // Control the different ActiveHelp options ConfigVariableActiveHelp = "TANZU_ACTIVE_HELP" + + // Change the default value of the plugin inventory cache TTL + ConfigVariablePluginDBCacheTTL = "TANZU_CLI_PLUGIN_DB_CACHE_TTL_SECONDS" ) diff --git a/pkg/discovery/interface.go b/pkg/discovery/interface.go index f71f4ffe7..2642e6973 100644 --- a/pkg/discovery/interface.go +++ b/pkg/discovery/interface.go @@ -41,24 +41,37 @@ type GroupDiscovery interface { // DiscoveryOpts used to customize the plugin discovery process or mechanism type DiscoveryOpts struct { UseLocalCacheOnly bool // UseLocalCacheOnly used to pull the plugin data from the cache + 
ForceRefresh bool // ForceRefresh used to force a refresh of the plugin data PluginDiscoveryCriteria *PluginDiscoveryCriteria GroupDiscoveryCriteria *GroupDiscoveryCriteria } type DiscoveryOptions func(options *DiscoveryOpts) +// WithUseLocalCacheOnly used to get the plugin inventory data without first refreshing the cache +// even if the cache's TTL has expired func WithUseLocalCacheOnly() DiscoveryOptions { return func(o *DiscoveryOpts) { o.UseLocalCacheOnly = true } } +// WithForceRefresh used to force a refresh of the plugin inventory data +// even when the cache's TTL has not expired +func WithForceRefresh() DiscoveryOptions { + return func(o *DiscoveryOpts) { + o.ForceRefresh = true + } +} + +// WithPluginDiscoveryCriteria used to specify the plugin discovery criteria func WithPluginDiscoveryCriteria(criteria *PluginDiscoveryCriteria) DiscoveryOptions { return func(o *DiscoveryOpts) { o.PluginDiscoveryCriteria = criteria } } +// WithGroupDiscoveryCriteria used to specify the group discovery criteria func WithGroupDiscoveryCriteria(criteria *GroupDiscoveryCriteria) DiscoveryOptions { return func(o *DiscoveryOpts) { o.GroupDiscoveryCriteria = criteria diff --git a/pkg/discovery/oci.go b/pkg/discovery/oci.go index 7eabd600c..324c22bb8 100644 --- a/pkg/discovery/oci.go +++ b/pkg/discovery/oci.go @@ -28,6 +28,8 @@ func NewOCIDiscovery(name, image string, options ...DiscoveryOptions) Discovery if useCacheOnlyForTesting, _ := strconv.ParseBool(os.Getenv("TEST_TANZU_CLI_USE_DB_CACHE_ONLY")); useCacheOnlyForTesting { discovery.useLocalCacheOnly = true } + discovery.forceRefresh = opts.ForceRefresh + return discovery } @@ -46,6 +48,7 @@ func NewOCIGroupDiscovery(name, image string, options ...DiscoveryOptions) Group if useCacheOnlyForTesting, _ := strconv.ParseBool(os.Getenv("TEST_TANZU_CLI_USE_DB_CACHE_ONLY")); useCacheOnlyForTesting { discovery.useLocalCacheOnly = true } + discovery.forceRefresh = opts.ForceRefresh return discovery } diff --git 
a/pkg/discovery/oci_dbbacked.go b/pkg/discovery/oci_dbbacked.go index 885d53f19..2f78bf702 100644 --- a/pkg/discovery/oci_dbbacked.go +++ b/pkg/discovery/oci_dbbacked.go @@ -4,10 +4,12 @@ package discovery import ( + "bufio" "fmt" "os" "path/filepath" "strconv" + "time" "github.com/pkg/errors" @@ -21,6 +23,10 @@ import ( "github.com/vmware-tanzu/tanzu-plugin-runtime/log" ) +// inventoryRefreshTTLInSecs is the interval in seconds between two checks of the inventory digest. +// For testing, it can be overridden using the environment variable TANZU_CLI_PLUGIN_DB_CACHE_TTL_SECONDS. +const inventoryRefreshTTLInSecs = 30 * 60 // 30 minutes + // DBBackedOCIDiscovery is an artifact discovery utilizing an OCI image // which contains an SQLite database describing the content of the plugin // discovery. @@ -38,8 +44,11 @@ type DBBackedOCIDiscovery struct { // groupCriteria specifies different conditions that a plugin group must respect to be discovered. // This allows to filter the list of plugins groups that will be returned. groupCriteria *GroupDiscoveryCriteria - // useLocalCacheOnly enable to pull the plugins and plugin groups data from the cache + // useLocalCacheOnly enables to get the inventory data from the cache without first refreshing cache useLocalCacheOnly bool + // forceRefresh enables to force the refresh of the cached inventory data, + // even if the cache TTL has not expired + forceRefresh bool // pluginDataDir is the location where the plugin data will be stored once // extracted from the OCI image pluginDataDir string @@ -173,6 +182,17 @@ func (od *DBBackedOCIDiscovery) listGroupsFromInventory() ([]*plugininventory.Pl // fetchInventoryImage downloads the OCI image containing the information about the // inventory of this discovery and stores it in the cache directory. func (od *DBBackedOCIDiscovery) fetchInventoryImage() error { + if !od.forceRefresh && !od.cacheTTLExpired() { + // If we refreshed the inventory image recently, don't refresh again. 
+ // The inventory image does not need to be up-to-date by the second. + // This avoids uselessly refreshing for commands that are run close together. + // For example: + // 1- installing plugins for a plugin group (it is fast when the plugins are in the cache) + // 2- installing plugins when creating a context (it is fast when the plugins are in the cache) + // 3- multiple "plugin search" and "plugin group search" commands in a row + return nil + } + // check the cache to see if downloaded plugin inventory database is up-to-date or not // by comparing the image digests newCacheHashFileForInventoryImage, newCacheHashFileForMetadataImage, err := od.checkImageCache() @@ -182,6 +202,7 @@ func (od *DBBackedOCIDiscovery) fetchInventoryImage() error { if newCacheHashFileForInventoryImage == "" && newCacheHashFileForMetadataImage == "" { // The cache can be re-used. We are done. + od.resetCacheTTL() return nil } @@ -201,13 +222,30 @@ func (od *DBBackedOCIDiscovery) fetchInventoryImage() error { return err } - // Now that everything is ready, create the digest hash file + // Now that the new DB has been downloaded, we can reset the TTL. + // We do this because it is possible that only the metadata digest has changed, + // so we must reset the TTL on the main digest file. + od.resetCacheTTL() + + // Now that everything is ready, create the digest hash file. + // Both the name of the file and its content are important. if newCacheHashFileForInventoryImage != "" { - _, _ = os.Create(newCacheHashFileForInventoryImage) + if file, err := os.Create(newCacheHashFileForInventoryImage); err == nil { + // We also store the URI of the image in the digest file so that we can + // know in the future if the URI has changed. This has particular value + // for images added using TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY + // since they are not stored in the config file and therefore we cannot associate + // a discovery name with a URI such discoveries. 
+ _, _ = file.WriteString(od.image) + file.Close() + } } + // Also create digest hash file for inventory metadata image if not empty if newCacheHashFileForMetadataImage != "" { - _, _ = os.Create(newCacheHashFileForMetadataImage) + if file, err := os.Create(newCacheHashFileForMetadataImage); err == nil { + file.Close() + } } return nil @@ -269,6 +307,7 @@ func (od *DBBackedOCIDiscovery) checkImageCache() (string, string, error) { // Get the latest digest of the discovery image. // If the cache already contains the image with this digest // we do not need to verify its signature nor to download it again. + log.Infof("Refreshing plugin inventory cache for %q, this will take a few seconds.", od.image) _, hashHexValInventoryImage, err := carvelhelpers.GetImageDigest(od.image) if err != nil { // This will happen when the user has configured an invalid image discovery URI @@ -320,7 +359,21 @@ func (od *DBBackedOCIDiscovery) checkDigestFileExistence(hashHexVal, digestPrefi } } else if len(matches) == 1 { if matches[0] == correctHashFile { - // The hash file exists which means the DB is up-to-date. We are done. + if digestPrefix == "" { + // If the main digest has not changed it means we have the DB for this OCI image URI. + // Normally that implies the URI stored in the main digest file has not changed either. + // However, there are corner cases where the URI of the image could still have + // changed but the digest has not. For example, the URI in the digest + // file can be "example.com/image:latest" and the discovery image has changed + // to "example.com/image"; both URIs are actually the same image with the + // same digest. In such cases, the digest file will be retained since the image + // does not need to be re-downloaded. To deal with such cases, we set the + // (possibly new) URI in the digest file each time the digest is not changed. 
+ if file, err := os.Create(matches[0]); err == nil { + _, _ = file.WriteString(od.image) + file.Close() + } + } return "" } // The hash file indicates a different digest hash. Remove this old hash file @@ -329,3 +382,56 @@ func (od *DBBackedOCIDiscovery) checkDigestFileExistence(hashHexVal, digestPrefi } return correctHashFile } + +func getCacheTTLValue() int { + cacheTTL := inventoryRefreshTTLInSecs + cacheTTLOverride := os.Getenv(constants.ConfigVariablePluginDBCacheTTL) + if cacheTTLOverride != "" { + cacheTTLOverrideValue, err := strconv.Atoi(cacheTTLOverride) + if err == nil && cacheTTLOverrideValue >= 0 { + cacheTTL = cacheTTLOverrideValue + } + } + return cacheTTL +} + +// cacheTTLExpired checks if the last time the cache was refreshed has passed its TTL. +func (od *DBBackedOCIDiscovery) cacheTTLExpired() bool { + matches, _ := filepath.Glob(filepath.Join(od.pluginDataDir, "digest.*")) + if len(matches) == 1 { + file, err := os.Open(matches[0]) + if err != nil { + return true + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if scanner.Scan() { + // Check that the discovery image URI matches what is in the digest file. + // This is important for test discoveries which can be changed by setting + // the TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY variable. + // If the image URI is not correct then the cache must be refreshed. + cachedURI := scanner.Text() + if cachedURI == od.image { + // The URI matches. Now check the modification time of the digest file to see + // if the TTL is expired. + if stat, err := os.Stat(matches[0]); err == nil { + return time.Since(stat.ModTime()) > time.Duration(getCacheTTLValue())*time.Second + } + } + } + } + + // We need to refresh the cache. + return true +} + +// resetCacheTTL resets the modification timestamp of the digest file to the current time. +// This is used to avoid checking the inventory image digest too often. 
+func (od *DBBackedOCIDiscovery) resetCacheTTL() { + matches, _ := filepath.Glob(filepath.Join(od.pluginDataDir, "digest.*")) + if len(matches) == 1 { + var zeroTime time.Time + _ = os.Chtimes(matches[0], zeroTime, time.Now()) + } +} diff --git a/pkg/discovery/oci_dbbacked_test.go b/pkg/discovery/oci_dbbacked_test.go index 910c9bfc7..b031e3b43 100644 --- a/pkg/discovery/oci_dbbacked_test.go +++ b/pkg/discovery/oci_dbbacked_test.go @@ -4,13 +4,19 @@ package discovery import ( + "bufio" "os" + "path/filepath" + "strings" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/vmware-tanzu/tanzu-cli/pkg/common" "github.com/vmware-tanzu/tanzu-cli/pkg/constants" "github.com/vmware-tanzu/tanzu-cli/pkg/plugininventory" + configlib "github.com/vmware-tanzu/tanzu-plugin-runtime/config" configtypes "github.com/vmware-tanzu/tanzu-plugin-runtime/config/types" ) @@ -84,7 +90,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { Context("Without any criteria", func() { It("should have a filter that only ignores hidden plugins", func() { discovery := NewOCIDiscovery("test-discovery", "test-image:latest") - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -103,7 +108,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { }) It("with TANZU_CLI_INCLUDE_DEACTIVATED_PLUGINS_TEST_ONLY=1 the filter should include hidden plugins", func() { discovery := NewOCIDiscovery("test-discovery", "test-image:latest") - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -142,7 +146,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { Arch: filteredArch, } discovery := NewOCIDiscovery("test-discovery", "test-image:latest", 
WithPluginDiscoveryCriteria(criteria)) - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -173,7 +176,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { Arch: filteredArch, } discovery := NewOCIDiscovery("test-discovery", "test-image:latest", WithPluginDiscoveryCriteria(criteria)) - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -225,7 +227,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { Context("Without any criteria", func() { It("should use a filter that ignores hidden groups", func() { discovery := NewOCIDiscovery("test-discovery", "test-image:latest") - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -244,7 +245,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { }) It("with TANZU_CLI_INCLUDE_DEACTIVATED_PLUGINS_TEST_ONLY=1 the filter should include hidden groups", func() { discovery := NewOCIDiscovery("test-discovery", "test-image:latest") - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -279,7 +279,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { Name: filteredName, } discovery := NewOCIGroupDiscovery("test-discovery", "test-image:latest", WithGroupDiscoveryCriteria(criteria)) - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -306,7 +305,6 @@ var _ = Describe("Unit tests for DB-backed 
OCI discovery", func() { Name: filteredName, } discovery := NewOCIGroupDiscovery("test-discovery", "test-image:latest", WithGroupDiscoveryCriteria(criteria)) - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -352,7 +350,6 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { Context("checkImageCache function", func() { It("should show a detailed error", func() { discovery := NewOCIDiscovery("test-discovery", "test-image:latest") - Expect(err).To(BeNil(), "unable to create discovery") dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") @@ -361,5 +358,214 @@ var _ = Describe("Unit tests for DB-backed OCI discovery", func() { Expect(err.Error()).To(ContainSubstring(`plugins discovery image resolution failed. Please check that the repository image URL "test-image:latest" is correct: error getting the image digest: GET https://index.docker.io/v2/library/test-image/manifests/latest`)) }) }) + + Context("checkDigestFileExistence function", func() { + const ( + validDigest = "1234567890" + discoveryName = "test-discovery" + imageURI = "test-image:latest" + ) + var dbDir, digestFile string + var err error + BeforeEach(func() { + // Create a fake db file + dbDir, err = os.MkdirTemp("", "test-cache-dir") + Expect(err).To(BeNil()) + + common.DefaultCacheDir = dbDir + + // Create the directory for the DB file + pluginDBdir := filepath.Join(common.DefaultCacheDir, common.PluginInventoryDirName, discoveryName) + err := os.MkdirAll(pluginDBdir, 0755) + Expect(err).To(BeNil()) + // Create the DB file + pluginDBFile := filepath.Join(pluginDBdir, plugininventory.SQliteDBFileName) + file, err := os.Create(pluginDBFile) + Expect(err).To(BeNil()) + file.Close() + + // Create a the digest file with the URI of the image as its content + digestFile = 
filepath.Join(pluginDBdir, "digest."+validDigest) + file, err = os.Create(digestFile) + Expect(err).To(BeNil()) + _, err = file.WriteString(imageURI) + Expect(err).To(BeNil()) + file.Close() + }) + AfterEach(func() { + os.RemoveAll(dbDir) + }) + + It("should return empty if the digest matches", func() { + discovery := NewOCIDiscovery(discoveryName, imageURI) + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + digestFileName := dbDiscovery.checkDigestFileExistence(validDigest, "") + Expect(digestFileName).To(BeEmpty(), "expected an empty digest filename") + + Expect(checkFileContentIsEqual(digestFile, imageURI)).To(BeTrue(), "expected the digest file to have the same content as the image URI") + }) + It("should have update the URI in the digest file if the digest matches but the URI has changed", func() { + newImageURI := "test-image" + discovery := NewOCIDiscovery(discoveryName, newImageURI) + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + digestFileName := dbDiscovery.checkDigestFileExistence(validDigest, "") + Expect(digestFileName).To(BeEmpty(), "expected an empty digest filename") + + Expect(checkFileContentIsEqual(digestFile, newImageURI)).To(BeTrue(), "expected the digest file to have the same content as the image URI") + }) + It("should return a new digest file name if the digest does not match", func() { + discovery := NewOCIDiscovery(discoveryName, imageURI) + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + newdigest := "0987654321" + expectedDigestFileName := filepath.Join(common.DefaultCacheDir, common.PluginInventoryDirName, discoveryName, "digest."+newdigest) + digestFileName := dbDiscovery.checkDigestFileExistence(newdigest, "") + Expect(digestFileName).To(Equal(expectedDigestFileName), 
"expected a new digest filename") + + // Check that the existing digest file was removed + _, err := os.Stat(digestFile) + Expect(os.IsNotExist(err)).To(BeTrue(), "expected the old digest file to be removed") + }) + }) + + Context("cacheTTLExpired and resetCacheTTL functions", func() { + var dbDir, expiredDigest, nonExpiredDigest string + var err error + BeforeEach(func() { + // Create a fake db file + dbDir, err = os.MkdirTemp("", "test-cache-dir") + Expect(err).To(BeNil()) + + common.DefaultCacheDir = dbDir + + // Create an expired and an non-expired discovery in the cache directory + for _, discoveryName := range []string{"test-expired", "test-notexpired"} { + imageURI := discoveryName + "-image:latest" + + // Create the discovery in the config file but not for the "test additional" ones + discovery := configtypes.PluginDiscovery{ + OCI: &configtypes.OCIDiscovery{ + Name: discoveryName, + Image: imageURI, + }} + err = configlib.SetCLIDiscoverySource(discovery) + Expect(err).To(BeNil()) + + // Create the directory for the DB file + pluginDBdir := filepath.Join(common.DefaultCacheDir, common.PluginInventoryDirName, discoveryName) + err := os.MkdirAll(pluginDBdir, 0755) + Expect(err).To(BeNil()) + // Create the DB file + pluginDBFile := filepath.Join(pluginDBdir, plugininventory.SQliteDBFileName) + file, err := os.Create(pluginDBFile) + Expect(err).To(BeNil()) + file.Close() + + // Create a the digest file with the URI of the image as its content + digestFile := filepath.Join(pluginDBdir, "digest.1234567890") + file, err = os.Create(digestFile) + Expect(err).To(BeNil()) + _, err = file.WriteString(imageURI) + Expect(err).To(BeNil()) + file.Close() + + // Set an expired timestamp for the digest file and a non-expired timestamp for the other + if strings.Contains(discoveryName, "notexpired") { + // Set an non-expired time of 2 seconds ago so the TTL is not expired + err = os.Chtimes(digestFile, time.Now(), time.Now().Add(-2*time.Second)) + Expect(err).To(BeNil()) + 
nonExpiredDigest = digestFile + } else { + // Set an expired time of 30 hours ago so the TTL is expired + err = os.Chtimes(digestFile, time.Now(), time.Now().Add(-30*time.Hour)) + Expect(err).To(BeNil()) + expiredDigest = digestFile + } + } + }) + AfterEach(func() { + os.RemoveAll(dbDir) + os.Unsetenv(constants.ConfigVariablePluginDBCacheTTL) + }) + It("cacheTTLExpired should return true when TTL is expired", func() { + discovery := NewOCIDiscovery("test-expired", "test-expired-image:latest") + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + Expect(dbDiscovery.cacheTTLExpired()).To(BeTrue()) + }) + It("cacheTTLExpired should return false when TTL is not expired", func() { + discovery := NewOCIDiscovery("test-notexpired", "test-notexpired-image:latest") + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + Expect(dbDiscovery.cacheTTLExpired()).To(BeFalse()) + }) + It("cacheTTLExpired should return true when URI changed", func() { + discovery := NewOCIDiscovery("test-notexpired", "changedURI:latest") + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + Expect(dbDiscovery.cacheTTLExpired()).To(BeTrue()) + }) + It("cacheTTLExpired should return true when we shorten the TTL to an expired value", func() { + discovery := NewOCIDiscovery("test-notexpired", "test-notexpired-image:latest") + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + // Set the TTL to 1 second, which should expire this digest + os.Setenv(constants.ConfigVariablePluginDBCacheTTL, "1") + Expect(dbDiscovery.cacheTTLExpired()).To(BeTrue()) + }) + It("cacheTTLExpired should return true when there is no DB, even if the TTL has not expired", func() { + discovery := 
NewOCIDiscovery("test-notexpired", "test-notexpired-image:latest") + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + // Remove the cache, as if a plugin clean had occurred + os.RemoveAll(filepath.Join(common.DefaultCacheDir, common.PluginInventoryDirName)) + + // Make sure the cache is considered expired + Expect(dbDiscovery.cacheTTLExpired()).To(BeTrue()) + }) + + It("resetCacheTTL should reset the digest file to time.Now()", func() { + discovery := NewOCIDiscovery("test-notexpired", "test-image:latest") + dbDiscovery, ok := discovery.(*DBBackedOCIDiscovery) + Expect(ok).To(BeTrue(), "oci discovery is not of type DBBackedOCIDiscovery") + + dbDiscovery.resetCacheTTL() + + // The mtime of the digest files should be very very close to the current time + // Let's check that it is within 1 second of the current time. + // Note: compare Duration to Duration; using .Seconds() here would compare + // a float64 number of seconds against 1e9 nanoseconds and never fail. + stat, err := os.Stat(expiredDigest) + Expect(err).To(BeNil()) + Expect(time.Since(stat.ModTime())).Should(BeNumerically("<", 1*time.Second)) + + stat, err = os.Stat(nonExpiredDigest) + Expect(err).To(BeNil()) + Expect(time.Since(stat.ModTime())).Should(BeNumerically("<", 1*time.Second)) + }) + }) }) }) + +func checkFileContentIsEqual(filename, content string) bool { + file, err := os.Open(filename) + if err != nil { + return false + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if scanner.Scan() { + firstLine := scanner.Text() + return firstLine == content + } + return false +} diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 400f11b78..28af856cd 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -91,7 +91,6 @@ e2e-plugin-compatibility-tests: .PHONY: e2e-plugin-lifecycle-tests ## Execute CLI Core Plugin life cycle E2E test cases e2e-plugin-lifecycle-tests: export TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_URL=$(TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_URL) ; \ - export 
TANZU_CLI_PLUGIN_DISCOVERY_IMAGE_SIGNATURE_VERIFICATION_SKIP_LIST=$(TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_URL) ; \ export TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_PLUGIN_DISCOVERY_IMAGE_SIGNATURE_PUBLIC_KEY_PATH=$(TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_PLUGIN_DISCOVERY_IMAGE_SIGNATURE_PUBLIC_KEY_PATH) ; \ export TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_HOST=${TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_HOST} ; \ export TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_CA_CERT_PATH=${TANZU_CLI_E2E_TEST_LOCAL_CENTRAL_REPO_CA_CERT_PATH} ; \ diff --git a/test/e2e/airgapped/airgapped_test.go b/test/e2e/airgapped/airgapped_test.go index c2e7870e6..cf95e0309 100644 --- a/test/e2e/airgapped/airgapped_test.go +++ b/test/e2e/airgapped/airgapped_test.go @@ -8,10 +8,12 @@ import ( "os" "path/filepath" "strings" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/vmware-tanzu/tanzu-cli/pkg/constants" "github.com/vmware-tanzu/tanzu-cli/test/e2e/framework" pluginlifecyclee2e "github.com/vmware-tanzu/tanzu-cli/test/e2e/plugin_lifecycle" ) @@ -114,14 +116,49 @@ var _ = framework.CLICoreDescribe("[Tests:E2E][Feature:Airgapped-Plugin-Download // Test case: upload plugin bundle downloaded using vmware-tmc/tmc-user:v9.9.9 plugin-group to the airgapped repository It("upload plugin bundle downloaded using vmware-tmc/tmc-user:v9.9.9 plugin-group to the airgapped repository", func() { - err := tf.PluginCmd.UploadPluginBundle(e2eAirgappedCentralRepo, filepath.Join(tempDir, "plugin_bundle_vmware-tmc-default-v9.9.9.tar.gz")) + // First update the plugin source just to force a reset of the digest TTL. + err := framework.UpdatePluginDiscoverySource(tf, e2eAirgappedCentralRepoImage) + Expect(err).To(BeNil(), "should not get any error for plugin source update") + + // Now, upload more plugins to the same URI as the one used for the previous test case. + // This means we are modifying the plugin source and the CLI will need to download the new DB. 
+ // However, the CLI will only refresh the DB after the cache TTL has expired. + err = tf.PluginCmd.UploadPluginBundle(e2eAirgappedCentralRepo, filepath.Join(tempDir, "plugin_bundle_vmware-tmc-default-v9.9.9.tar.gz")) Expect(err).To(BeNil(), "should not get any error while uploading plugin bundle with specific group") }) + It("validate that ONLY the plugins from group 'vmware-tkg/default:v0.0.1' exists because the digest TTL has not expired so the DB has not been refreshed", func() { + pluginGroups, err = pluginlifecyclee2e.SearchAllPluginGroups(tf) + Expect(err).To(BeNil(), framework.NoErrorForPluginGroupSearch) + // check all expected plugin groups are available in the `plugin group search` output from the airgapped repository + expectedPluginGroups := []*framework.PluginGroup{{Group: "vmware-tkg/default", Latest: "v0.0.1", Description: "Desc for vmware-tkg/default:v0.0.1"}} + Expect(framework.IsAllPluginGroupsExists(pluginGroups, expectedPluginGroups)).Should(BeTrue(), "all required plugin groups for life cycle tests should exists in plugin group search output") + + // search plugins and make sure correct number of plugins available + // check expected plugins are available in the `plugin search` output from the airgapped repository + expectedPlugins := pluginsForPGTKG001 + expectedPlugins = append(expectedPlugins, essentialPlugins...) // Essential plugin will be always installed + pluginsSearchList, err = pluginlifecyclee2e.SearchAllPlugins(tf) + Expect(err).To(BeNil(), framework.NoErrorForPluginSearch) + Expect(len(pluginsSearchList)).To(Equal(len(expectedPlugins))) + Expect(framework.CheckAllPluginsExists(pluginsSearchList, expectedPlugins)).To(BeTrue()) + }) + It("validate the plugins from group 'vmware-tmc/tmc-user:v9.9.9' exists", func() { + // Temporarily set the TTL to something small + os.Setenv(constants.ConfigVariablePluginDBCacheTTL, "3") + + // Wait for the digest TTL to expire so that the DB is refreshed. 
+ time.Sleep(time.Second * 5) // Sleep for 5 seconds + // search plugin groups and make sure there are plugin groups available + // This command will force a refresh of the DB since the TTL has been set to a smaller value pluginGroups, err = pluginlifecyclee2e.SearchAllPluginGroups(tf) Expect(err).To(BeNil(), framework.NoErrorForPluginGroupSearch) + + // Unset the TTL override now that the DB has been refreshed + os.Unsetenv(constants.ConfigVariablePluginDBCacheTTL) + // check all expected plugin groups are available in plugin group search output expectedPluginGroups := []*framework.PluginGroup{ {Group: "vmware-tkg/default", Latest: "v0.0.1", Description: "Desc for vmware-tkg/default:v0.0.1"}, @@ -161,8 +198,14 @@ var _ = framework.CLICoreDescribe("[Tests:E2E][Feature:Airgapped-Plugin-Download // Test case: upload plugin bundle downloaded using vmware-tmc/tmc-user:v0.0.1 plugin-group to the airgapped repository It("upload plugin bundle downloaded using vmware-tmc/tmc-user:v0.0.1 plugin-group to the airgapped repository", func() { + // We are modifying the plugin source and the CLI will need to download the new DB. + // However, the CLI will only refresh the DB after the cache TTL has expired. 
err := tf.PluginCmd.UploadPluginBundle(e2eAirgappedCentralRepo, filepath.Join(tempDir, "plugin_bundle_vmware-tmc-v0.0.1.tar.gz")) Expect(err).To(BeNil(), "should not get any error while downloading plugin bundle with specific group") + + // Force a DB refresh by updating the plugin source + err = framework.UpdatePluginDiscoverySource(tf, e2eAirgappedCentralRepoImage) + Expect(err).To(BeNil(), "should not get any error for plugin source update") }) It("validate the plugins from group 'vmware-tmc/tmc-user:v0.0.1' exists", func() { @@ -208,8 +251,14 @@ var _ = framework.CLICoreDescribe("[Tests:E2E][Feature:Airgapped-Plugin-Download // Test case: upload plugin bundle downloaded without specifying plugin-group to the airgapped repository It("upload plugin bundle downloaded without specifying plugin-group to the airgapped repository", func() { + // Again we are modifying the plugin source and the CLI will need to download the new DB. + // However, the CLI will only refresh the DB after the cache TTL has expired. err := tf.PluginCmd.UploadPluginBundle(e2eAirgappedCentralRepo, filepath.Join(tempDir, "plugin_bundle_complete.tar.gz")) Expect(err).To(BeNil(), "should not get any error while uploading plugin bundle without specifying group") + + // Force a DB refresh by updating the plugin source + err = framework.UpdatePluginDiscoverySource(tf, e2eAirgappedCentralRepoImage) + Expect(err).To(BeNil(), "should not get any error for plugin source update") }) // Test case: validate that all plugins and plugin groups exists diff --git a/test/e2e/plugin_lifecycle/plugin_lifecycle_test.go b/test/e2e/plugin_lifecycle/plugin_lifecycle_test.go index 3b0466099..6810c5efc 100644 --- a/test/e2e/plugin_lifecycle/plugin_lifecycle_test.go +++ b/test/e2e/plugin_lifecycle/plugin_lifecycle_test.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "path/filepath" + "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -70,7 +71,7 @@ var _ = framework.CLICoreDescribe("[Tests:E2E][Feature:Plugin-lifecycle]", func( Expect(matches[0]).To(Equal(e2eDigestFileName), "digest file should not have changed") }) // Test case: (negative test) delete plugin source which is not exists - It("negative test case: delete plugin source which is not exists", func() { + It("negative test case: delete plugin source which does not exists", func() { wrongName := framework.RandomString(5) _, err := tf.PluginCmd.DeletePluginDiscoverySource(wrongName) Expect(err.Error()).To(ContainSubstring(fmt.Sprintf(framework.DiscoverySourceNotFound, wrongName))) @@ -105,6 +106,24 @@ var _ = framework.CLICoreDescribe("[Tests:E2E][Feature:Plugin-lifecycle]", func( // Set the original signature public key path back os.Setenv(framework.TanzuCliPluginDiscoveryImageSignaturePublicKeyPath, originalSignaturePublicKeyPath) }) + It("try to set the plugin source to an unsigned one and make sure it does not get changed", func() { + // To make the plugin source unsigned, we remove the signature public key path + originalSignaturePublicKeyPath := os.Getenv(framework.TanzuCliPluginDiscoveryImageSignaturePublicKeyPath) + // Unset the signature public key path to get the default one + os.Unsetenv(framework.TanzuCliPluginDiscoveryImageSignaturePublicKeyPath) + + _, err := tf.PluginCmd.UpdatePluginDiscoverySource(&framework.DiscoveryOptions{Name: pluginSourceName, SourceType: framework.SourceType, URI: e2eTestLocalCentralRepoURL}) + Expect(err).ToNot(BeNil(), "should get an error for plugin source update to unsigned image") + + // Check the plugin source was not updated + list, err := tf.PluginCmd.ListPluginSources() + Expect(err).To(BeNil(), "should not get any error for plugin source list") + Expect(framework.IsPluginSourceExists(list, pluginSourceName)).To(BeTrue()) + Expect(list[0].Image).To(Equal(constants.TanzuCLIDefaultCentralPluginDiscoveryImage)) + + // Set the original signature public key path back 
+ os.Setenv(framework.TanzuCliPluginDiscoveryImageSignaturePublicKeyPath, originalSignaturePublicKeyPath) + }) It("put back the E2E plugin repository", func() { _, err := tf.PluginCmd.UpdatePluginDiscoverySource(&framework.DiscoveryOptions{Name: pluginSourceName, SourceType: framework.SourceType, URI: e2eTestLocalCentralRepoURL}) Expect(err).To(BeNil(), "should not get any error for plugin source update") @@ -344,4 +363,229 @@ var _ = framework.CLICoreDescribe("[Tests:E2E][Feature:Plugin-lifecycle]", func( Expect(len(pluginsList)).Should(Equal(0), "there should not be any plugins available after uninstall all") }) }) + + // use case: Plugin inventory DB digest check is only done once its TTL has expired + // a. run a "tanzu plugin source update" to do a digest check + // b. remove the metadata.digest.none file. This will cause a printout when the digest is checked after the cache TTL has expired + // c. set the TTL to 12 seconds and sleep for 14 seconds and check that a "tanzu plugin search" does a digest check + // d. repeatedly sleep a few seconds, then run a "tanzu plugin search" and make sure no digest check is done (no printout) + // e. sleep a few seconds passed the TTL, then run a "tanzu plugin search" and make sure the digest check is done (printout) + // f. unset the TTL override + // g. clean plugins (which will also remove the DB file) and make sure a "tanzu plugin search" immediately does a digest check + // h. 
cleanup + Context("plugin inventory DB digest check is only done once its TTL has expired", func() { + const ( + pluginSourceName = "default" + refreshingDBPrintout = "Reading plugin inventory for" + ) + pluginDataDir := filepath.Join(framework.TestHomeDir, ".cache", "tanzu", common.PluginInventoryDirName, pluginSourceName) + metadataDigest := filepath.Join(pluginDataDir, "metadata.digest.none") + + It("update plugin source to force a refresh of the digest", func() { + err := framework.UpdatePluginDiscoverySource(tf, e2eTestLocalCentralRepoURL) + Expect(err).To(BeNil(), "should not get any error for plugin source update") + + // Do a plugin list to get the essential plugins installed, so that + // it does not happen when we are running the digest test below + pluginsList, err := framework.GetPluginsList(tf, true) + Expect(err).To(BeNil(), "should not get any error for plugin list") + Expect(len(pluginsList)).Should(Equal(1), "the essential plugin should be installed") + }) + // Test case: verify that a plugin search does not check the digest until the TTL has expired + It("plugin search uses the existing DB until TTL expires", func() { + // Use this function to remove the metadata digest file so that we can expect the + // refreshingDBPrintout printout defined above when the digest is checked + // after its TTL has expired + removeDigestFile := func() { + // Remove the metadata digest file + err := os.Remove(metadataDigest) + Expect(err).To(BeNil(), "unable to remove metadata digest file") + } + + // Set the TTL to something small: 12 seconds + os.Setenv(constants.ConfigVariablePluginDBCacheTTL, "12") + + // Now wait for the TTL to expire so we can start fresh + removeDigestFile() + time.Sleep(time.Second * 14) // Sleep for 14 seconds + _, errStream, err := tf.PluginCmd.RunPluginCmd("search --name plugin_first") + Expect(err).To(BeNil()) + Expect(errStream).To(ContainSubstring(refreshingDBPrintout)) + + // For the first 9 seconds, we should not see any printouts 
about refreshing the DB + removeDigestFile() + for i := 0; i < 3; i++ { + time.Sleep(time.Second * 3) // Sleep for 3 seconds + + _, errStream, err = tf.PluginCmd.RunPluginCmd(fmt.Sprintf("search --name plugin_%d", i)) + Expect(err).To(BeNil()) + // No printouts on the error stream about refreshing the DB + Expect(errStream).ToNot(ContainSubstring(refreshingDBPrintout)) + + // No digest file created + _, err := os.Stat(metadataDigest) + Expect(err).ToNot(BeNil(), "should not have found a digest file") + } + + // Once the TTL of 12 seconds has expired, we should see a printout about refreshing the DB + time.Sleep(time.Second * 5) // Sleep for a final 5 seconds + + _, errStream, err = tf.PluginCmd.RunPluginCmd("search --name plugin_last") + Expect(err).To(BeNil()) + // Now we expect printouts on the error stream about refreshing the DB + Expect(errStream).To(ContainSubstring(refreshingDBPrintout)) + + // Also, the digest file should have been created + _, err = os.Stat(metadataDigest) + Expect(err).To(BeNil(), "metadata digest file should have been created") + + // Unset the TTL override + os.Unsetenv(constants.ConfigVariablePluginDBCacheTTL) + }) + // Run "plugin clean" which also removes the plugin DB and make sure a "plugin search" immediately does a digest check + It("clean DB and do a plugin search", func() { + err := tf.PluginCmd.CleanPlugins() + Expect(err).To(BeNil(), "should not get any error for plugin clean") + + // Make sure a "plugin search" immediately does a digest check + _, errPrintout, err := tf.PluginCmd.RunPluginCmd("search --name plugin_after_clean") + Expect(err).To(BeNil()) + // Now we expect printouts on the error stream about refreshing the DB + Expect(errPrintout).To(ContainSubstring(refreshingDBPrintout)) + }) + // Clean up at the end. 
+ It("clean plugins and verify with plugin list", func() { + err := tf.PluginCmd.CleanPlugins() + Expect(err).To(BeNil(), "should not get any error for plugin clean") + pluginsList, err := framework.GetPluginsList(tf, true) + Expect(err).To(BeNil(), "should not get any error for plugin list") + Expect(len(pluginsList)).Should(Equal(0), "there should not be any plugins available after uninstall all") + }) + }) + + // use case: Plugin inventory DB digest check is only done once its TTL has expired for + // discoveries added through TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY + // a. remove the default discovery and add a discovery image to TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY + // b. remove the metadata.digest.none file. This will cause a printout when the digest is checked after the cache TTL has expired + // c. set the TTL to 12 seconds and sleep for 14 seconds and check that a "tanzu plugin search" does a digest check + // d. repeatedly sleep a few seconds, then run a "tanzu plugin search" and make sure no digest check is done (no printout) + // e. sleep a few seconds passed the TTL, then run a "tanzu plugin search" and make sure the digest check is done (printout) + // f. unset the TTL override + // g. clean plugins (which will also remove the DB file) and make sure a "tanzu plugin search" immediately does a digest check + // h. 
cleanup + Context("plugin inventory DB digest check is only done once its TTL has expired", func() { + const ( + defaultPluginSourceName = "default" + testPluginSourceName = "disc_0" + refreshingDBPrintout = "Reading plugin inventory for" + ) + pluginDataDir := filepath.Join(framework.TestHomeDir, ".cache", "tanzu", common.PluginInventoryDirName, testPluginSourceName) + metadataDigest := filepath.Join(pluginDataDir, "metadata.digest.none") + + It("delete default plugin source and add a test one", func() { + _, err := tf.PluginCmd.DeletePluginDiscoverySource("default") + Expect(err).To(BeNil(), "should not get any error for plugin source delete") + + os.Setenv("TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY", e2eTestLocalCentralRepoURL) + + // Do a plugin group search to fill the cache with the plugin inventory + _, _, err = tf.PluginCmd.RunPluginCmd("group search") + Expect(err).To(BeNil()) + + // Do a plugin list to get the essential plugins installed, so that + // it does not happen when we are running the digest test below + pluginsList, err := framework.GetPluginsList(tf, true) + Expect(err).To(BeNil(), "should not get any error for plugin list") + Expect(len(pluginsList)).Should(Equal(1), "the essential plugin should be installed") + }) + // Test case: verify that a plugin search does not check the digest until the TTL has expired + It("plugin search uses the existing DB until TTL expires", func() { + // Use this function to remove the metadata digest file so that we can expect the + // refreshingDBPrintout printout defined above when the digest is checked + // after its TTL has expired + removeDigestFile := func() { + // Remove the metadata digest file + err := os.Remove(metadataDigest) + Expect(err).To(BeNil(), "unable to remove metadata digest file") + } + + // Set the TTL to something small: 12 seconds + os.Setenv(constants.ConfigVariablePluginDBCacheTTL, "12") + + // Now wait for the TTL to expire so we can start fresh + removeDigestFile() + 
time.Sleep(time.Second * 14) // Sleep for 14 seconds + _, errStream, err := tf.PluginCmd.RunPluginCmd("search --name plugin_first") + Expect(err).To(BeNil()) + Expect(errStream).To(ContainSubstring(refreshingDBPrintout)) + + // For the first 9 seconds, we should not see any printouts about refreshing the DB + removeDigestFile() + for i := 0; i < 3; i++ { + time.Sleep(time.Second * 3) // Sleep for 3 seconds + + _, errStream, err = tf.PluginCmd.RunPluginCmd(fmt.Sprintf("search --name plugin_%d", i)) + Expect(err).To(BeNil()) + // No printouts on the error stream about refreshing the DB + Expect(errStream).ToNot(ContainSubstring(refreshingDBPrintout)) + + // No digest file created + _, err := os.Stat(metadataDigest) + Expect(err).ToNot(BeNil(), "should not have found a digest file") + } + + // Once the TTL of 12 seconds has expired, we should see a printout about refreshing the DB + time.Sleep(time.Second * 5) // Sleep for a final 5 seconds + + _, errStream, err = tf.PluginCmd.RunPluginCmd("search --name plugin_last") + Expect(err).To(BeNil()) + // Now we expect printouts on the error stream about refreshing the DB + Expect(errStream).To(ContainSubstring(refreshingDBPrintout)) + + // Also, the digest file should have been created + _, err = os.Stat(metadataDigest) + Expect(err).To(BeNil(), "metadata digest file should have been created") + + // Unset the TTL override + os.Unsetenv(constants.ConfigVariablePluginDBCacheTTL) + }) + // Change the TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY value and make sure + // the digest check is done immediately + It("set a different test discovery", func() { + os.Setenv("TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY", "localhost:9876/tanzu-cli/plugins/central:large") + + // Make sure a "plugin search" immediately does a digest check + _, errPrintout, err := tf.PluginCmd.RunPluginCmd("search --name plugin_after_new_discovery") + Expect(err).To(BeNil()) + // We expect printouts on the error stream about refreshing 
the DB + Expect(errPrintout).To(ContainSubstring(refreshingDBPrintout)) + }) + // Clean up at the end. + It("clean plugins and verify with plugin list", func() { + err := tf.PluginCmd.CleanPlugins() + Expect(err).To(BeNil(), "should not get any error for plugin clean") + pluginsList, err := framework.GetPluginsList(tf, true) + Expect(err).To(BeNil(), "should not get any error for plugin list") + Expect(len(pluginsList)).Should(Equal(0), "there should not be any plugins available after uninstall all") + }) + It("put back the E2E plugin repository", func() { + os.Unsetenv("TANZU_CLI_ADDITIONAL_PLUGIN_DISCOVERY_IMAGES_TEST_ONLY") + + // Save the original signature public key path + originalSignaturePublicKeyPath := os.Getenv(framework.TanzuCliPluginDiscoveryImageSignaturePublicKeyPath) + // Unset the signature public key path to get the default one + os.Unsetenv(framework.TanzuCliPluginDiscoveryImageSignaturePublicKeyPath) + + // First put back the "default" plugin source + _, err := tf.PluginCmd.InitPluginDiscoverySource() + Expect(err).To(BeNil(), "should not get any error for plugin source init") + + // Set the original signature public key path back + os.Setenv(framework.TanzuCliPluginDiscoveryImageSignaturePublicKeyPath, originalSignaturePublicKeyPath) + + // Now reset it to the e2e test url + _, err = tf.PluginCmd.UpdatePluginDiscoverySource(&framework.DiscoveryOptions{Name: defaultPluginSourceName, SourceType: framework.SourceType, URI: e2eTestLocalCentralRepoURL}) + Expect(err).To(BeNil(), "should not get any error for plugin source update") + }) + }) })