diff --git a/cmd/stop.go b/cmd/stop.go
index 19401326f..6a6f4aa55 100644
--- a/cmd/stop.go
+++ b/cmd/stop.go
@@ -12,6 +12,7 @@ import (
 var (
     noBackup  bool
     projectId string
+    all       bool
 
     stopCmd = &cobra.Command{
         GroupID: groupLocalDev,
@@ -19,7 +20,7 @@ var (
         Short:   "Stop all local Supabase containers",
         RunE: func(cmd *cobra.Command, args []string) error {
             ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt)
-            return stop.Run(ctx, !noBackup, projectId, afero.NewOsFs())
+            return stop.Run(ctx, !noBackup, projectId, all, afero.NewOsFs())
         },
     }
 )
@@ -30,5 +31,7 @@ func init() {
     flags.StringVar(&projectId, "project-id", "", "Local project ID to stop.")
     cobra.CheckErr(flags.MarkHidden("backup"))
     flags.BoolVar(&noBackup, "no-backup", false, "Deletes all data volumes after stopping.")
+    flags.BoolVar(&all, "all", false, "Stop all local Supabase instances from all projects across the machine.")
+    stopCmd.MarkFlagsMutuallyExclusive("project-id", "all")
     rootCmd.AddCommand(stopCmd)
 }
diff --git a/docs/supabase/stop.md b/docs/supabase/stop.md
index e18261ab3..870fa8603 100644
--- a/docs/supabase/stop.md
+++ b/docs/supabase/stop.md
@@ -5,3 +5,5 @@ Stops the Supabase local development stack.
 Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`.
 
 All Docker resources are maintained across restarts. Use `--no-backup` flag to reset your local development data between restarts.
+
+Use the `--all` flag to stop all local Supabase project instances on the machine. Use it with caution together with `--no-backup`, as that combination deletes the data of every local Supabase project.
\ No newline at end of file
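
The new `--all` flag is registered as mutually exclusive with `--project-id` via cobra's `MarkFlagsMutuallyExclusive`. A minimal standalone sketch of that behavior (hypothetical `demo` command, not the CLI's actual wiring):

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        var projectId string
        var all bool

        // Hypothetical command mirroring how stopCmd registers the two flags.
        cmd := &cobra.Command{
            Use: "demo",
            RunE: func(cmd *cobra.Command, args []string) error {
                fmt.Printf("project-id=%q all=%v\n", projectId, all)
                return nil
            },
        }
        cmd.Flags().StringVar(&projectId, "project-id", "", "Local project ID to stop.")
        cmd.Flags().BoolVar(&all, "all", false, "Stop every local project.")
        cmd.MarkFlagsMutuallyExclusive("project-id", "all")

        // Supplying both flags fails cobra's flag-group validation before RunE runs.
        cmd.SetArgs([]string{"--project-id", "demo", "--all"})
        if err := cmd.Execute(); err != nil {
            fmt.Println("rejected:", err)
        }
    }

With only one of the two flags set, RunE executes normally, so `supabase stop --project-id <id>` and `supabase stop --all` stay independent code paths.
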
"github.com/supabase/cli/internal/utils" ) -func Run(ctx context.Context, backup bool, projectId string, fsys afero.Fs) error { - // Sanity checks. - if len(projectId) > 0 { - utils.Config.ProjectId = projectId - } else if err := utils.LoadConfigFS(fsys); err != nil { - return err +func Run(ctx context.Context, backup bool, projectId string, all bool, fsys afero.Fs) error { + var searchProjectIdFilter string + if !all { + // Sanity checks. + if len(projectId) > 0 { + utils.Config.ProjectId = projectId + } else if err := utils.LoadConfigFS(fsys); err != nil { + return err + } + searchProjectIdFilter = utils.Config.ProjectId } // Stop all services if err := utils.RunProgram(ctx, func(p utils.Program, ctx context.Context) error { w := utils.StatusWriter{Program: p} - return stop(ctx, backup, w) + return stop(ctx, backup, w, searchProjectIdFilter) }); err != nil { return err } fmt.Println("Stopped " + utils.Aqua("supabase") + " local development setup.") if resp, err := utils.Docker.VolumeList(ctx, volume.ListOptions{ - Filters: utils.CliProjectFilter(), + Filters: utils.CliProjectFilter(searchProjectIdFilter), }); err == nil && len(resp.Volumes) > 0 { - listVolume := fmt.Sprintf("docker volume ls --filter label=%s=%s", utils.CliProjectLabel, utils.Config.ProjectId) - utils.CmdSuggestion = "Local data are backed up to docker volume. Use docker to show them: " + utils.Aqua(listVolume) + if len(searchProjectIdFilter) > 0 { + listVolume := fmt.Sprintf("docker volume ls --filter label=%s=%s", utils.CliProjectLabel, searchProjectIdFilter) + utils.CmdSuggestion = "Local data are backed up to docker volume. Use docker to show them: " + utils.Aqua(listVolume) + } else { + listVolume := fmt.Sprintf("docker volume ls --filter label=%s", utils.CliProjectLabel) + utils.CmdSuggestion = "Local data are backed up to docker volume. Use docker to show them: " + utils.Aqua(listVolume) + } } return nil } -func stop(ctx context.Context, backup bool, w io.Writer) error { +func stop(ctx context.Context, backup bool, w io.Writer, projectId string) error { utils.NoBackupVolume = !backup - return utils.DockerRemoveAll(ctx, w) + return utils.DockerRemoveAll(ctx, w, projectId) } diff --git a/internal/stop/stop_test.go b/internal/stop/stop_test.go index 419df28bc..da571330b 100644 --- a/internal/stop/stop_test.go +++ b/internal/stop/stop_test.go @@ -3,6 +3,7 @@ package stop import ( "context" "errors" + "fmt" "io" "net/http" "os" @@ -47,7 +48,87 @@ func TestStopCommand(t *testing.T) { Name: utils.DbId, }}}) // Run test - err := Run(context.Background(), true, "", fsys) + err := Run(context.Background(), true, "", false, fsys) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("stops all instances when --all flag is used", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + require.NoError(t, utils.WriteConfig(fsys, false)) + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + + projects := []string{"project1", "project2"} + + // Mock initial ContainerList for all containers + gock.New(utils.Docker.DaemonHost()). + Get("/v"+utils.Docker.ClientVersion()+"/containers/json"). + MatchParam("all", "true"). + Reply(http.StatusOK). + JSON([]types.Container{ + {ID: "container1", Labels: map[string]string{utils.CliProjectLabel: "project1"}}, + {ID: "container2", Labels: map[string]string{utils.CliProjectLabel: "project2"}}, + }) + + // Mock initial VolumeList + gock.New(utils.Docker.DaemonHost()). 
+ Get("/v" + utils.Docker.ClientVersion() + "/volumes"). + Reply(http.StatusOK). + JSON(volume.ListResponse{ + Volumes: []*volume.Volume{ + {Name: "volume1", Labels: map[string]string{utils.CliProjectLabel: "project1"}}, + {Name: "volume2", Labels: map[string]string{utils.CliProjectLabel: "project2"}}, + }, + }) + + // Mock stopOneProject for each project + for _, projectId := range projects { + // Mock ContainerList for each project + gock.New(utils.Docker.DaemonHost()). + Get("/v"+utils.Docker.ClientVersion()+"/containers/json"). + MatchParam("all", "1"). + MatchParam("filters", fmt.Sprintf(`{"label":{"com.supabase.cli.project=%s":true}}`, projectId)). + Reply(http.StatusOK). + JSON([]types.Container{{ID: "container-" + projectId, State: "running"}}) + + // Mock container stop + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/container-" + projectId + "/stop"). + Reply(http.StatusOK) + + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/prune"). + Reply(http.StatusOK). + JSON(container.PruneReport{}) + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/networks/prune"). + Reply(http.StatusOK). + JSON(network.PruneReport{}) + gock.New(utils.Docker.DaemonHost()). + Get("/v"+utils.Docker.ClientVersion()+"/volumes"). + MatchParam("filters", fmt.Sprintf(`{"label":{"com.supabase.cli.project=%s":true}}`, projectId)). + Reply(http.StatusOK). + JSON(volume.ListResponse{Volumes: []*volume.Volume{{Name: "volume-" + projectId}}}) + } + + // Mock final ContainerList to verify all containers are stopped + gock.New(utils.Docker.DaemonHost()). + Get("/v"+utils.Docker.ClientVersion()+"/containers/json"). + MatchParam("all", "true"). + Reply(http.StatusOK). + JSON([]types.Container{}) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/json"). + Reply(http.StatusOK). + JSON([]types.Container{}) + + // Run test + err := Run(context.Background(), true, "", true, fsys) + // Check error assert.NoError(t, err) assert.Empty(t, apitest.ListUnmatchedRequests()) @@ -57,7 +138,7 @@ func TestStopCommand(t *testing.T) { // Setup in-memory fs fsys := afero.NewMemMapFs() // Run test - err := Run(context.Background(), false, "", fsys) + err := Run(context.Background(), false, "", false, fsys) // Check error assert.ErrorIs(t, err, os.ErrNotExist) }) @@ -73,7 +154,7 @@ func TestStopCommand(t *testing.T) { Get("/v" + utils.Docker.ClientVersion() + "/containers/json"). Reply(http.StatusServiceUnavailable) // Run test - err := Run(context.Background(), false, "test", afero.NewReadOnlyFs(fsys)) + err := Run(context.Background(), false, "test", false, afero.NewReadOnlyFs(fsys)) // Check error assert.ErrorContains(t, err, "request returned Service Unavailable for API route and version") assert.Empty(t, apitest.ListUnmatchedRequests()) @@ -102,7 +183,7 @@ func TestStopServices(t *testing.T) { Reply(http.StatusOK). 
diff --git a/internal/utils/docker.go b/internal/utils/docker.go
index 678d93e98..80106583b 100644
--- a/internal/utils/docker.go
+++ b/internal/utils/docker.go
@@ -92,9 +92,9 @@ func WaitAll[T any](containers []T, exec func(container T) error) []error {
 // NoBackupVolume TODO: encapsulate this state in a class
 var NoBackupVolume = false
 
-func DockerRemoveAll(ctx context.Context, w io.Writer) error {
+func DockerRemoveAll(ctx context.Context, w io.Writer, projectId string) error {
     fmt.Fprintln(w, "Stopping containers...")
-    args := CliProjectFilter()
+    args := CliProjectFilter(projectId)
     containers, err := Docker.ContainerList(ctx, container.ListOptions{
         All:     true,
         Filters: args,
@@ -144,9 +144,14 @@ func DockerRemoveAll(ctx context.Context, w io.Writer) error {
     return nil
 }
 
-func CliProjectFilter() filters.Args {
+func CliProjectFilter(projectId string) filters.Args {
+    if len(projectId) == 0 {
+        return filters.NewArgs(
+            filters.Arg("label", CliProjectLabel),
+        )
+    }
     return filters.NewArgs(
-        filters.Arg("label", CliProjectLabel+"="+Config.ProjectId),
+        filters.Arg("label", CliProjectLabel+"="+projectId),
     )
 }
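
With that, DockerRemoveAll has exactly two call patterns in this diff: the db/start, start, and status paths pass utils.Config.ProjectId so only the current project is touched, while `stop --all` passes an empty string and therefore matches every resource carrying the CLI's project label. A hypothetical in-repo helper (a sketch, not part of this change; it uses only identifiers already shown above) that makes the choice explicit:

    // removeProjects is a hypothetical wrapper over the updated DockerRemoveAll:
    // all=false tears down only the currently configured project, all=true tears
    // down every project the CLI has created on this machine.
    func removeProjects(ctx context.Context, w io.Writer, all bool) error {
        projectId := utils.Config.ProjectId
        if all {
            projectId = "" // empty ID -> bare label filter -> every CLI-managed resource
        }
        return utils.DockerRemoveAll(ctx, w, projectId)
    }

Combined with NoBackupVolume, this is also why the docs above warn about pairing `--all` with `--no-backup`: the empty filter widens both the container teardown and the volume removal to every local project.
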