Remove SS proxy and postgres containers #155

Merged · 1 commit · Nov 29, 2024
4 changes: 2 additions & 2 deletions internal/cc/instance.go
@@ -16,7 +16,7 @@ import (
 // The instance is the global variable holding onto all data that must be shared
 // between tests, such as the configuration options and the deployed containers.
 type Instance struct {
-    ssDeployment           *deploy.SlidingSyncDeployment
+    ssDeployment           *deploy.ComplementCryptoDeployment
     ssMutex                *sync.Mutex
     complementCryptoConfig *config.ComplementCrypto
 }
@@ -55,7 +55,7 @@ func (i *Instance) TestMain(m *testing.M, namespace string) {
 //
 // Tests will rarely use this function directly, preferring to use TestContext.
 // See Instance.CreateTestContext
-func (i *Instance) Deploy(t *testing.T) *deploy.SlidingSyncDeployment {
+func (i *Instance) Deploy(t *testing.T) *deploy.ComplementCryptoDeployment {
     i.ssMutex.Lock()
     defer i.ssMutex.Unlock()
     if i.ssDeployment != nil {
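For orientation, a minimal sketch of a test consuming this API after the rename. The package-level instance variable and import paths are assumptions for illustration, not part of this diff:

    // Sketch only: assumes a *cc.Instance initialised in the suite's TestMain.
    package mytests

    import (
        "testing"

        "github.com/matrix-org/complement-crypto/internal/cc"
        "github.com/matrix-org/complement/helpers"
    )

    var instance *cc.Instance // assumed to be set up in TestMain

    func TestFoo(t *testing.T) {
        // Deploy now returns *deploy.ComplementCryptoDeployment;
        // only the type name changed, call sites stay the same.
        deployment := instance.Deploy(t)
        alice := deployment.Register(t, "hs1", helpers.RegistrationOpts{})
        _ = alice
    }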
4 changes: 1 addition & 3 deletions internal/cc/test_context.go
@@ -38,7 +38,7 @@ type ClientCreationRequest struct {
 // TestContext provides a consistent set of variables which most tests will need access to.
 // The variables are suitable for a single test.
 type TestContext struct {
-    Deployment    *deploy.SlidingSyncDeployment
+    Deployment    *deploy.ComplementCryptoDeployment
     RPCBinaryPath string
     RPCInstance   atomic.Int32
 
@@ -305,8 +305,6 @@ func (c *TestContext) MustCreateClient(t *testing.T, req *ClientCreationRequest)
     opts := api.NewClientCreationOpts(req.User.CSAPI)
     // now apply the supplied opts on top
     opts.Combine(&req.Opts)
-    // always set the SS URL based on the client type HS
-    opts.SlidingSyncURL = c.Deployment.SlidingSyncURLForHS(t, req.User.ClientType.HS)
     if req.Multiprocess {
         req.Opts = opts
         return c.mustCreateMultiprocessClient(t, req)
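With the proxy gone, MustCreateClient no longer injects a sliding sync URL; the client is handed only the homeserver (reverse proxy) base URL plus whatever the caller supplied. A hedged sketch of a call site after this change; any field or type not visible in this diff is an assumption:

    // Sketch only: no opts.SlidingSyncURL plumbing is needed any more.
    client := tc.MustCreateClient(t, &cc.ClientCreationRequest{
        User: alice, // a previously registered user on hs1
    })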
144 changes: 15 additions & 129 deletions internal/deploy/deploy.go
@@ -4,11 +4,9 @@ import (
     "context"
     "fmt"
     "io"
-    "io/fs"
     "log"
     "net/url"
     "os"
-    "path/filepath"
     "runtime"
     "strings"
     "sync"
@@ -31,7 +29,7 @@ import (
 
 const mitmDumpFilePathOnContainer = "/tmp/mitm.dump"
 
-type SlidingSyncDeployment struct {
+type ComplementCryptoDeployment struct {
     complement.Deployment
     extraContainers map[string]testcontainers.Container
     mitmClient      *mitm.Client
@@ -43,47 +41,36 @@ type SlidingSyncDeployment struct {
 
 // MITM returns a client capable of configuring man-in-the-middle operations such as
 // snooping on CSAPI traffic and modifying responses.
-func (d *SlidingSyncDeployment) MITM() *mitm.Client {
+func (d *ComplementCryptoDeployment) MITM() *mitm.Client {
     return d.mitmClient
 }
 
-func (d *SlidingSyncDeployment) UnauthenticatedClient(t ct.TestLike, serverName string) *client.CSAPI {
+func (d *ComplementCryptoDeployment) UnauthenticatedClient(t ct.TestLike, serverName string) *client.CSAPI {
     return d.withReverseProxyURL(serverName, d.Deployment.UnauthenticatedClient(t, serverName))
 }
 
-func (d *SlidingSyncDeployment) Register(t ct.TestLike, hsName string, opts helpers.RegistrationOpts) *client.CSAPI {
+func (d *ComplementCryptoDeployment) Register(t ct.TestLike, hsName string, opts helpers.RegistrationOpts) *client.CSAPI {
     return d.withReverseProxyURL(hsName, d.Deployment.Register(t, hsName, opts))
 }
 
-func (d *SlidingSyncDeployment) Login(t ct.TestLike, hsName string, existing *client.CSAPI, opts helpers.LoginOpts) *client.CSAPI {
+func (d *ComplementCryptoDeployment) Login(t ct.TestLike, hsName string, existing *client.CSAPI, opts helpers.LoginOpts) *client.CSAPI {
     return d.withReverseProxyURL(hsName, d.Deployment.Login(t, hsName, existing, opts))
 }
 
-func (d *SlidingSyncDeployment) AppServiceUser(t ct.TestLike, hsName, appServiceUserID string) *client.CSAPI {
+func (d *ComplementCryptoDeployment) AppServiceUser(t ct.TestLike, hsName, appServiceUserID string) *client.CSAPI {
     return d.withReverseProxyURL(hsName, d.Deployment.AppServiceUser(t, hsName, appServiceUserID))
 }
 
-func (d *SlidingSyncDeployment) SlidingSyncURLForHS(t ct.TestLike, hsName string) string {
-    switch hsName {
-    case "hs1":
-        return d.dnsToReverseProxyURL["ssproxy1"]
-    case "hs2":
-        return d.dnsToReverseProxyURL["ssproxy2"]
-    }
-    ct.Fatalf(t, "SlidingSyncURLForHS: unknown hs name '%s'", hsName)
-    return ""
-}
-
 // Replace the actual HS URL with a mitmproxy reverse proxy URL so we can sniff/intercept/modify traffic.
-func (d *SlidingSyncDeployment) withReverseProxyURL(hsName string, c *client.CSAPI) *client.CSAPI {
+func (d *ComplementCryptoDeployment) withReverseProxyURL(hsName string, c *client.CSAPI) *client.CSAPI {
     d.mu.RLock()
     defer d.mu.RUnlock()
     proxyURL := d.dnsToReverseProxyURL[hsName]
     c.BaseURL = proxyURL
     return c
 }
 
-func (d *SlidingSyncDeployment) writeMITMDump() {
+func (d *ComplementCryptoDeployment) writeMITMDump() {
     if d.mitmDumpFile == "" {
         return
     }
@@ -104,7 +91,7 @@ func (d *SlidingSyncDeployment) writeMITMDump() {
     }
 }
 
-func (d *SlidingSyncDeployment) Teardown() {
+func (d *ComplementCryptoDeployment) Teardown() {
     d.writeMITMDump()
     for name, c := range d.extraContainers {
         filename := fmt.Sprintf("container-%s.log", name)
@@ -151,7 +138,7 @@ func (d *SlidingSyncDeployment) Teardown() {
     }
 }
 
-func RunNewDeployment(t *testing.T, mitmAddonsDir, mitmDumpFile string) *SlidingSyncDeployment {
+func RunNewDeployment(t *testing.T, mitmAddonsDir, mitmDumpFile string) *ComplementCryptoDeployment {
     // allow time for everything to deploy
     ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
     defer cancel()
@@ -160,65 +147,21 @@ func RunNewDeployment(t *testing.T, mitmAddonsDir, mitmDumpFile string) *SlidingSyncDeployment {
     deployment := complement.Deploy(t, 2)
     networkName := deployment.Network()
 
-    // rather than use POSTGRES_DB which only lets us make 1 db, inject some sql
-    // to allow us to make 2 DBs, one for each SS instance on each HS.
-    createdbFile := filepath.Join(os.TempDir(), "createdb.sql")
-    err := os.WriteFile(createdbFile, []byte(`
-    CREATE DATABASE syncv3_hs1;
-    CREATE DATABASE syncv3_hs2;
-    `), fs.ModePerm)
-    if err != nil {
-        ct.Fatalf(t, "failed to write createdb.sql: %s", err)
-    }
-
-    // Make a postgres container
-    postgresContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
-        ContainerRequest: testcontainers.ContainerRequest{
-            Image:        "postgres:13-alpine",
-            ExposedPorts: []string{"5432/tcp"},
-            Env: map[string]string{
-                "POSTGRES_USER":     "postgres",
-                "POSTGRES_PASSWORD": "postgres",
-            },
-            Files: []testcontainers.ContainerFile{
-                {
-                    HostFilePath:      createdbFile,
-                    ContainerFilePath: "/docker-entrypoint-initdb.d/create-dbs.sql",
-                    FileMode:          0o777,
-                },
-            },
-            WaitingFor: wait.ForExec([]string{"pg_isready"}).WithExitCodeMatcher(func(exitCode int) bool {
-                fmt.Println("pg_isready exit code", exitCode)
-                return exitCode == 0
-            }).WithPollInterval(time.Second),
-            Networks: []string{networkName},
-            NetworkAliases: map[string][]string{
-                networkName: {"postgres"},
-            },
-        },
-        Started: true,
-    })
-    must.NotError(t, "failed to start postgres container", err)
-
     // Make the mitmproxy and hardcode CONTAINER PORTS for hs1/hs2. HOST PORTS are still dynamically allocated.
     // By running this container on the same network as the homeservers, we can leverage DNS hence hs1/hs2 URLs.
     // We also need to preload addons into the proxy, so we bind mount the addons directory. This also allows
     // test authors to easily add custom addons.
     hs1ExposedPort := "3000/tcp"
     hs2ExposedPort := "3001/tcp"
-    ss1RevProxyExposedPort := "3002/tcp"
-    ss2RevProxyExposedPort := "3003/tcp"
     controllerExposedPort := "8080/tcp" // default mitmproxy uses
     mitmContainerReq := testcontainers.ContainerRequest{
         Image:        "mitmproxy/mitmproxy:10.1.5",
-        ExposedPorts: []string{hs1ExposedPort, hs2ExposedPort, controllerExposedPort, ss1RevProxyExposedPort, ss2RevProxyExposedPort},
+        ExposedPorts: []string{hs1ExposedPort, hs2ExposedPort, controllerExposedPort},
         Env:          map[string]string{},
         Cmd: []string{
             "mitmdump",
             "--mode", "reverse:http://hs1:8008@3000",
             "--mode", "reverse:http://hs2:8008@3001",
-            "--mode", "reverse:http://ssproxy1:6789@3002",
-            "--mode", "reverse:http://ssproxy2:6789@3003",
             "--mode", "regular",
             "-w", mitmDumpFilePathOnContainer,
             "-s", "/addons/__init__.py",
@@ -253,89 +196,32 @@
 
     rpHS1URL := externalURL(t, mitmproxyContainer, hs1ExposedPort)
     rpHS2URL := externalURL(t, mitmproxyContainer, hs2ExposedPort)
-    rpSS1URL := externalURL(t, mitmproxyContainer, ss1RevProxyExposedPort)
-    rpSS2URL := externalURL(t, mitmproxyContainer, ss2RevProxyExposedPort)
     controllerURL := externalURL(t, mitmproxyContainer, controllerExposedPort)
 
-    // Make 2x sliding sync proxy
-    ssExposedPort := "6789/tcp"
-    ss1Container, err := testcontainers.GenericContainer(ctx,
-        testcontainers.GenericContainerRequest{
-            ContainerRequest: testcontainers.ContainerRequest{
-                Image:        "ghcr.io/matrix-org/sliding-sync:v0.99.17",
-                ExposedPorts: []string{ssExposedPort},
-                Env: map[string]string{
-                    "SYNCV3_SECRET":    "secret",
-                    "SYNCV3_BINDADDR":  ":6789",
-                    "SYNCV3_SERVER":    "http://hs1:8008",
-                    "SYNCV3_LOG_LEVEL": "trace",
-                    "SYNCV3_DB":        "user=postgres dbname=syncv3_hs1 sslmode=disable password=postgres host=postgres",
-                },
-                WaitingFor: wait.ForLog("listening on"),
-                Networks:   []string{networkName},
-                NetworkAliases: map[string][]string{
-                    networkName: {"ssproxy1"},
-                },
-            },
-            Started: true,
-        })
-    must.NotError(t, "failed to start sliding sync container", err)
-    ss2Container, err := testcontainers.GenericContainer(ctx,
-        testcontainers.GenericContainerRequest{
-            ContainerRequest: testcontainers.ContainerRequest{
-                Image:        "ghcr.io/matrix-org/sliding-sync:v0.99.17",
-                ExposedPorts: []string{ssExposedPort},
-                Env: map[string]string{
-                    "SYNCV3_SECRET":    "secret",
-                    "SYNCV3_BINDADDR":  ":6789",
-                    "SYNCV3_SERVER":    "http://hs2:8008",
-                    "SYNCV3_LOG_LEVEL": "trace",
-                    "SYNCV3_DB":        "user=postgres dbname=syncv3_hs2 sslmode=disable password=postgres host=postgres",
-                },
-                WaitingFor: wait.ForLog("listening on"),
-                Networks:   []string{networkName},
-                NetworkAliases: map[string][]string{
-                    networkName: {"ssproxy2"},
-                },
-            },
-            Started: true,
-        })
-    must.NotError(t, "failed to start sliding sync container", err)
-
-    ss1URL := externalURL(t, ss1Container, ssExposedPort)
-    ss2URL := externalURL(t, ss2Container, ssExposedPort)
     csapi1 := deployment.UnauthenticatedClient(t, "hs1")
     csapi2 := deployment.UnauthenticatedClient(t, "hs2")
 
     // log for debugging purposes
-    t.Logf("SlidingSyncDeployment created (network=%s):", networkName)
+    t.Logf("ComplementCryptoDeployment created (network=%s):", networkName)
     t.Logf("  NAME          INT        EXT")
-    t.Logf("  sliding sync: ssproxy1   %s (rp=%s)", ss1URL, rpSS1URL)
-    t.Logf("  sliding sync: ssproxy2   %s (rp=%s)", ss2URL, rpSS2URL)
     t.Logf("  synapse:      hs1        %s (rp=%s)", csapi1.BaseURL, rpHS1URL)
     t.Logf("  synapse:      hs2        %s (rp=%s)", csapi2.BaseURL, rpHS2URL)
-    t.Logf("  postgres:     postgres")
     t.Logf("  mitmproxy:    mitmproxy  controller=%s", controllerURL)
     // without this, GHA will fail when trying to hit the controller with "Post "http://mitm.code/options/lock": EOF"
     // suspected IPv4 vs IPv6 problems in Docker as Flask is listening on v4/v6.
     controllerURL = strings.Replace(controllerURL, "localhost", "127.0.0.1", 1)
     proxyURL, err := url.Parse(controllerURL)
     must.NotError(t, "failed to parse controller URL", err)
-    return &SlidingSyncDeployment{
+    return &ComplementCryptoDeployment{
        Deployment: deployment,
        extraContainers: map[string]testcontainers.Container{
-            "ssproxy1":  ss1Container,
-            "ssproxy2":  ss2Container,
-            "postgres":  postgresContainer,
            "mitmproxy": mitmproxyContainer,
        },
        ControllerURL: controllerURL,
        mitmClient:    mitm.NewClient(proxyURL, deployment.GetConfig().HostnameRunningComplement),
        dnsToReverseProxyURL: map[string]string{
-            "hs1":      rpHS1URL,
-            "hs2":      rpHS2URL,
-            "ssproxy1": rpSS1URL,
-            "ssproxy2": rpSS2URL,
+            "hs1": rpHS1URL,
+            "hs2": rpHS2URL,
        },
        mitmDumpFile: mitmDumpFile,
    }
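After this change the deployment is just the two Synapses behind mitmproxy. A rough sketch of the slimmed-down lifecycle, grounded in the signatures above; the addons path is a placeholder, and an empty mitmDumpFile skips dump writing per writeMITMDump's early return:

    // Sketch only: paths are placeholders.
    deployment := deploy.RunNewDeployment(t, "./mitmproxy_addons", "")
    defer deployment.Teardown()

    // CSAPI clients still go through the mitmproxy reverse proxy,
    // so tests can snoop on or modify traffic via deployment.MITM().
    alice := deployment.Register(t, "hs1", helpers.RegistrationOpts{})
    mitmClient := deployment.MITM()
    _, _ = alice, mitmClient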
7 changes: 3 additions & 4 deletions internal/tests/client_test.go
@@ -26,13 +26,13 @@ import (
 
 var (
     ssMutex      *sync.Mutex = &sync.Mutex{}
-    ssDeployment *deploy.SlidingSyncDeployment
+    ssDeployment *deploy.ComplementCryptoDeployment
     // aka functions which make clients, and we don't care about the language.
     // Tests just loop through this array for each client impl.
     clientFactories []func(t *testing.T, cfg api.ClientCreationOpts) api.TestClient
 )
 
-func Deploy(t *testing.T) *deploy.SlidingSyncDeployment {
+func Deploy(t *testing.T) *deploy.ComplementCryptoDeployment {
     ssMutex.Lock()
     defer ssMutex.Unlock()
     if ssDeployment != nil {
@@ -217,14 +217,13 @@ func TestSendingEvents(t *testing.T) {
 }
 
 // run a subtest for each client factory
-func ForEachClient(t *testing.T, name string, deployment *deploy.SlidingSyncDeployment, fn func(t *testing.T, client api.TestClient, csapi *client.CSAPI)) {
+func ForEachClient(t *testing.T, name string, deployment *deploy.ComplementCryptoDeployment, fn func(t *testing.T, client api.TestClient, csapi *client.CSAPI)) {
     for _, createClient := range clientFactories {
        csapiAlice := deployment.Register(t, "hs1", helpers.RegistrationOpts{
            LocalpartSuffix: "client",
            Password:        "complement-crypto-password",
        })
        opts := api.NewClientCreationOpts(csapiAlice)
-       opts.SlidingSyncURL = deployment.SlidingSyncURLForHS(t, "hs1")
        client := createClient(t, opts)
        t.Run(name+" "+string(client.Type()), func(t *testing.T) {
            fn(t, client, csapiAlice)
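For reference, a sketch of how a test drives ForEachClient after this change; the test and subtest names are illustrative:

    // Sketch only: note there is no SlidingSyncURLForHS plumbing any more.
    func TestSendMessage(t *testing.T) {
        ForEachClient(t, "send_message", Deploy(t), func(t *testing.T, client api.TestClient, csapi *client.CSAPI) {
            // exercise `client` (a crypto-capable client) and `csapi`
            // (raw CSAPI access for the same user) here.
        })
    }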