-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathmain_test.go
459 lines (388 loc) · 13.5 KB
/
main_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
package main
import (
	"database/sql"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
	"github.com/jackc/pgx"
	_ "github.com/lib/pq"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"gopkg.in/ory-am/dockertest.v3"
)
// Fake AWS credentials/region pointed at the local Kinesis container.
var (
	aws_region            = "us-east-1"
	aws_access_key_id     = "FAKE"
	aws_secret_access_key = "FAKE"
)

// Fixture configuration: container images, stream settings, and database
// connection parameters used by setUp().
var (
	host            = "localhost"
	slotName        = "pg_kinesis"
	kinesisImage    = "vsouza/kinesis-local"
	kinesisImageTag = "latest"
	kinesisStream   = "pgkinesis"
	kinesisShards   = 3
	dbImage         = "postgres"
	dbImageTag      = "9.6"
	dbUser          = "pgkinesis"
	dbPass          = "pgkinesis"
	dbName          = "pgkinesis"
	dbPort          = "5432"
)

// Shared handles populated by setUp() and used throughout the tests.
var (
	testDb            *sql.DB
	testKinesisClient *kinesis.Kinesis
	dbConnString      string
	dockerPool        *dockertest.Pool
	kinesisContainer  *dockertest.Resource
	dbContainer       *dockertest.Resource
)
// ** Helper Functions **

// logContainerInfo gives the user some information about a Docker container
// we started on their behalf: its name, ID, and exposed ports.
func logContainerInfo(container *dockertest.Resource) {
	log.Printf(
		"Started up %s (%s), listening on %s",
		container.Container.Name,
		container.Container.ID,
		container.Container.Config.ExposedPorts,
	)
}
// setUp provisions all of the backend resources required to run live
// integration tests.
//
// It spins up a Postgres container and a "local" Kinesis container,
// storing access handles in the package-level globals above. The Kinesis
// client is used to create the test stream and inspect the data we put
// there; the database handle is used to create changes that we expect to
// see flow through to Kinesis.
//
// Returns an error if any container fails to start or become reachable;
// the caller is expected to invoke tearDown() on failure.
func setUp() error {
	log.Println("Starting up Docker resources")

	var err error

	// Set up our local Docker connection dockerPool.
	dockerPool, err = dockertest.NewPool("")
	if err != nil {
		return err
	}

	// Start up Postgres - customize it by passing in our own entrypoint
	// directory that tweaks the Postgres configuration to enable WAL
	// replication.
	entrypointDir, _ := filepath.Abs("resources/docker-entrypoint-initdb.d")
	dbContainer, err = dockerPool.RunWithOptions(&dockertest.RunOptions{
		Repository: dbImage,
		Tag:        dbImageTag,
		Mounts:     []string{entrypointDir + ":/docker-entrypoint-initdb.d"},
		Env: []string{
			"POSTGRES_PASSWORD=" + dbPass,
			"POSTGRES_USER=" + dbUser,
			"POSTGRES_DB=" + dbName,
			"POSTGRES_PORT=" + dbPort,
		},
	})
	if err != nil {
		return err
	}

	// Wait for Docker to tell us the container is up, and then create a DB
	// connection.
	err = dockerPool.Retry(func() error {
		var err error

		// Stored globally so that we can pass this into mainLoop() for
		// testing.
		dbConnString = fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable",
			dbUser, dbPass, host, dbContainer.GetPort("5432/tcp"), dbName)
		testDb, err = sql.Open("postgres", dbConnString)
		if err != nil {
			return err
		}

		// Ensures that later when we call pg_kinesis, which has no arguments
		// and relies on the LibPQ os.Getenv() function, it knows how to
		// connect to our test DB.
		os.Setenv("PGHOST", host)
		os.Setenv("PGUSER", dbUser)
		os.Setenv("PGPASSWORD", dbPass)
		// GetPort already returns a string; no conversion needed.
		os.Setenv("PGPORT", dbContainer.GetPort("5432/tcp"))

		return testDb.Ping()
	})
	if err != nil {
		return err
	}

	// Start up fake Kinesis - note, for some strange reason the container
	// requires that we pass it at least one option, or it won't start up.
	// Even though this option claims to have a default value, this seems to
	// work well enough.
	kinesisContainer, err = dockerPool.RunWithOptions(&dockertest.RunOptions{
		Repository: kinesisImage,
		Tag:        kinesisImageTag,
		Cmd:        []string{"--createStreamMS 0"},
	})
	if err != nil {
		return err
	}

	// Wait for Docker to tell us the container is up, and then create the
	// test stream in the local Kinesis service.
	err = dockerPool.Retry(func() error {
		// Fake out the AWS region environment variable that's required.
		os.Setenv("AWS_REGION", aws_region)

		// Generate our fake Kinesis Amazon client - this will be used to
		// prepare the Kinesis streams for testing.
		testKinesisClient = kinesis.New(session.New(aws.NewConfig().
			WithRegion(aws_region).
			WithCredentials(credentials.NewStaticCredentials(aws_access_key_id, aws_secret_access_key, "")).
			WithEndpoint("http://" + host + ":" + kinesisContainer.GetPort("4567/tcp")).
			WithDisableSSL(true),
		))

		// In the main package, set our KinesisClient object with this one we
		// just created.
		kinesisClient = testKinesisClient

		// Create our stream - no need for the data to come back.
		_, err := testKinesisClient.CreateStream(&kinesis.CreateStreamInput{
			ShardCount: aws.Int64(int64(kinesisShards)),
			StreamName: aws.String(kinesisStream),
		})
		return err
	})
	if err != nil {
		return err
	}

	// Just some debug info.
	logContainerInfo(dbContainer)
	logContainerInfo(kinesisContainer)
	return nil
}
// tearDown destroys the Docker resources after any successful test run. It is
// also called aggressively during setUp just in case that fails, so each
// container is purged only if it was actually started. Purge failures are
// logged (previously they were silently discarded) but do not abort teardown.
func tearDown() {
	log.Println("Tearing down resources")
	if dbContainer != nil {
		if err := dockerPool.Purge(dbContainer); err != nil {
			log.Printf("Could not purge db container: %s", err)
		}
	}
	if kinesisContainer != nil {
		if err := dockerPool.Purge(kinesisContainer); err != nil {
			log.Printf("Could not purge kinesis container: %s", err)
		}
	}
}
// Maybe move this into another test file?

// TestUser mirrors the columns of the "users" table created by
// createTestDatabase below, one field per column.
type TestUser struct{
	id         int    // id SERIAL PRIMARY KEY
	age        int    // age INT
	first_name string // first_name TEXT
	last_name  string // last_name TEXT
	email      string // email TEXT UNIQUE NOT NULL
}
// createTestDatabase creates the "users" table that the integration tests
// insert into. Assumes testDb has already been connected by setUp().
//
// Returns an error (wrapped with context) if the CREATE TABLE fails.
func createTestDatabase() error {
	tableCreateStatement := `
CREATE TABLE users (
id SERIAL PRIMARY KEY,
age INT,
first_name TEXT,
last_name TEXT,
email TEXT UNIQUE NOT NULL
);`
	_, err := testDb.Exec(tableCreateStatement)
	if err != nil {
		// Wrap, not Wrapf: there are no format arguments.
		return errors.Wrap(err, "Test Failure - Unable to create table")
	}
	return nil
}
// createTestUser inserts a single row into the users table. The generated id
// from the RETURNING clause is discarded since we use Exec rather than
// QueryRow.
//
// Returns an error (wrapped with context) if the INSERT fails.
func createTestUser(age int, firstName string, lastName string,
	email string) error {
	insertStatement := `
INSERT INTO users
(age, first_name, last_name, email)
VALUES($1, $2, $3, $4)
RETURNING id`
	_, err := testDb.Exec(insertStatement, age, firstName, lastName, email)
	if err != nil {
		// Wrap, not Wrapf (no format arguments); also fixes the grammar of
		// the original message ("Unable create user").
		return errors.Wrap(err, "Test Failure - Unable to create user")
	}
	return nil
}
// getKinesisData connects into Kinesis and pulls all the data off of it.
//
// We get the list of shards for the test stream and, for each shard in a row,
// fetch all of its records and append them in order. The pattern looks like
// this:
//
//	Shard1: RecordA,RecordB,RecordC
//	Shard2: Record1,Record2,Record3
//
// Turns into:
//	[RecordA, RecordB, RecordC, Record1, Record2, Record3]
//
// NOTE(review): a single GetRecords call per shard (limit 10000) is assumed
// to be enough for these tests; NextShardIterator pagination is not handled.
func getKinesisData() ([]*kinesis.Record, error) {
	// Get our stream description back from Kinesis...
	stream, err := testKinesisClient.DescribeStream(
		&kinesis.DescribeStreamInput{StreamName: &kinesisStream})
	if err != nil {
		return nil, errors.Wrap(err, "Unable to describe Kinesis Streams")
	}

	// These are the same for every shard, so declare them once outside the
	// loop (the SDK wants pointers, hence the named locals).
	shardIteratorType := "TRIM_HORIZON"
	var limit int64 = 10000

	var records []*kinesis.Record
	for _, shard := range stream.StreamDescription.Shards {
		// Get a fresh shard iterator for this shard.
		it, err := testKinesisClient.GetShardIterator(&kinesis.GetShardIteratorInput{
			ShardId:           shard.ShardId,
			ShardIteratorType: &shardIteratorType,
			StreamName:        &kinesisStream,
		})
		if err != nil {
			return nil, errors.Wrap(err, "Unable to get our Kinesis Shard Iterator")
		}

		// Get the records off that shard.
		resp, err := testKinesisClient.GetRecords(&kinesis.GetRecordsInput{
			Limit:         &limit,
			ShardIterator: it.ShardIterator,
		})
		if err != nil {
			// Identify the shard by its id rather than formatting the whole
			// Shard struct into the message.
			return nil, errors.Wrapf(err, "Unable to get our Kinesis Records from %s", *shard.ShardId)
		}

		// Debug
		log.Printf("%s has %d records", *shard.ShardId, len(resp.Records))
		records = append(records, resp.Records...)
	}
	return records, nil
}
// TestMain overrides the default test runner, wrapping it with the setUp()
// and tearDown() calls so the Docker fixtures exist for the whole suite.
func TestMain(m *testing.M) {
	// If setUp fails for any reason, clean up whatever was started before
	// bailing out.
	if err := setUp(); err != nil {
		tearDown()
		log.Fatalf("Could not start resource: %s", err)
	}

	// Run the suite, save its status code, tear everything down, and exit.
	exitCode := m.Run()
	tearDown()
	os.Exit(exitCode)
}
// ** Tests Begin Here **

// TestCreateReplicationSlot is a simple test to validate that creating our
// replication slot from scratch works.
func TestCreateReplicationSlot(t *testing.T) {
	assert := assert.New(t)

	// Set main.done to true up front. This tricks the app into stopping
	// before it tries to start up any goroutines.
	done.SetTo(true)

	// Call out to the mainLoop - because done is already set, this exits
	// right back, so to keep the test simple we don't run it in the
	// background. (The parse error was previously ignored.)
	sourceConfig, err := pgx.ParseConnectionString(dbConnString)
	if err != nil {
		t.Fatalf("Could not parse connection string: %s", err)
	}
	mainLoop(slotName, false, true, kinesisStream, true, sourceConfig)

	// Verify that the decoding plugin of the slot is test_decoding, as
	// expected. Uses a parameterized query instead of string concatenation.
	expectedPlugin := "test_decoding"
	var actualPlugin string
	err = testDb.QueryRow(
		"SELECT plugin FROM pg_replication_slots WHERE slot_name = $1;",
		slotName).
		Scan(&actualPlugin)
	if err != nil {
		t.Errorf("Error: %s", err)
	}
	// testify's Equal takes (expected, actual, ...) - the original had them
	// swapped, which garbles the failure message.
	assert.Equal(expectedPlugin, actualPlugin, "Replication slot type should match")
}
// TestMainLoop is the full test suite.
//
// Creates a new pg_kinesis mainLoop() against a live database, inserts a
// thousand records, streams the records to Kinesis, and finally picks the
// records up from Kinesis and validates the content of each and every one
// of them.
func TestMainLoop(t *testing.T) {
	assert := assert.New(t)

	// How many records should we INSERT into the database?
	testRecordCount := 1000

	// Will capture the exit status of pg_kinesis when we gracefully stop it.
	resCh := make(chan error)

	// Will be used to signal that the initiallyConnected bool has been set by
	// pg_kinesis. This ensures that we don't try to write anything to the DB
	// until after pg_kinesis is connected.
	connectedCh := make(chan bool)

	// Ensure we are going to run the full mainLoop().
	done.SetTo(false)

	// Call out to the main loop - in the background. Output is pumped into
	// resCh for validation later. (The parse error was previously ignored.)
	sourceConfig, err := pgx.ParseConnectionString(dbConnString)
	if err != nil {
		t.Fatalf("Could not parse connection string: %s", err)
	}
	go func(ch chan error) {
		ch <- mainLoop(slotName, false, false, kinesisStream, false, sourceConfig)
	}(resCh)

	// In the background, check for the initiallyConnected variable to flip to
	// true. Once it has, we know that pg_kinesis is connected up to postgres
	// properly and we can begin writing to the database.
	go func(ch chan bool) {
		for !initiallyConnected {
			time.Sleep(1 * time.Second)
		}
		ch <- true
	}(connectedCh)

	// Block - either for pg_kinesis to exit (which would be unexpected), for
	// our initially-connected check to pass (that's the hope), or finally for
	// a timeout (after 10s) which indicates something went wrong.
	select {
	case <-resCh:
		t.Error("Exited unexpectedly")
	case <-connectedCh:
		log.Printf("MainLoop connected")
	case <-time.After(10 * time.Second):
		t.Error("Exited unexpectedly with timeout")
	}

	// Insert data into the database.
	err = createTestDatabase()
	assert.Nil(err)

	// Create test users. strconv.Itoa is used here deliberately: the original
	// string(i) converts an int to the rune with that code point (e.g.
	// string(5) == "\u0005"), producing control characters in the names and
	// emails instead of decimal numbers.
	for i := 1; i <= testRecordCount; i++ {
		n := strconv.Itoa(i)
		err = createTestUser(10+i, "Bob"+n, "Barker", "bob"+n+"@test.com")
		assert.Nil(err)
	}

	// TEMP - need to wait until records have been flushed.
	time.Sleep(3 * time.Second)

	// Get our data from the Kinesis stream.
	records, err := getKinesisData()
	assert.Nil(err)

	// Verify that we got all the records back.
	assert.Equal(testRecordCount, len(records))

	// Verify that each record is correct.
	for _, record := range records {
		// Test that the PartitionKey was set correctly. Note, this currently
		// means that if you have a heavy-write volume on a single table, you
		// will be unable to scale past 1 shard for that table. This should be
		// refactored ASAP to be configurable.
		assert.EqualValues("public.users", string(*record.PartitionKey))

		// Deserialize the data back into jsonWalEntry objects so we can
		// validate their content.
		//
		// The data in Kinesis is a []byte array that can be decoded into a
		// single JSON line, shaped like:
		// {
		//   "time": "2018-05-04T10:31:44-07:00",
		//   "lsn": "0/1514C80",
		//   "table": "public.users",
		//   "operation": "INSERT",
		//   "columns": {
		//     "age":        {"new": {"q": "false", "t": "integer", "v": "15"}},
		//     "email":      {"new": {"q": "true",  "t": "text",    "v": "bob5@test.com"}},
		//     "first_name": {"new": {"q": "true",  "t": "text",    "v": "Bob5"}},
		//     "id":         {"new": {"q": "false", "t": "integer", "v": "5"}},
		//     "last_name":  {"new": {"q": "true",  "t": "text",    "v": "Barker"}}
		//   }
		// }
		var entry *jsonWalEntry
		err := json.Unmarshal(record.Data, &entry)
		assert.Nil(err)
		assert.EqualValues("public.users", string(*entry.Table))
		assert.EqualValues("INSERT", string(*entry.Operation))
	}

	// Final teardown: signal mainLoop to stop, then wait for it to exit.
	done.SetTo(true)
	select {
	case <-resCh:
	case <-time.After(10 * time.Second):
		t.Error("Teardown took longer than 10s!")
	}
}