-
Notifications
You must be signed in to change notification settings - Fork 2
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Store shard metadata in S3, add a tailing facility #5
base: master
Are you sure you want to change the base?
Changes from all commits
d066ac9
634562d
db02b28
5e551d6
5945977
57fc259
2fe1a01
bcf27c0
dfdaccd
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,5 @@ | ||
build/ | ||
src/github.com/ | ||
src/gopkg.in/ | ||
/.go/ | ||
cscope.* |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,3 +16,10 @@ build: | |
|
||
clean: | ||
rm -rf build | ||
|
||
cscope: | ||
find $$GOPATH/src -type f -iname "*.go"> cscope.files | ||
cscope -b -k | ||
|
||
tags: | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,82 @@ | ||
package triton | ||
|
||
import ( | ||
"fmt" | ||
"regexp" | ||
"strconv" | ||
"strings" | ||
"time" | ||
) | ||
|
||
// ArchiveKey is a struct representing the path value for the Triton S3 keys | ||
type ArchiveKey struct { | ||
Client string | ||
Stream string | ||
Time time.Time | ||
} | ||
|
||
// Path encodes the ArchiveKey to a string path | ||
func (a ArchiveKey) Path() string { | ||
return fmt.Sprintf("%04d%02d%02d/%s-%d.tri", a.Time.Year(), a.Time.Month(), a.Time.Day(), a.fullStreamName(), a.Time.Unix()) | ||
} | ||
|
||
const ( | ||
metadataSuffix = ".metadata" | ||
) | ||
|
||
// MetadataPath encodes the ArchiveKey to a string path with the metadata suffix applied | ||
func (a ArchiveKey) MetadataPath() string { | ||
return a.Path() + metadataSuffix | ||
} | ||
|
||
// fullStreamName returns the full stream name (stream + "-" + client) if there is a client name or just stream | ||
func (a ArchiveKey) fullStreamName() (stream string) { | ||
stream = a.Stream | ||
if a.Client != "" { | ||
stream += "-" + a.Client | ||
} | ||
return | ||
} | ||
|
||
// PathPrefix returns the string key prefix without the timestamp | ||
func (a ArchiveKey) PathPrefix() string { | ||
return fmt.Sprintf("%04d%02d%02d/%s-", a.Time.Year(), a.Time.Month(), a.Time.Day(), a.fullStreamName()) | ||
} | ||
|
||
func (a ArchiveKey) Equal(other ArchiveKey) (result bool) { | ||
if a.Stream != other.Stream { | ||
return false | ||
} | ||
if a.Time.Truncate(time.Second) != other.Time.Truncate(time.Second) { | ||
return false | ||
} | ||
if a.Client != other.Client { | ||
return false | ||
} | ||
return true | ||
} | ||
|
||
var archiveKeyPattern = regexp.MustCompile(`^/?(?P<day>\d{8})\/(?P<stream>.+)\-(?P<ts>\d+)\.tri$`) | ||
|
||
// Decode an archive S3 key into an ArchiveKey | ||
func DecodeArchiveKey(keyName string) (a ArchiveKey, err error) { | ||
res := archiveKeyPattern.FindStringSubmatch(keyName) | ||
if res == nil { | ||
err = fmt.Errorf("Invalid key name") | ||
return | ||
} | ||
ts, err := strconv.ParseInt(res[3], 10, 64) | ||
if err != nil { | ||
err = fmt.Errorf("Failed to parse timestamp value: %s", err.Error()) | ||
return | ||
} | ||
a.Time = time.Unix(ts, 0) | ||
nameParts := strings.Split(res[2], "-") | ||
if len(nameParts) != 2 { | ||
err = fmt.Errorf("Failure parsing stream name: %v", res[2]) | ||
return | ||
} | ||
a.Stream = nameParts[0] | ||
a.Client = nameParts[1] | ||
return | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,20 @@ | ||
package triton | ||
|
||
import ( | ||
"testing" | ||
"time" | ||
) | ||
|
||
func TestArchiveKeyPathCodec(t *testing.T) { | ||
aTime := time.Now() | ||
archiveKey := ArchiveKey{Time: aTime, Stream: "a", Client: "b"} | ||
archiveKey2, err := DecodeArchiveKey(archiveKey.Path()) | ||
|
||
if err != nil { | ||
t.Fatalf("unexpected error: %s", err.Error()) | ||
} | ||
if !archiveKey.Equal(archiveKey2) { | ||
t.Fatalf("expecting %+v == %+v", archiveKey, archiveKey2) | ||
} | ||
|
||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,100 @@ | ||
package triton | ||
|
||
import ( | ||
"bytes" | ||
"encoding/json" | ||
"fmt" | ||
"github.com/aws/aws-sdk-go/aws" | ||
"github.com/aws/aws-sdk-go/aws/awserr" | ||
"github.com/aws/aws-sdk-go/service/s3" | ||
"github.com/aws/aws-sdk-go/service/s3/s3manager" | ||
"io" | ||
"io/ioutil" | ||
"sort" | ||
"strings" | ||
"time" | ||
) | ||
|
||
// ArchiveRepository manages reading and writing Archives
type ArchiveRepository struct {
	s3Service  S3Service         // S3 API used to list/read archive objects
	s3Uploader S3UploaderService // managed uploader used to write objects
	stream     string            // stream name whose archives this repository serves
	bucket     string            // S3 bucket holding the archives
	client     string            // client name; may be empty (see ArchiveKey.fullStreamName)
}

// NewArchiveRepository constructs an ArchiveRepository for the given
// bucket, stream, and client, backed by the supplied S3 service and
// uploader.
func NewArchiveRepository(s3Service S3Service, s3Uploader S3UploaderService, bucket string, stream string, client string) *ArchiveRepository {
	return &ArchiveRepository{
		s3Service:  s3Service,
		s3Uploader: s3Uploader,
		bucket:     bucket,
		stream:     stream,
		client:     client,
	}
}
|
||
// Upload the archive for a stream at Time t | ||
func (ar *ArchiveRepository) Upload(t time.Time, contents io.ReadCloser, metadata *StreamMetadata) (err error) { | ||
archiveKey := ArchiveKey{Stream: ar.stream, Time: t, Client: ar.client} | ||
_, err = ar.s3Uploader.Upload(&s3manager.UploadInput{ | ||
Bucket: aws.String(ar.bucket), | ||
Key: aws.String(archiveKey.Path()), | ||
Body: contents, | ||
}) | ||
if err != nil { | ||
if awsErr, ok := err.(awserr.Error); ok { | ||
return fmt.Errorf("Failed to upload: %v (%v)", awsErr.Code(), awsErr.Message()) | ||
} | ||
return | ||
} | ||
var buf bytes.Buffer | ||
err = json.NewEncoder(&buf).Encode(metadata) | ||
if err != nil { | ||
return | ||
} | ||
_, err = ar.s3Uploader.Upload(&s3manager.UploadInput{ | ||
Bucket: aws.String(ar.bucket), | ||
Key: aws.String(archiveKey.MetadataPath()), | ||
Body: ioutil.NopCloser(&buf), | ||
}) | ||
if err != nil { | ||
if awsErr, ok := err.(awserr.Error); ok { | ||
return fmt.Errorf("Failed to upload metadata: %v (%v)", awsErr.Code(), awsErr.Message()) | ||
} | ||
return | ||
} | ||
return | ||
} | ||
|
||
// ArchivesAtDate lists all the archives for a stream stored at a UTC date represented by aDate | ||
func (ar *ArchiveRepository) ArchivesAtDate(aDate time.Time) (result []StoreArchive, err error) { | ||
keyPrefix := ArchiveKey{Time: aDate, Stream: ar.stream, Client: ar.client}.PathPrefix() | ||
keys := []string{} | ||
err = ar.s3Service.ListObjectsPages(&s3.ListObjectsInput{ | ||
Bucket: aws.String(ar.bucket), | ||
Prefix: aws.String(keyPrefix), | ||
}, func(output *s3.ListObjectsOutput, lastPage bool) (shouldContinue bool) { | ||
for _, object := range output.Contents { | ||
keys = append(keys, *object.Key) | ||
} | ||
return true | ||
}) | ||
if err != nil { | ||
return | ||
} | ||
sort.Sort(sort.StringSlice(keys)) | ||
for _, key := range keys { | ||
if strings.HasSuffix(key, metadataSuffix) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Just a thought, but maybe you should just attempt to create archives for every key it finds, and let the DecodeArchiveKey figure out if it's a valid key to use or not. Seems like it would be safer to allow unrecognizable keys to exist for future backwards compatibility reasons too? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Seems reasonable On Fri, Dec 18, 2015 at 3:05 PM, Rhett Garber [email protected]
|
||
continue | ||
} | ||
var sa StoreArchive | ||
sa, err = NewStoreArchive(ar.bucket, key, ar.s3Service) | ||
if err != nil { | ||
err = fmt.Errorf("failed to create store archive for %q: %s", key, err) | ||
return | ||
} | ||
result = append(result, sa) | ||
} | ||
return | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think we need to rethink these 'public' interfaces.
Like perhaps the interfaces should all be "NewStreamReader"-ish with combinations for: