Add v6 vulnerability & blob stores (#2243)
* add vulnerability and blob stores
* allow for multiple severity types
* add type for db ids
* explain why we drop blob digests

Signed-off-by: Alex Goodman <[email protected]>
Showing 11 changed files with 736 additions and 16 deletions.
@@ -0,0 +1,115 @@
package v6

import (
    "encoding/json"
    "errors"
    "fmt"
    "strings"

    "gorm.io/gorm"

    "github.com/anchore/grype/internal/log"
)

type blobable interface {
    getBlobValue() any
    setBlobID(ID)
}

type blobStore struct {
    db *gorm.DB
}

func newBlobStore(db *gorm.DB) *blobStore {
    return &blobStore{
        db: db,
    }
}

func (s *blobStore) addBlobable(bs ...blobable) error {
    for i := range bs {
        b := bs[i]
        v := b.getBlobValue()
        if v == nil {
            continue
        }
        bl := newBlob(v)

        if err := s.addBlobs(bl); err != nil {
            return err
        }

        b.setBlobID(bl.ID)
    }
    return nil
}

func (s *blobStore) addBlobs(blobs ...*Blob) error {
    for i := range blobs {
        v := blobs[i]
        digest := v.computeDigest()

        var blobDigest BlobDigest
        err := s.db.Where("id = ?", digest).First(&blobDigest).Error
        if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
            return fmt.Errorf("failed to get blob digest: %w", err)
        }

        if blobDigest.BlobID != 0 {
            v.ID = blobDigest.BlobID
            continue
        }

        if err := s.db.Create(v).Error; err != nil {
            return fmt.Errorf("failed to create blob: %w", err)
        }

        blobDigest = BlobDigest{
            ID:     digest,
            BlobID: v.ID,
        }
        if err := s.db.Create(blobDigest).Error; err != nil {
            return fmt.Errorf("failed to create blob digest: %w", err)
        }
    }
    return nil
}

func (s *blobStore) getBlobValue(id ID) (string, error) {
    var blob Blob
    if err := s.db.First(&blob, id).Error; err != nil {
        return "", err
    }
    return blob.Value, nil
}

func (s *blobStore) Close() error {
    var count int64
    if err := s.db.Model(&Blob{}).Count(&count).Error; err != nil {
        return fmt.Errorf("failed to count blobs: %w", err)
    }

    log.WithFields("records", count).Trace("finalizing blobs")

    // we use the blob_digests table when writing entries to ensure we have unique blobs, but for distribution this
    // is no longer needed and saves on space considerably. For this reason, we drop the table after we are
    // done writing blobs so that the DB is always in a distributable state.
    if err := s.db.Exec("DROP TABLE blob_digests").Error; err != nil {
        return fmt.Errorf("failed to drop blob digests: %w", err)
    }
    return nil
}

func newBlob(obj any) *Blob {
    sb := strings.Builder{}
    enc := json.NewEncoder(&sb)
    enc.SetEscapeHTML(false)

    if err := enc.Encode(obj); err != nil {
        panic("could not marshal object to json")
    }

    return &Blob{
        Value: sb.String(),
    }
}
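The store only references the Blob, BlobDigest, and ID types; their definitions live in other files of this PR and are not shown in this hunk. To make the control flow above easier to follow, here is a minimal, hypothetical sketch of what those shapes and a blobable implementation could look like. The GORM tags, the underlying type of ID, and the exampleRecord type are all assumptions for illustration, not code from the commit.

// Illustrative only: assumed shapes for types defined elsewhere in this PR.

// ID is assumed to be an integer surrogate key ("add type for db ids" in the
// commit message).
type ID int64

// Blob holds a JSON-encoded payload shared by any records that reference it.
type Blob struct {
    ID    ID     `gorm:"column:id;primaryKey"`
    Value string `gorm:"column:value;not null"`
}

// BlobDigest maps a content digest to the blob row holding that content.
type BlobDigest struct {
    ID     string `gorm:"column:id;primaryKey"` // digest of the blob value
    BlobID ID     `gorm:"column:blob_id"`
}

// exampleRecord is a hypothetical record type that stores its payload as a blob.
type exampleRecord struct {
    ID      ID
    BlobID  ID
    payload any
}

func (r *exampleRecord) getBlobValue() any { return r.payload }
func (r *exampleRecord) setBlobID(id ID)   { r.BlobID = id }

// usage sketch:
//   store := newBlobStore(db)
//   rec := &exampleRecord{payload: map[string]string{"cve": "CVE-2024-0001"}}
//   if err := store.addBlobable(rec); err != nil {
//       return err
//   }
//   // rec.BlobID now references the (possibly shared) Blob row

The point of the indirection is that any record type able to hand over a JSON-serializable payload gets deduplicated storage for free: identical payloads hash to the same digest and therefore end up sharing a single Blob row.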
@@ -0,0 +1,61 @@
package v6

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestBlobWriter_AddBlobs(t *testing.T) {
    db := setupTestDB(t)
    writer := newBlobStore(db)

    obj1 := map[string]string{"key": "value1"}
    obj2 := map[string]string{"key": "value2"}

    blob1 := newBlob(obj1)
    blob2 := newBlob(obj2)
    blob3 := newBlob(obj1) // same as blob1

    err := writer.addBlobs(blob1, blob2, blob3)
    require.NoError(t, err)

    require.NotZero(t, blob1.ID)
    require.Equal(t, blob1.ID, blob3.ID) // blob3 should have the same ID as blob1 (natural deduplication)

    var result1 Blob
    require.NoError(t, db.Where("id = ?", blob1.ID).First(&result1).Error)
    assert.Equal(t, blob1.Value, result1.Value)

    var result2 Blob
    require.NoError(t, db.Where("id = ?", blob2.ID).First(&result2).Error)
    assert.Equal(t, blob2.Value, result2.Value)
}

func TestBlobWriter_Close(t *testing.T) {
    db := setupTestDB(t)
    writer := newBlobStore(db)

    obj := map[string]string{"key": "value"}
    blob := newBlob(obj)
    require.NoError(t, writer.addBlobs(blob))

    // ensure the blob digest table is created
    var blobDigest BlobDigest
    require.NoError(t, db.First(&blobDigest).Error)
    require.NotZero(t, blobDigest.ID)

    err := writer.Close()
    require.NoError(t, err)

    // ensure the blob digest table is deleted
    err = db.First(&blobDigest).Error
    require.ErrorContains(t, err, "no such table: blob_digests")
}

func TestBlob_computeDigest(t *testing.T) {
    assert.Equal(t, "xxh64:0e6882304e9adbd5", Blob{Value: "test content"}.computeDigest())

    assert.Equal(t, "xxh64:ea0c19ae9fbd93b3", Blob{Value: "different content"}.computeDigest())
}