diff --git a/.vscode/settings.json b/.vscode/settings.json
index 18ce17c..343ffc3 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -6,6 +6,7 @@
"blobtype",
"blobtypes",
"bmap",
+ "chacha",
"cinode",
"cinodefs",
"cipherfactory",
@@ -15,13 +16,16 @@
"dynamiclink",
"elink",
"fifos",
+ "fsys",
"goveralls",
"Hasher",
+ "homefile",
"jbenet",
"protobuf",
"securefifo",
"shogo",
"stretchr",
+ "subdir",
"testblobs",
"testvectors",
"validatingreader"
diff --git a/cmd/static_datastore_builder/main.go b/cmd/static_datastore_builder/main.go
index 121d74e..639ece2 100644
--- a/cmd/static_datastore_builder/main.go
+++ b/cmd/static_datastore_builder/main.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,8 +16,15 @@ limitations under the License.
package main
-import "github.com/cinode/go/pkg/cmd/static_datastore"
+import (
+ "context"
+ "log"
+
+ "github.com/cinode/go/pkg/cmd/static_datastore"
+)
func main() {
- static_datastore.Execute()
+ if err := static_datastore.Execute(context.Background()); err != nil {
+ log.Fatal(err.Error())
+ }
}
diff --git a/pkg/blenc/datastore.go b/pkg/blenc/datastore.go
index e10fd26..07fe94b 100644
--- a/pkg/blenc/datastore.go
+++ b/pkg/blenc/datastore.go
@@ -25,7 +25,6 @@ import (
"github.com/cinode/go/pkg/blobtypes"
"github.com/cinode/go/pkg/common"
"github.com/cinode/go/pkg/datastore"
- "github.com/cinode/go/pkg/internal/utilities/cipherfactory"
"github.com/cinode/go/pkg/internal/utilities/securefifo"
)
@@ -51,7 +50,7 @@ type beDatastore struct {
newSecureFifo secureFifoGenerator
}
-func (be *beDatastore) Open(ctx context.Context, name common.BlobName, key cipherfactory.Key) (io.ReadCloser, error) {
+func (be *beDatastore) Open(ctx context.Context, name *common.BlobName, key *common.BlobKey) (io.ReadCloser, error) {
switch name.Type() {
case blobtypes.Static:
return be.openStatic(ctx, name, key)
@@ -66,9 +65,9 @@ func (be *beDatastore) Create(
blobType common.BlobType,
r io.Reader,
) (
- common.BlobName,
- cipherfactory.Key,
- AuthInfo,
+ *common.BlobName,
+ *common.BlobKey,
+ *common.AuthInfo,
error,
) {
switch blobType {
@@ -80,7 +79,7 @@ func (be *beDatastore) Create(
return nil, nil, nil, blobtypes.ErrUnknownBlobType
}
-func (be *beDatastore) Update(ctx context.Context, name common.BlobName, authInfo AuthInfo, key cipherfactory.Key, r io.Reader) error {
+func (be *beDatastore) Update(ctx context.Context, name *common.BlobName, authInfo *common.AuthInfo, key *common.BlobKey, r io.Reader) error {
switch name.Type() {
case blobtypes.Static:
return be.updateStatic(ctx, name, authInfo, key, r)
@@ -90,10 +89,10 @@ func (be *beDatastore) Update(ctx context.Context, name common.BlobName, authInf
return blobtypes.ErrUnknownBlobType
}
-func (be *beDatastore) Exists(ctx context.Context, name common.BlobName) (bool, error) {
+func (be *beDatastore) Exists(ctx context.Context, name *common.BlobName) (bool, error) {
return be.ds.Exists(ctx, name)
}
-func (be *beDatastore) Delete(ctx context.Context, name common.BlobName) error {
+func (be *beDatastore) Delete(ctx context.Context, name *common.BlobName) error {
return be.ds.Delete(ctx, name)
}
diff --git a/pkg/blenc/datastore_dynamic_link.go b/pkg/blenc/datastore_dynamic_link.go
index 8935911..fdb670a 100644
--- a/pkg/blenc/datastore_dynamic_link.go
+++ b/pkg/blenc/datastore_dynamic_link.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +17,6 @@ limitations under the License.
package blenc
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -25,7 +24,6 @@ import (
"github.com/cinode/go/pkg/common"
"github.com/cinode/go/pkg/internal/blobtypes/dynamiclink"
- "github.com/cinode/go/pkg/internal/utilities/cipherfactory"
)
var (
@@ -37,8 +35,8 @@ var (
func (be *beDatastore) openDynamicLink(
ctx context.Context,
- name common.BlobName,
- key cipherfactory.Key,
+ name *common.BlobName,
+ key *common.BlobKey,
) (
io.ReadCloser,
error,
@@ -77,9 +75,9 @@ func (be *beDatastore) createDynamicLink(
ctx context.Context,
r io.Reader,
) (
- common.BlobName,
- cipherfactory.Key,
- AuthInfo,
+ *common.BlobName,
+ *common.BlobKey,
+ *common.AuthInfo,
error,
) {
version := be.generateVersion()
@@ -109,9 +107,9 @@ func (be *beDatastore) createDynamicLink(
func (be *beDatastore) updateDynamicLink(
ctx context.Context,
- name common.BlobName,
- authInfo AuthInfo,
- key cipherfactory.Key,
+ name *common.BlobName,
+ authInfo *common.AuthInfo,
+ key *common.BlobKey,
r io.Reader,
) error {
newVersion := be.generateVersion()
@@ -127,10 +125,10 @@ func (be *beDatastore) updateDynamicLink(
}
// Sanity checks
- if !bytes.Equal(encryptionKey, key) {
+ if !encryptionKey.Equal(key) {
return ErrDynamicLinkUpdateFailedWrongKey
}
- if !bytes.Equal(name, dl.BlobName()) {
+ if !name.Equal(dl.BlobName()) {
return ErrDynamicLinkUpdateFailedWrongName
}
diff --git a/pkg/blenc/datastore_dynamic_link_test.go b/pkg/blenc/datastore_dynamic_link_test.go
index f9777d4..f124b6b 100644
--- a/pkg/blenc/datastore_dynamic_link_test.go
+++ b/pkg/blenc/datastore_dynamic_link_test.go
@@ -34,18 +34,18 @@ import (
type dsWrapper struct {
datastore.DS
- openFn func(ctx context.Context, name common.BlobName) (io.ReadCloser, error)
- updateFn func(ctx context.Context, name common.BlobName, r io.Reader) error
+ openFn func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error)
+ updateFn func(ctx context.Context, name *common.BlobName, r io.Reader) error
}
-func (w *dsWrapper) Open(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (w *dsWrapper) Open(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
if w.openFn != nil {
return w.openFn(ctx, name)
}
return w.DS.Open(ctx, name)
}
-func (w *dsWrapper) Update(ctx context.Context, name common.BlobName, r io.Reader) error {
+func (w *dsWrapper) Update(ctx context.Context, name *common.BlobName, r io.Reader) error {
if w.updateFn != nil {
return w.updateFn(ctx, name, r)
}
@@ -68,7 +68,7 @@ func TestDynamicLinkErrors(t *testing.T) {
t.Run("handle error while opening blob", func(t *testing.T) {
injectedErr := errors.New("test")
- dsw.openFn = func(ctx context.Context, name common.BlobName) (io.ReadCloser, error) { return nil, injectedErr }
+ dsw.openFn = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) { return nil, injectedErr }
rc, err := be.Open(context.Background(), bn, key)
require.ErrorIs(t, err, injectedErr)
@@ -84,7 +84,7 @@ func TestDynamicLinkErrors(t *testing.T) {
t.Run(fmt.Sprintf("error at byte %d", i), func(t *testing.T) {
- dsw.openFn = func(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+ dsw.openFn = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
origRC, err := dsw.DS.Open(ctx, name)
require.NoError(t, err)
@@ -124,9 +124,9 @@ func TestDynamicLinkErrors(t *testing.T) {
bn, key, ai, err := be.Create(context.Background(), blobtypes.DynamicLink, bytes.NewReader(nil))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
be.(*beDatastore).rand = rand.Reader
@@ -135,13 +135,13 @@ func TestDynamicLinkErrors(t *testing.T) {
t.Run("fail to store new dynamic link blob", func(t *testing.T) {
injectedErr := errors.New("test")
- dsw.updateFn = func(ctx context.Context, name common.BlobName, r io.Reader) error { return injectedErr }
+ dsw.updateFn = func(ctx context.Context, name *common.BlobName, r io.Reader) error { return injectedErr }
bn, key, ai, err := be.Create(context.Background(), blobtypes.DynamicLink, bytes.NewReader(nil))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
dsw.updateFn = nil
})
@@ -152,7 +152,7 @@ func TestDynamicLinkErrors(t *testing.T) {
bn, key, ai, err := be.Create(context.Background(), blobtypes.DynamicLink, bytes.NewReader(nil))
require.NoError(t, err)
- dsw.updateFn = func(ctx context.Context, name common.BlobName, r io.Reader) error { return injectedErr }
+ dsw.updateFn = func(ctx context.Context, name *common.BlobName, r io.Reader) error { return injectedErr }
err = be.Update(context.Background(), bn, ai, key, bytes.NewReader(nil))
require.ErrorIs(t, err, injectedErr)
diff --git a/pkg/blenc/datastore_static.go b/pkg/blenc/datastore_static.go
index d28dec8..1dc28c4 100644
--- a/pkg/blenc/datastore_static.go
+++ b/pkg/blenc/datastore_static.go
@@ -17,7 +17,6 @@ limitations under the License.
package blenc
import (
- "bytes"
"context"
"crypto/sha256"
"errors"
@@ -33,14 +32,14 @@ var (
ErrCanNotUpdateStaticBlob = errors.New("blob update is not supported for static blobs")
)
-func (be *beDatastore) openStatic(ctx context.Context, name common.BlobName, key cipherfactory.Key) (io.ReadCloser, error) {
+func (be *beDatastore) openStatic(ctx context.Context, name *common.BlobName, key *common.BlobKey) (io.ReadCloser, error) {
rc, err := be.ds.Open(ctx, name)
if err != nil {
return nil, err
}
- scr, err := cipherfactory.StreamCipherReader(key, key.DefaultIV(), rc)
+ scr, err := cipherfactory.StreamCipherReader(key, cipherfactory.DefaultIV(key), rc)
if err != nil {
return nil, err
}
@@ -54,7 +53,7 @@ func (be *beDatastore) openStatic(ctx context.Context, name common.BlobName, key
Reader: validatingreader.CheckOnEOF(
io.TeeReader(scr, keyGenerator),
func() error {
- if !bytes.Equal(key, keyGenerator.Generate()) {
+ if !key.Equal(keyGenerator.Generate()) {
return blobtypes.ErrValidationFailed
}
return nil
@@ -68,9 +67,9 @@ func (be *beDatastore) createStatic(
ctx context.Context,
r io.Reader,
) (
- common.BlobName,
- cipherfactory.Key,
- AuthInfo,
+ *common.BlobName,
+ *common.BlobKey,
+ *common.AuthInfo,
error,
) {
tempWriteBufferPlain, err := be.newSecureFifo()
@@ -92,7 +91,7 @@ func (be *beDatastore) createStatic(
}
key := keyGenerator.Generate()
- iv := key.DefaultIV() // We can use this since each blob will have different key
+ iv := cipherfactory.DefaultIV(key) // We can use this since each blob will have different key
rClone, err := tempWriteBufferPlain.Done() // rClone will allow re-reading the source data
if err != nil {
@@ -141,9 +140,9 @@ func (be *beDatastore) createStatic(
func (be *beDatastore) updateStatic(
ctx context.Context,
- name common.BlobName,
- authInfo AuthInfo,
- key cipherfactory.Key,
+ name *common.BlobName,
+ authInfo *common.AuthInfo,
+ key *common.BlobKey,
r io.Reader,
) error {
return ErrCanNotUpdateStaticBlob
diff --git a/pkg/blenc/datastore_static_test.go b/pkg/blenc/datastore_static_test.go
index bf370be..d0da6e6 100644
--- a/pkg/blenc/datastore_static_test.go
+++ b/pkg/blenc/datastore_static_test.go
@@ -69,7 +69,7 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
t.Run("handle error while opening blob", func(t *testing.T) {
injectedErr := errors.New("test")
- dsw.openFn = func(ctx context.Context, name common.BlobName) (io.ReadCloser, error) { return nil, injectedErr }
+ dsw.openFn = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) { return nil, injectedErr }
rc, err := be.Open(context.Background(), bn, key)
require.ErrorIs(t, err, injectedErr)
@@ -80,7 +80,7 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
t.Run("handle error while opening blob", func(t *testing.T) {
injectedErr := errors.New("test")
- dsw.openFn = func(ctx context.Context, name common.BlobName) (io.ReadCloser, error) { return nil, injectedErr }
+ dsw.openFn = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) { return nil, injectedErr }
rc, err := be.Open(context.Background(), bn, key)
require.ErrorIs(t, err, injectedErr)
@@ -97,9 +97,9 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
bn, key, ai, err := be.Create(context.Background(), blobtypes.Static, bytes.NewReader(nil))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
})
t.Run("second securefifo", func(t *testing.T) {
@@ -127,9 +127,9 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
bn, key, ai, err := be.Create(context.Background(), blobtypes.Static, bytes.NewReader(nil))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
require.True(t, firstSecureFifoCreated)
require.True(t, firstSecureFifoClosed)
})
@@ -165,9 +165,9 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
bn, key, ai, err := be.Create(context.Background(), blobtypes.Static, bytes.NewReader(nil))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
require.Equal(t, 2, secureFifosCreated)
require.Equal(t, secureFifosCreated, secureFifosClosed)
})
@@ -204,9 +204,9 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
bn, key, ai, err := be.Create(context.Background(), blobtypes.Static, bytes.NewReader([]byte("Hello world")))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
require.Equal(t, 2, secureFifosCreated)
require.Equal(t, secureFifosCreated, secureFifosClosed)
})
@@ -236,9 +236,9 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
bn, key, ai, err := be.Create(context.Background(), blobtypes.Static, iotest.ErrReader(injectedErr))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
require.Equal(t, 2, secureFifosCreated)
require.Equal(t, secureFifosCreated, secureFifosClosed)
})
@@ -267,13 +267,13 @@ func TestStaticErrorTruncatedDatastore(t *testing.T) {
}, nil
}
- dsw.updateFn = func(ctx context.Context, name common.BlobName, r io.Reader) error { return injectedErr }
+ dsw.updateFn = func(ctx context.Context, name *common.BlobName, r io.Reader) error { return injectedErr }
bn, key, ai, err := be.Create(context.Background(), blobtypes.Static, bytes.NewReader(nil))
require.ErrorIs(t, err, injectedErr)
- require.Nil(t, bn)
- require.Nil(t, key)
- require.Nil(t, ai)
+ require.Empty(t, bn)
+ require.Empty(t, key)
+ require.Empty(t, ai)
require.Equal(t, 2, secureFifosCreated)
require.Equal(t, secureFifosCreated, secureFifosClosed)
diff --git a/pkg/blenc/interface.go b/pkg/blenc/interface.go
index d26b29b..b793ee2 100644
--- a/pkg/blenc/interface.go
+++ b/pkg/blenc/interface.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,7 +22,6 @@ import (
"github.com/cinode/go/pkg/common"
"github.com/cinode/go/pkg/datastore"
- "github.com/cinode/go/pkg/internal/utilities/cipherfactory"
)
// AuthInfo is an opaque data that is necessary to perform update of a blob with the same name
@@ -40,23 +39,23 @@ type BE interface {
//
// If returned error is not nil, the reader must be nil. Otherwise it is required to
// close the reader once done working with it.
- Open(ctx context.Context, name common.BlobName, key cipherfactory.Key) (io.ReadCloser, error)
+ Open(ctx context.Context, name *common.BlobName, key *common.BlobKey) (io.ReadCloser, error)
// Create completely new blob with given dataset, as a result, the blob name and optional
// AuthInfo that allows blob's update is returned
- Create(ctx context.Context, blobType common.BlobType, r io.Reader) (common.BlobName, cipherfactory.Key, AuthInfo, error)
+ Create(ctx context.Context, blobType common.BlobType, r io.Reader) (*common.BlobName, *common.BlobKey, *common.AuthInfo, error)
// Update updates given blob type with new data,
// The update must happen within a single blob name (i.e. it can not end up with blob with different name)
// and may not be available for certain blob types such as static blobs.
// A valid auth info is necessary to ensure a correct new content can be created
- Update(ctx context.Context, name common.BlobName, ai AuthInfo, key cipherfactory.Key, r io.Reader) error
+ Update(ctx context.Context, name *common.BlobName, ai *common.AuthInfo, key *common.BlobKey, r io.Reader) error
// Exists does check whether blob of given name exists. It forwards the call
// to underlying datastore.
- Exists(ctx context.Context, name common.BlobName) (bool, error)
+ Exists(ctx context.Context, name *common.BlobName) (bool, error)
// Delete tries to remove blob with given name. It forwards the call to
// underlying datastore.
- Delete(ctx context.Context, name common.BlobName) error
+ Delete(ctx context.Context, name *common.BlobName) error
}
diff --git a/pkg/blenc/interface_test.go b/pkg/blenc/interface_test.go
index 08f6334..2d6b896 100644
--- a/pkg/blenc/interface_test.go
+++ b/pkg/blenc/interface_test.go
@@ -93,7 +93,7 @@ func (s *BlencTestSuite) TestStaticBlobs() {
s.Run("new static blob must be different from the first one", func() {
s.Require().NoError(err)
s.Require().NotEqual(key, key2)
- s.Require().Len(key2, len(key))
+ s.Require().Len(key2.Bytes(), len(key.Bytes()))
})
s.Run("must fail to update static blob", func() {
@@ -121,7 +121,8 @@ func (s *BlencTestSuite) TestStaticBlobs() {
})
s.Run("must fail to open static blob with invalid key", func() {
- rc, err := s.be.Open(context.Background(), bn2, key2[1:])
+ brokenKey := common.BlobKeyFromBytes(key2.Bytes()[1:])
+ rc, err := s.be.Open(context.Background(), bn2, brokenKey)
s.Require().ErrorIs(err, cipherfactory.ErrInvalidEncryptionConfig)
s.Require().Nil(rc)
})
@@ -179,7 +180,7 @@ func (s *BlencTestSuite) TestDynamicLinkSuccessPath() {
s.Run("new dynamic link must be different from the first one", func() {
s.Require().NoError(err)
s.Require().NotEqual(key, key2)
- s.Require().Len(key2, len(key))
+ s.Require().Len(key2.Bytes(), len(key.Bytes()))
})
s.Run("must correctly read blob's content", func() {
@@ -224,7 +225,8 @@ func (s *BlencTestSuite) TestDynamicLinkSuccessPath() {
})
s.Run("must fail to update if auth info is invalid", func() {
- err := s.be.Update(context.Background(), bn, ai2[1:], key2, bytes.NewReader(nil))
+ brokenAI2 := common.AuthInfoFromBytes(ai2.Bytes()[1:])
+ err := s.be.Update(context.Background(), bn, brokenAI2, key2, bytes.NewReader(nil))
s.Require().ErrorIs(err, dynamiclink.ErrInvalidDynamicLinkAuthInfo)
})
@@ -242,9 +244,9 @@ func (s *BlencTestSuite) TestDynamicLinkSuccessPath() {
bn, key, ai, err := s.be.Create(context.Background(), blobtypes.DynamicLink, iotest.ErrReader(injectedErr))
s.Require().ErrorIs(err, injectedErr)
- s.Require().Nil(bn)
- s.Require().Nil(key)
- s.Require().Nil(ai)
+ s.Require().Empty(bn)
+ s.Require().Empty(key)
+ s.Require().Empty(ai)
})
}
@@ -255,19 +257,29 @@ func (s *BlencTestSuite) TestInvalidBlobTypes() {
s.Run("must fail to create blob of invalid type", func() {
bn, key, ai, err := s.be.Create(context.Background(), blobtypes.Invalid, bytes.NewReader(nil))
s.Require().ErrorIs(err, blobtypes.ErrUnknownBlobType)
- s.Require().Nil(bn)
- s.Require().Nil(key)
- s.Require().Nil(ai)
+ s.Require().Empty(bn)
+ s.Require().Empty(key)
+ s.Require().Empty(ai)
})
s.Run("must fail to open blob of invalid type", func() {
- rc, err := s.be.Open(context.Background(), invalidBlobName, cipherfactory.Key{})
+ rc, err := s.be.Open(
+ context.Background(),
+ invalidBlobName,
+ nil,
+ )
s.Require().ErrorIs(err, blobtypes.ErrUnknownBlobType)
s.Require().Nil(rc)
})
s.Run("must fail to update blob of invalid type", func() {
- err = s.be.Update(context.Background(), invalidBlobName, AuthInfo{}, cipherfactory.Key{}, bytes.NewReader(nil))
+ err = s.be.Update(
+ context.Background(),
+ invalidBlobName,
+ nil,
+ nil,
+ bytes.NewReader(nil),
+ )
s.Require().ErrorIs(err, blobtypes.ErrUnknownBlobType)
})
}
diff --git a/pkg/cinodefs/cinodefs_interface.go b/pkg/cinodefs/cinodefs_interface.go
new file mode 100644
index 0000000..9d03ed8
--- /dev/null
+++ b/pkg/cinodefs/cinodefs_interface.go
@@ -0,0 +1,473 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "path/filepath"
+ "time"
+
+ "github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/blobtypes"
+ "github.com/cinode/go/pkg/common"
+ "github.com/cinode/go/pkg/internal/blobtypes/dynamiclink"
+ "github.com/cinode/go/pkg/internal/utilities/headwriter"
+ "github.com/cinode/go/pkg/utilities/golang"
+)
+
+var (
+ ErrInvalidBE = errors.New("invalid BE argument")
+ ErrCantOpenDir = errors.New("can not open directory")
+ ErrCantOpenDirDuplicateEntry = fmt.Errorf("%w: duplicate entry", ErrCantOpenDir)
+ ErrCantOpenLink = errors.New("can not open link")
+ ErrTooManyRedirects = errors.New("too many link redirects")
+ ErrCantComputeBlobKey = errors.New("can not compute blob keys")
+ ErrModifiedDirectory = errors.New("can not get entrypoint for a directory, unsaved content")
+ ErrCantDeleteRoot = errors.New("can not delete root object")
+ ErrNotADirectory = errors.New("entry is not a directory")
+ ErrNotALink = errors.New("entry is not a link")
+ ErrNilEntrypoint = errors.New("nil entrypoint")
+ ErrEmptyName = errors.New("entry name can not be empty")
+ ErrEntryNotFound = errors.New("entry not found")
+ ErrIsADirectory = errors.New("entry is a directory")
+ ErrInvalidDirectoryData = errors.New("invalid directory data")
+ ErrCantWriteDirectory = errors.New("can not write directory")
+ ErrMissingRootInfo = errors.New("root info not specified")
+)
+
+const (
+ CinodeDirMimeType = "application/cinode-dir"
+)
+
+type FS interface {
+ SetEntryFile(
+ ctx context.Context,
+ path []string,
+ data io.Reader,
+ opts ...EntrypointOption,
+ ) (*Entrypoint, error)
+
+ CreateFileEntrypoint(
+ ctx context.Context,
+ data io.Reader,
+ opts ...EntrypointOption,
+ ) (*Entrypoint, error)
+
+ SetEntry(
+ ctx context.Context,
+ path []string,
+ ep *Entrypoint,
+ ) error
+
+ ResetDir(
+ ctx context.Context,
+ path []string,
+ ) error
+
+ Flush(
+ ctx context.Context,
+ ) error
+
+ FindEntry(
+ ctx context.Context,
+ path []string,
+ ) (*Entrypoint, error)
+
+ DeleteEntry(
+ ctx context.Context,
+ path []string,
+ ) error
+
+ InjectDynamicLink(
+ ctx context.Context,
+ path []string,
+ ) (
+ *WriterInfo,
+ error,
+ )
+
+ OpenEntryData(
+ ctx context.Context,
+ path []string,
+ ) (io.ReadCloser, error)
+
+ OpenEntrypointData(
+ ctx context.Context,
+ ep *Entrypoint,
+ ) (io.ReadCloser, error)
+
+ RootEntrypoint() (*Entrypoint, error)
+
+ EntrypointWriterInfo(
+ ctx context.Context,
+ ep *Entrypoint,
+ ) (*WriterInfo, error)
+
+ RootWriterInfo(
+ ctx context.Context,
+ ) (*WriterInfo, error)
+}
+
+type cinodeFS struct {
+ c graphContext
+ maxLinkRedirects int
+ timeFunc func() time.Time
+ randSource io.Reader
+
+ rootEP node
+}
+
+func New(
+ ctx context.Context,
+ be blenc.BE,
+ options ...Option,
+) (FS, error) {
+ if be == nil {
+ return nil, ErrInvalidBE
+ }
+
+ ret := cinodeFS{
+ maxLinkRedirects: DefaultMaxLinksRedirects,
+ timeFunc: time.Now,
+ randSource: rand.Reader,
+ c: graphContext{
+ be: be,
+ authInfos: map[string]*common.AuthInfo{},
+ },
+ }
+
+ for _, opt := range options {
+ err := opt.apply(ctx, &ret)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if ret.rootEP == nil {
+ return nil, ErrMissingRootInfo
+ }
+
+ return &ret, nil
+}
+
+func (fs *cinodeFS) SetEntryFile(
+ ctx context.Context,
+ path []string,
+ data io.Reader,
+ opts ...EntrypointOption,
+) (*Entrypoint, error) {
+ ep := entrypointFromOptions(ctx, opts...)
+ if ep.ep.MimeType == "" && len(path) > 0 {
+ // Try detecting mime type from filename extension
+ ep.ep.MimeType = mime.TypeByExtension(filepath.Ext(path[len(path)-1]))
+ }
+
+ ep, err := fs.createFileEntrypoint(ctx, data, ep)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fs.SetEntry(ctx, path, ep)
+ if err != nil {
+ return nil, err
+ }
+
+ return ep, nil
+}
+
+func (fs *cinodeFS) CreateFileEntrypoint(
+ ctx context.Context,
+ data io.Reader,
+ opts ...EntrypointOption,
+) (*Entrypoint, error) {
+ ep := entrypointFromOptions(ctx, opts...)
+ return fs.createFileEntrypoint(ctx, data, ep)
+}
+
+func (fs *cinodeFS) createFileEntrypoint(
+ ctx context.Context,
+ data io.Reader,
+ ep *Entrypoint,
+) (*Entrypoint, error) {
+ var hw headwriter.Writer
+
+ if ep.ep.MimeType == "" {
+ // detect mimetype from the content
+ hw = headwriter.New(512)
+ data = io.TeeReader(data, &hw)
+ }
+
+ bn, key, _, err := fs.c.be.Create(ctx, blobtypes.Static, data)
+ if err != nil {
+ return nil, err
+ }
+
+ if ep.ep.MimeType == "" {
+ ep.ep.MimeType = http.DetectContentType(hw.Head())
+ }
+
+ return setEntrypointBlobNameAndKey(bn, key, ep), nil
+}
+
+func (fs *cinodeFS) SetEntry(
+ ctx context.Context,
+ path []string,
+ ep *Entrypoint,
+) error {
+ whenReached := func(
+ ctx context.Context,
+ current node,
+ isWriteable bool,
+ ) (node, dirtyState, error) {
+ if !isWriteable {
+ return nil, 0, ErrMissingWriterInfo
+ }
+ return &nodeUnloaded{ep: ep}, dsDirty, nil
+ }
+
+ return fs.traverseGraph(
+ ctx,
+ path,
+ traverseOptions{
+ createNodes: true,
+ maxLinkRedirects: fs.maxLinkRedirects,
+ },
+ whenReached,
+ )
+}
+
+func (fs *cinodeFS) ResetDir(ctx context.Context, path []string) error {
+ whenReached := func(
+ ctx context.Context,
+ current node,
+ isWriteable bool,
+ ) (node, dirtyState, error) {
+ if !isWriteable {
+ return nil, 0, ErrMissingWriterInfo
+ }
+ return &nodeDirectory{
+ entries: map[string]node{},
+ dState: dsDirty,
+ }, dsDirty, nil
+ }
+
+ return fs.traverseGraph(
+ ctx,
+ path,
+ traverseOptions{
+ createNodes: true,
+ maxLinkRedirects: fs.maxLinkRedirects,
+ },
+ whenReached,
+ )
+}
+
+func (fs *cinodeFS) Flush(ctx context.Context) error {
+ _, newRootEP, err := fs.rootEP.flush(ctx, &fs.c)
+ if err != nil {
+ return err
+ }
+
+ fs.rootEP = &nodeUnloaded{ep: newRootEP}
+ return nil
+}
+
+func (fs *cinodeFS) FindEntry(ctx context.Context, path []string) (*Entrypoint, error) {
+ var ret *Entrypoint
+ err := fs.traverseGraph(
+ ctx,
+ path,
+ traverseOptions{
+ doNotCache: true,
+ },
+ func(_ context.Context, ep node, _ bool) (node, dirtyState, error) {
+ var subErr error
+ ret, subErr = ep.entrypoint()
+ return ep, dsClean, subErr
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+func (fs *cinodeFS) DeleteEntry(ctx context.Context, path []string) error {
+ // Entry removal is done on the parent level, we find the parent directory
+ // and remove the entry from its list
+ if len(path) == 0 {
+ return ErrCantDeleteRoot
+ }
+
+ return fs.traverseGraph(
+ ctx,
+ path[:len(path)-1],
+ traverseOptions{createNodes: true},
+ func(_ context.Context, reachedEntrypoint node, isWriteable bool) (node, dirtyState, error) {
+ if !isWriteable {
+ return nil, 0, ErrMissingWriterInfo
+ }
+
+ dir, isDir := reachedEntrypoint.(*nodeDirectory)
+ if !isDir {
+ return nil, 0, ErrNotADirectory
+ }
+
+ if !dir.deleteEntry(path[len(path)-1]) {
+ return nil, 0, ErrEntryNotFound
+ }
+
+ return dir, dsDirty, nil
+ },
+ )
+}
+
+func (fs *cinodeFS) InjectDynamicLink(
+ ctx context.Context,
+ path []string,
+) (
+ *WriterInfo,
+ error,
+) {
+ var retWi *WriterInfo
+
+ whenReached := func(
+ ctx context.Context,
+ current node,
+ isWriteable bool,
+ ) (node, dirtyState, error) {
+ if !isWriteable {
+ return nil, 0, ErrMissingWriterInfo
+ }
+
+ ep, ai, err := fs.generateNewDynamicLinkEntrypoint()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ key, err := fs.c.keyFromEntrypoint(ctx, ep)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ retWi = writerInfoFromBlobNameKeyAndAuthInfo(ep.BlobName(), key, ai)
+ return &nodeLink{
+ ep: ep,
+ target: current,
+ // Link itself must be marked as dirty - even if the content is clean,
+ // the link itself must be persisted
+ dState: dsSubDirty,
+ },
+ // Parent node becomes dirty - new link is a new blob
+ dsDirty,
+ nil
+ }
+
+ err := fs.traverseGraph(
+ ctx,
+ path,
+ traverseOptions{
+ createNodes: true,
+ maxLinkRedirects: fs.maxLinkRedirects,
+ },
+ whenReached,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return retWi, nil
+}
+
+func (fs *cinodeFS) generateNewDynamicLinkEntrypoint() (*Entrypoint, *common.AuthInfo, error) {
+ // Generate new entrypoint link data but do not yet store it in datastore
+ link, err := dynamiclink.Create(fs.randSource)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ bn := link.BlobName()
+ key := link.EncryptionKey()
+ ai := link.AuthInfo()
+
+ fs.c.authInfos[bn.String()] = ai
+
+ return EntrypointFromBlobNameAndKey(bn, key), ai, nil
+}
+
+func (fs *cinodeFS) OpenEntryData(ctx context.Context, path []string) (io.ReadCloser, error) {
+ ep, err := fs.FindEntry(ctx, path)
+ if err != nil {
+ return nil, err
+ }
+ if ep.IsDir() {
+ return nil, ErrIsADirectory
+ }
+ golang.Assert(
+ !ep.IsLink(),
+ "assumed that fs.FindEntry does not return a link",
+ )
+
+ return fs.OpenEntrypointData(ctx, ep)
+}
+
+func (fs *cinodeFS) OpenEntrypointData(ctx context.Context, ep *Entrypoint) (io.ReadCloser, error) {
+ if ep == nil {
+ return nil, ErrNilEntrypoint
+ }
+
+ return fs.c.getDataReader(ctx, ep)
+}
+
+func (fs *cinodeFS) RootEntrypoint() (*Entrypoint, error) {
+ return fs.rootEP.entrypoint()
+}
+
+func (fs *cinodeFS) EntrypointWriterInfo(ctx context.Context, ep *Entrypoint) (*WriterInfo, error) {
+ if !ep.IsLink() {
+ return nil, ErrNotALink
+ }
+
+ bn := ep.BlobName()
+
+ key, err := fs.c.keyFromEntrypoint(ctx, ep)
+ if err != nil {
+ return nil, err
+ }
+
+ authInfo, found := fs.c.authInfos[bn.String()]
+ if !found {
+ return nil, ErrMissingWriterInfo
+ }
+
+ return writerInfoFromBlobNameKeyAndAuthInfo(bn, key, authInfo), nil
+}
+
+func (fs *cinodeFS) RootWriterInfo(ctx context.Context) (*WriterInfo, error) {
+ rootEP, err := fs.RootEntrypoint()
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.EntrypointWriterInfo(ctx, rootEP)
+}
diff --git a/pkg/cinodefs/cinodefs_interface_bb_test.go b/pkg/cinodefs/cinodefs_interface_bb_test.go
new file mode 100644
index 0000000..5f3d936
--- /dev/null
+++ b/pkg/cinodefs/cinodefs_interface_bb_test.go
@@ -0,0 +1,906 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs_test
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+ "testing/iotest"
+ "time"
+
+ "github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/internal/protobuf"
+ "github.com/cinode/go/pkg/common"
+ "github.com/cinode/go/pkg/datastore"
+ "github.com/cinode/go/pkg/internal/blobtypes/dynamiclink"
+ "github.com/cinode/go/pkg/utilities/golang"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "google.golang.org/protobuf/proto"
+)
+
+func TestCinodeFSSingleFileScenario(t *testing.T) {
+ ctx := context.Background()
+ fs, err := cinodefs.New(ctx,
+ blenc.FromDatastore(datastore.InMemory()),
+ cinodefs.NewRootDynamicLink(),
+ )
+ require.NoError(t, err)
+ require.NotNil(t, fs)
+
+ { // Check single file write operation
+ path1 := []string{"dir", "subdir", "file.txt"}
+
+ ep1, err := fs.SetEntryFile(ctx,
+ path1,
+ strings.NewReader("Hello world!"),
+ )
+ require.NoError(t, err)
+ require.NotNil(t, ep1)
+
+ ep2, err := fs.FindEntry(
+ ctx,
+ path1,
+ )
+ require.NoError(t, err)
+ require.NotNil(t, ep2)
+
+ require.Equal(t, ep1.String(), ep2.String())
+
+ // Directories are modified, not yet flushed
+ for i := range path1 {
+ ep3, err := fs.FindEntry(ctx, path1[:i])
+ require.ErrorIs(t, err, cinodefs.ErrModifiedDirectory)
+ require.Nil(t, ep3)
+ }
+
+ err = fs.Flush(ctx)
+ require.NoError(t, err)
+ }
+}
+
+type testBEWrapper struct {
+ blenc.BE
+
+ createFunc func(
+ ctx context.Context, blobType common.BlobType, r io.Reader,
+ ) (*common.BlobName, *common.BlobKey, *common.AuthInfo, error)
+
+ updateFunc func(
+ ctx context.Context, name *common.BlobName, ai *common.AuthInfo,
+ key *common.BlobKey, r io.Reader,
+ ) error
+}
+
+func (w *testBEWrapper) Create(
+ ctx context.Context, blobType common.BlobType, r io.Reader,
+) (*common.BlobName, *common.BlobKey, *common.AuthInfo, error) {
+ if w.createFunc != nil {
+ return w.createFunc(ctx, blobType, r)
+ }
+ return w.BE.Create(ctx, blobType, r)
+}
+
+func (w *testBEWrapper) Update(
+ ctx context.Context, name *common.BlobName, ai *common.AuthInfo,
+ key *common.BlobKey, r io.Reader,
+) error {
+ if w.updateFunc != nil {
+ return w.updateFunc(ctx, name, ai, key, r)
+ }
+ return w.BE.Update(ctx, name, ai, key, r)
+}
+
+type testFileEntry struct {
+ path []string
+ content string
+ mimeType string
+}
+
+type CinodeFSMultiFileTestSuite struct {
+ suite.Suite
+
+ ds datastore.DS
+ be testBEWrapper
+ fs cinodefs.FS
+ contentMap []testFileEntry
+ maxLinkRedirects int
+ randSource io.Reader
+ timeFunc func() time.Time
+}
+
+type randReaderForCinodeFSMultiFileTestSuite CinodeFSMultiFileTestSuite
+
+func (r *randReaderForCinodeFSMultiFileTestSuite) Read(b []byte) (int, error) {
+ return r.randSource.Read(b)
+}
+
+func TestCinodeFSMultiFileTestSuite(t *testing.T) {
+ suite.Run(t, &CinodeFSMultiFileTestSuite{
+ maxLinkRedirects: 5,
+ })
+}
+
+func (c *CinodeFSMultiFileTestSuite) SetupTest() {
+ ctx := context.Background()
+
+ c.timeFunc = time.Now
+ c.randSource = rand.Reader
+ c.ds = datastore.InMemory()
+ c.be = testBEWrapper{
+ BE: blenc.FromDatastore(c.ds),
+ }
+ fs, err := cinodefs.New(ctx,
+ &c.be,
+ cinodefs.NewRootDynamicLink(),
+ cinodefs.MaxLinkRedirects(c.maxLinkRedirects),
+ cinodefs.TimeFunc(func() time.Time { return c.timeFunc() }),
+ cinodefs.RandSource((*randReaderForCinodeFSMultiFileTestSuite)(c)),
+ )
+ require.NoError(c.T(), err)
+ require.NotNil(c.T(), fs)
+ c.fs = fs
+
+ const testFilesCount = 10
+ const dirsCount = 3
+ const subDirsCount = 2
+
+ c.contentMap = make([]testFileEntry, testFilesCount)
+ for i := 0; i < testFilesCount; i++ {
+ c.contentMap[i].path = []string{
+ fmt.Sprintf("dir%d", i%dirsCount),
+ fmt.Sprintf("subdir%d", i%subDirsCount),
+ fmt.Sprintf("file%d.txt", i),
+ }
+ c.contentMap[i].content = fmt.Sprintf("Hello world! from file %d!", i)
+ c.contentMap[i].mimeType = "text/plain"
+ }
+
+ for _, file := range c.contentMap {
+ _, err := c.fs.SetEntryFile(ctx,
+ file.path,
+ strings.NewReader(file.content),
+ )
+ require.NoError(c.T(), err)
+ }
+
+ err = c.fs.Flush(context.Background())
+ require.NoError(c.T(), err)
+}
+
+func (c *CinodeFSMultiFileTestSuite) checkContentMap(t *testing.T, fs cinodefs.FS) {
+ ctx := context.Background()
+ for _, file := range c.contentMap {
+ ep, err := fs.FindEntry(ctx, file.path)
+ require.NoError(t, err)
+ require.Contains(t, ep.MimeType(), file.mimeType)
+
+ rc, err := fs.OpenEntrypointData(ctx, ep)
+ require.NoError(t, err)
+ defer rc.Close()
+
+ data, err := io.ReadAll(rc)
+ require.NoError(t, err)
+
+ require.Equal(t, file.content, string(data))
+ }
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestReopeningInReadOnlyMode() {
+ ctx := context.Background()
+ rootEP, err := c.fs.RootEntrypoint()
+ require.NoError(c.T(), err)
+
+ fs2, err := cinodefs.New(
+ ctx,
+ blenc.FromDatastore(c.ds),
+ cinodefs.RootEntrypointString(rootEP.String()),
+ )
+ require.NoError(c.T(), err)
+ require.NotNil(c.T(), fs2)
+
+ c.checkContentMap(c.T(), fs2)
+
+ _, err = c.fs.SetEntryFile(ctx,
+ c.contentMap[0].path,
+ strings.NewReader("modified content"),
+ )
+ require.NoError(c.T(), err)
+
+ // Data in fs was not yet flushed to the datastore, fs2 should still refer to the old content
+ c.checkContentMap(c.T(), fs2)
+
+ err = c.fs.Flush(ctx)
+ require.NoError(c.T(), err)
+
+ // reopen fs2 to avoid any caching issues
+ fs2, err = cinodefs.New(
+ ctx,
+ blenc.FromDatastore(c.ds),
+ cinodefs.RootEntrypoint(rootEP),
+ )
+ require.NoError(c.T(), err)
+
+ // Check with modified content map
+ c.contentMap[0].content = "modified content"
+ c.checkContentMap(c.T(), fs2)
+
+ // We should not be allowed to modify fs2 without writer info
+ ep, err := fs2.SetEntryFile(ctx, c.contentMap[0].path, strings.NewReader("should fail"))
+ require.ErrorIs(c.T(), err, cinodefs.ErrMissingWriterInfo)
+ require.Nil(c.T(), ep)
+ c.checkContentMap(c.T(), c.fs)
+ c.checkContentMap(c.T(), fs2)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestReopeningInReadWriteMode() {
+ ctx := context.Background()
+
+ rootWriterInfo, err := c.fs.RootWriterInfo(ctx)
+ require.NoError(c.T(), err)
+ require.NotNil(c.T(), rootWriterInfo)
+
+ fs3, err := cinodefs.New(
+ ctx,
+ blenc.FromDatastore(c.ds),
+ cinodefs.RootWriterInfoString(rootWriterInfo.String()),
+ )
+ require.NoError(c.T(), err)
+ require.NotNil(c.T(), fs3)
+
+ c.checkContentMap(c.T(), fs3)
+
+ // With a proper auth info we can modify files in the root path
+ ep, err := fs3.SetEntryFile(ctx, c.contentMap[0].path, strings.NewReader("modified through fs3"))
+ require.NoError(c.T(), err)
+ require.NotNil(c.T(), ep)
+
+ c.contentMap[0].content = "modified through fs3"
+ c.checkContentMap(c.T(), fs3)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestRemovalOfAFile() {
+ ctx := context.Background()
+
+ err := c.fs.DeleteEntry(ctx, c.contentMap[0].path)
+ require.NoError(c.T(), err)
+
+ c.contentMap = c.contentMap[1:]
+ c.checkContentMap(c.T(), c.fs)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestRemovalOfADirectory() {
+ ctx := context.Background()
+
+ removedPath := c.contentMap[0].path[:2]
+
+ err := c.fs.DeleteEntry(ctx, removedPath)
+ require.NoError(c.T(), err)
+
+ filteredEntries := []testFileEntry{}
+ removed := 0
+ for _, e := range c.contentMap {
+ if e.path[0] == removedPath[0] && e.path[1] == removedPath[1] {
+ continue
+ }
+
+ filteredEntries = append(filteredEntries, e)
+ removed++
+ }
+ c.contentMap = filteredEntries
+ require.NotZero(c.T(), removed)
+
+ c.checkContentMap(c.T(), c.fs)
+
+ err = c.fs.DeleteEntry(ctx, removedPath)
+ require.ErrorIs(c.T(), err, cinodefs.ErrEntryNotFound)
+
+ c.checkContentMap(c.T(), c.fs)
+
+ ep, err := c.fs.FindEntry(ctx, removedPath)
+ require.ErrorIs(c.T(), err, cinodefs.ErrEntryNotFound)
+ require.Nil(c.T(), ep)
+
+ err = c.fs.DeleteEntry(ctx, []string{})
+ require.ErrorIs(c.T(), err, cinodefs.ErrCantDeleteRoot)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestDeleteTreatFileAsDirectory() {
+ ctx := context.Background()
+
+	path := append(append([]string{}, c.contentMap[0].path...), "sub-file")
+ err := c.fs.DeleteEntry(ctx, path)
+ require.ErrorIs(c.T(), err, cinodefs.ErrNotADirectory)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestResetDir() {
+ ctx := context.Background()
+
+ removedPath := c.contentMap[0].path[:2]
+
+ err := c.fs.ResetDir(ctx, removedPath)
+ require.NoError(c.T(), err)
+
+ filteredEntries := []testFileEntry{}
+ removed := 0
+ for _, e := range c.contentMap {
+ if e.path[0] == removedPath[0] && e.path[1] == removedPath[1] {
+ continue
+ }
+
+ filteredEntries = append(filteredEntries, e)
+ removed++
+ }
+ c.contentMap = filteredEntries
+ require.NotZero(c.T(), removed)
+
+ c.checkContentMap(c.T(), c.fs)
+
+ err = c.fs.ResetDir(ctx, removedPath)
+ require.NoError(c.T(), err)
+
+ c.checkContentMap(c.T(), c.fs)
+
+ ep, err := c.fs.FindEntry(ctx, removedPath)
+ require.ErrorIs(c.T(), err, cinodefs.ErrModifiedDirectory)
+ require.Nil(c.T(), ep)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestSettingEntry() {
+ ctx := context.Background()
+
+ c.T().Run("prevent treating file as directory", func(t *testing.T) {
+		path := append(append([]string{}, c.contentMap[0].path...), "sub-file")
+ _, err := c.fs.SetEntryFile(ctx, path, strings.NewReader("should not happen"))
+ require.ErrorIs(t, err, cinodefs.ErrNotADirectory)
+ })
+
+ c.T().Run("prevent setting empty path segment", func(t *testing.T) {
+ for _, path := range [][]string{
+ {"", "subdir", "file.txt"},
+ {"dir", "", "file.txt"},
+ {"dir", "subdir", ""},
+ } {
+ c.T().Run(strings.Join(path, "::"), func(t *testing.T) {
+ _, err := c.fs.SetEntryFile(ctx, path, strings.NewReader("should not succeed"))
+ require.ErrorIs(t, err, cinodefs.ErrEmptyName)
+
+ })
+ }
+ })
+
+ c.T().Run("tet root entrypoint on dirty filesystem", func(t *testing.T) {
+ ep1, err := c.fs.RootEntrypoint()
+ require.NoError(t, err)
+
+ _, err = c.fs.SetEntryFile(ctx, c.contentMap[0].path, strings.NewReader("hello"))
+ require.NoError(t, err)
+ c.contentMap[0].content = "hello"
+
+ ep2, err := c.fs.RootEntrypoint()
+ require.NoError(t, err)
+
+		// Even though dirty, the entrypoint won't change its content
+ require.Equal(t, ep1.String(), ep2.String())
+
+ err = c.fs.Flush(ctx)
+ require.NoError(t, err)
+
+ ep3, err := c.fs.RootEntrypoint()
+ require.NoError(t, err)
+
+ require.Equal(t, ep1.String(), ep3.String())
+ })
+
+ c.T().Run("test crete file entrypoint", func(t *testing.T) {
+ ep, err := c.fs.CreateFileEntrypoint(ctx, strings.NewReader("new file"))
+ require.NoError(t, err)
+ require.NotNil(t, ep)
+
+ err = c.fs.SetEntry(context.Background(), []string{"new-file.txt"}, ep)
+ require.NoError(t, err)
+
+ c.contentMap = append(c.contentMap, testFileEntry{
+ path: []string{"new-file.txt"},
+ content: "new file",
+ mimeType: ep.MimeType(),
+ })
+
+ c.checkContentMap(c.T(), c.fs)
+ })
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestRootEPDirectoryOnDirtyFS() {
+ ctx := context.Background()
+
+ rootDir, err := c.fs.FindEntry(ctx, []string{})
+ require.NoError(c.T(), err)
+
+ fs2, err := cinodefs.New(ctx,
+ blenc.FromDatastore(c.ds),
+ cinodefs.RootEntrypoint(rootDir),
+ )
+ require.NoError(c.T(), err)
+
+ ep1, err := fs2.RootEntrypoint()
+ require.NoError(c.T(), err)
+ require.Equal(c.T(), rootDir.String(), ep1.String())
+
+ _, err = fs2.SetEntryFile(ctx, c.contentMap[0].path, strings.NewReader("hello"))
+ require.NoError(c.T(), err)
+
+ ep2, err := fs2.RootEntrypoint()
+ require.ErrorIs(c.T(), err, cinodefs.ErrModifiedDirectory)
+ require.Nil(c.T(), ep2)
+
+ err = fs2.Flush(ctx)
+ require.NoError(c.T(), err)
+
+ ep3, err := c.fs.RootEntrypoint()
+ require.NoError(c.T(), err)
+
+ require.NotEqual(c.T(), ep1.String(), ep3.String())
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestOpeningData() {
+ _, err := c.fs.OpenEntrypointData(context.Background(), nil)
+ require.ErrorIs(c.T(), err, cinodefs.ErrNilEntrypoint)
+
+ _, err = c.fs.OpenEntryData(context.Background(), []string{"a", "b", "c"})
+ require.ErrorIs(c.T(), err, cinodefs.ErrEntryNotFound)
+
+ _, err = c.fs.OpenEntryData(context.Background(), []string{})
+ require.ErrorIs(c.T(), err, cinodefs.ErrIsADirectory)
+
+ contentReader, err := c.fs.OpenEntryData(context.Background(), c.contentMap[0].path)
+ require.NoError(c.T(), err)
+ content, err := io.ReadAll(contentReader)
+ require.NoError(c.T(), err)
+ require.Equal(c.T(), c.contentMap[0].content, string(content))
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestSubLinksAndWriteOnlyPath() {
+ ctx := context.Background()
+ t := c.T()
+ path := append([]string{}, c.contentMap[0].path...)
+ path = append(path[:len(path)-1], "linked", "sub", "directory", "linked-file.txt")
+ linkPath := path[:len(path)-2]
+
+ // Create normal file
+ ep, err := c.fs.SetEntryFile(ctx, path, strings.NewReader("linked-file"))
+ require.NoError(t, err)
+ c.contentMap = append(c.contentMap, testFileEntry{
+ path: path,
+ content: "linked-file",
+ mimeType: ep.MimeType(),
+ })
+ c.checkContentMap(t, c.fs)
+
+ // Convert path to the file to a dynamic link
+ wi, err := c.fs.InjectDynamicLink(ctx, linkPath)
+ require.NoError(t, err)
+ require.NotNil(t, wi)
+ c.checkContentMap(t, c.fs)
+
+ // Ensure flushing through the dynamic link works
+ err = c.fs.Flush(ctx)
+ require.NoError(t, err)
+ c.checkContentMap(t, c.fs)
+
+	// Ensure the content can still be changed - the corresponding auth info
+	// is still kept in the context
+ _, err = c.fs.SetEntryFile(ctx, path, strings.NewReader("updated-linked-file"))
+ require.NoError(t, err)
+ c.contentMap[len(c.contentMap)-1].content = "updated-linked-file"
+ c.checkContentMap(t, c.fs)
+
+ // Ensure flushing works after the change behind the link
+ err = c.fs.Flush(ctx)
+ require.NoError(t, err)
+ c.checkContentMap(t, c.fs)
+
+ rootWriterInfo, err := c.fs.RootWriterInfo(ctx)
+ require.NoError(t, err)
+
+ // Reopen the filesystem, but only with the root writer info
+ fs2, err := cinodefs.New(ctx,
+ blenc.FromDatastore(c.ds),
+ cinodefs.RootWriterInfoString(rootWriterInfo.String()),
+ )
+ require.NoError(c.T(), err)
+ c.checkContentMap(c.T(), fs2)
+
+ // Can not do any operation below the split point
+ ep, err = fs2.SetEntryFile(ctx, path, strings.NewReader("won't work"))
+ require.ErrorIs(t, err, cinodefs.ErrMissingWriterInfo)
+ require.Nil(t, ep)
+
+ altPath := append(append([]string{}, path[:len(path)-1]...), "other", "directory", "path")
+ ep, err = fs2.SetEntryFile(ctx, altPath, strings.NewReader("won't work"))
+ require.ErrorIs(t, err, cinodefs.ErrMissingWriterInfo)
+ require.Nil(t, ep)
+
+ err = fs2.ResetDir(ctx, path[:len(path)-1])
+ require.ErrorIs(t, err, cinodefs.ErrMissingWriterInfo)
+
+ err = fs2.DeleteEntry(ctx, path)
+ require.ErrorIs(t, err, cinodefs.ErrMissingWriterInfo)
+
+ _, err = fs2.InjectDynamicLink(ctx, path)
+ require.ErrorIs(t, err, cinodefs.ErrMissingWriterInfo)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestMaxLinksRedirects() {
+ t := c.T()
+ ctx := context.Background()
+
+ entryPath := c.contentMap[0].path
+ linkPath := entryPath[:len(entryPath)-1]
+
+ // Up to max links redirects, lookup must be allowed
+ for i := 0; i < c.maxLinkRedirects; i++ {
+ _, err := c.fs.InjectDynamicLink(ctx, linkPath)
+ require.NoError(t, err)
+
+ _, err = c.fs.FindEntry(ctx, entryPath)
+ require.NoError(t, err)
+ }
+
+ // Cross the max redirects count, next lookup should fail
+ _, err := c.fs.InjectDynamicLink(ctx, linkPath)
+ require.NoError(t, err)
+
+ _, err = c.fs.FindEntry(ctx, entryPath)
+ require.ErrorIs(t, err, cinodefs.ErrTooManyRedirects)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestExplicitMimeType() {
+ t := c.T()
+ ctx := context.Background()
+ entryPath := c.contentMap[0].path
+ const newMimeType = "forced-mime-type"
+
+ _, err := c.fs.SetEntryFile(ctx,
+ entryPath,
+ strings.NewReader("modified content"),
+ cinodefs.SetMimeType(newMimeType),
+ )
+ require.NoError(t, err)
+
+ entry, err := c.fs.FindEntry(ctx, entryPath)
+ require.NoError(t, err)
+ require.Equal(t, newMimeType, entry.MimeType())
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestMalformedDirectory() {
+ var ep protobuf.Entrypoint
+ err := proto.Unmarshal(
+ golang.Must(c.fs.FindEntry(context.Background(), c.contentMap[0].path)).Bytes(),
+ &ep,
+ )
+ require.NoError(c.T(), err)
+
+ var brokenEP protobuf.Entrypoint
+ proto.Merge(&brokenEP, &ep)
+ brokenEP.BlobName = []byte{}
+
+ for _, d := range []struct {
+ n string
+ d []byte
+ err error
+ }{
+ {
+ "malformed data",
+ []byte{23, 45, 67, 89, 12, 34, 56, 78, 90}, // Some malformed message
+ cinodefs.ErrCantOpenDir,
+ },
+ {
+ "entry with empty name",
+ golang.Must(proto.Marshal(&protobuf.Directory{
+ Entries: []*protobuf.Directory_Entry{{
+ Name: "",
+ }},
+ })),
+ cinodefs.ErrEmptyName,
+ },
+ {
+ "two entries with the same name",
+ golang.Must(proto.Marshal(&protobuf.Directory{
+ Entries: []*protobuf.Directory_Entry{
+ {Name: "entry", Ep: &ep},
+ {Name: "entry", Ep: &ep},
+ },
+ })),
+ cinodefs.ErrCantOpenDirDuplicateEntry,
+ },
+ {
+ "missing entrypoint",
+ golang.Must(proto.Marshal(&protobuf.Directory{
+ Entries: []*protobuf.Directory_Entry{
+ {Name: "entry"},
+ },
+ })),
+ cinodefs.ErrInvalidEntrypointDataNil,
+ },
+ {
+ "missing blob name",
+ golang.Must(proto.Marshal(&protobuf.Directory{
+ Entries: []*protobuf.Directory_Entry{
+ {Name: "entry", Ep: &brokenEP},
+ },
+ })),
+ common.ErrInvalidBlobName,
+ },
+ } {
+ c.T().Run(d.n, func(t *testing.T) {
+ _, err := c.fs.SetEntryFile(context.Background(),
+ []string{"dir"},
+ bytes.NewReader(d.d),
+ cinodefs.SetMimeType(cinodefs.CinodeDirMimeType),
+ )
+ require.NoError(t, err)
+
+ _, err = c.fs.FindEntry(context.Background(), []string{"dir", "entry"})
+ require.ErrorIs(t, err, cinodefs.ErrCantOpenDir)
+ require.ErrorIs(t, err, d.err)
+
+ // TODO: We should be able to set new entry even if the underlying object is broken
+ err = c.fs.DeleteEntry(context.Background(), []string{"dir"})
+ require.NoError(t, err)
+ })
+ }
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestMalformedLink() {
+ var ep protobuf.Entrypoint
+ err := proto.Unmarshal(
+ golang.Must(c.fs.FindEntry(context.Background(), c.contentMap[0].path)).Bytes(),
+ &ep,
+ )
+ require.NoError(c.T(), err)
+
+ var brokenEP protobuf.Entrypoint
+ proto.Merge(&brokenEP, &ep)
+ brokenEP.BlobName = []byte{}
+
+ _, err = c.fs.SetEntryFile(context.Background(), []string{"link", "file"}, strings.NewReader("test"))
+ require.NoError(c.T(), err)
+
+ linkWI_, err := c.fs.InjectDynamicLink(context.Background(), []string{"link"})
+ require.NoError(c.T(), err)
+
+	// Flush is needed so that we can update entrypoint data and the fs cache won't get in our way
+ err = c.fs.Flush(context.Background())
+ require.NoError(c.T(), err)
+
+ for _, d := range []struct {
+ n string
+ d []byte
+ err error
+ }{
+ {
+ "malformed data",
+ []byte{23, 45, 67, 89, 12, 34, 56, 78, 90}, // Some malformed message
+ cinodefs.ErrCantOpenLink,
+ },
+ {
+ "missing target blob name",
+ golang.Must(proto.Marshal(&brokenEP)),
+ common.ErrInvalidBlobName,
+ },
+ } {
+ c.T().Run(d.n, func(t *testing.T) {
+ var linkWI protobuf.WriterInfo
+ err = proto.Unmarshal(linkWI_.Bytes(), &linkWI)
+ require.NoError(c.T(), err)
+ linkBlobName := golang.Must(common.BlobNameFromBytes(linkWI.BlobName))
+ linkAuthInfo := common.AuthInfoFromBytes(linkWI.AuthInfo)
+ linkKey := common.BlobKeyFromBytes(linkWI.Key)
+
+ err = c.be.Update(context.Background(),
+ linkBlobName, linkAuthInfo, linkKey, bytes.NewReader(d.d),
+ )
+ require.NoError(t, err)
+
+ _, err = c.fs.FindEntry(context.Background(), []string{"link", "file"})
+ require.ErrorIs(t, err, cinodefs.ErrCantOpenLink)
+ require.ErrorIs(t, err, d.err)
+ })
+ }
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestPathWithMultipleLinks() {
+ path := []string{
+ "multi",
+ "level",
+ "path",
+ "with",
+ "more",
+ "than",
+ "one",
+ "link",
+ }
+ ctx := context.Background()
+ t := c.T()
+
+ // Create test entry
+ const initialContent = "initial content"
+ ep, err := c.fs.SetEntryFile(ctx, path, strings.NewReader(initialContent))
+ require.NoError(t, err)
+
+ // Inject few links among the path to the entry
+ for _, splitPoint := range []int{2, 6, 4} {
+ _, err = c.fs.InjectDynamicLink(ctx, path[:splitPoint])
+ require.NoError(t, err)
+
+ err = c.fs.Flush(ctx)
+ require.NoError(t, err)
+ }
+
+ // Create parallel filesystem
+ rootEP, err := c.fs.RootEntrypoint()
+ require.NoError(t, err)
+
+ fs2, err := cinodefs.New(ctx,
+ blenc.FromDatastore(c.ds),
+ cinodefs.RootEntrypointString(rootEP.String()),
+ )
+ require.NoError(t, err)
+
+ c.contentMap = append(c.contentMap, testFileEntry{
+ path: path,
+ content: initialContent,
+ mimeType: ep.MimeType(),
+ })
+ c.checkContentMap(t, c.fs)
+
+ // Modify the content of the file in the original filesystem, not yet flushed
+ const modifiedContent1 = "modified content 1"
+ _, err = c.fs.SetEntryFile(ctx, path, strings.NewReader(modifiedContent1))
+ require.NoError(t, err)
+
+ // Change not yet observed through the second filesystem due to no flush
+ c.checkContentMap(t, fs2)
+
+ err = c.fs.Flush(ctx)
+ require.NoError(t, err)
+
+ // Change must now be observed through the second filesystem
+ c.contentMap[len(c.contentMap)-1].content = modifiedContent1
+ c.checkContentMap(t, c.fs)
+ c.checkContentMap(t, fs2)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestBlobWriteErrorWhenCreatingFile() {
+ injectedErr := errors.New("entry file create error")
+ c.be.createFunc = func(ctx context.Context, blobType common.BlobType, r io.Reader,
+ ) (*common.BlobName, *common.BlobKey, *common.AuthInfo, error) {
+ return nil, nil, nil, injectedErr
+ }
+
+ _, err := c.fs.SetEntryFile(context.Background(), []string{"file"}, strings.NewReader("test"))
+ require.ErrorIs(c.T(), err, injectedErr)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestBlobWriteErrorWhenFlushing() {
+ _, err := c.fs.SetEntryFile(context.Background(), []string{"file"}, strings.NewReader("test"))
+ require.NoError(c.T(), err)
+
+ injectedErr := errors.New("flush error")
+ c.be.createFunc = func(ctx context.Context, blobType common.BlobType, r io.Reader,
+ ) (*common.BlobName, *common.BlobKey, *common.AuthInfo, error) {
+ return nil, nil, nil, injectedErr
+ }
+
+ err = c.fs.Flush(context.Background())
+ require.ErrorIs(c.T(), err, injectedErr)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestLinkGenerationError() {
+ injectedErr := errors.New("rand data read error")
+
+ c.randSource = iotest.ErrReader(injectedErr)
+
+ _, err := c.fs.InjectDynamicLink(
+ context.Background(),
+ c.contentMap[0].path[:2],
+ )
+ require.ErrorIs(c.T(), err, injectedErr)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestBlobWriteWhenCreatingLink() {
+ injectedErr := errors.New("link creation error")
+ c.be.updateFunc = func(ctx context.Context, name *common.BlobName, ai *common.AuthInfo, key *common.BlobKey, r io.Reader) error {
+ return injectedErr
+ }
+
+ _, err := c.fs.InjectDynamicLink(context.Background(), c.contentMap[0].path[:2])
+ require.NoError(c.T(), err)
+
+ err = c.fs.Flush(context.Background())
+ require.ErrorIs(c.T(), err, injectedErr)
+}
+
+func (c *CinodeFSMultiFileTestSuite) TestReadFailureMissingKey() {
+ var epProto protobuf.Entrypoint
+ err := proto.Unmarshal(
+ golang.Must(c.fs.FindEntry(context.Background(), c.contentMap[0].path)).Bytes(),
+ &epProto,
+ )
+ require.NoError(c.T(), err)
+
+ // Generate derived EP without key
+ epProto.KeyInfo.Key = nil
+ ep := golang.Must(cinodefs.EntrypointFromBytes(
+ golang.Must(proto.Marshal(&epProto)),
+ ))
+
+ // Replace current entrypoint with one without the key
+ err = c.fs.SetEntry(context.Background(), c.contentMap[0].path, ep)
+ require.NoError(c.T(), err)
+
+ r, err := c.fs.OpenEntryData(context.Background(), c.contentMap[0].path)
+ require.ErrorIs(c.T(), err, cinodefs.ErrMissingKeyInfo)
+ require.Nil(c.T(), r)
+}
+
+func TestFetchingWriterInfo(t *testing.T) {
+ t.Run("not a dynamic link", func(t *testing.T) {
+ fs, err := cinodefs.New(
+ context.Background(),
+ blenc.FromDatastore(datastore.InMemory()),
+ cinodefs.NewRootStaticDirectory(),
+ )
+ require.NoError(t, err)
+
+ wi, err := fs.RootWriterInfo(context.Background())
+ require.ErrorIs(t, err, cinodefs.ErrModifiedDirectory)
+ require.Nil(t, wi)
+
+ err = fs.Flush(context.Background())
+ require.NoError(t, err)
+
+ wi, err = fs.RootWriterInfo(context.Background())
+ require.ErrorIs(t, err, cinodefs.ErrNotALink)
+ require.Nil(t, wi)
+ })
+
+ t.Run("dynamic link without writer info", func(t *testing.T) {
+ link, err := dynamiclink.Create(rand.Reader)
+ require.NoError(t, err)
+ ep := cinodefs.EntrypointFromBlobNameAndKey(link.BlobName(), link.EncryptionKey())
+
+ fs, err := cinodefs.New(
+ context.Background(),
+ blenc.FromDatastore(datastore.InMemory()),
+ // Set entrypoint without auth info
+ cinodefs.RootEntrypoint(ep),
+ )
+ require.NoError(t, err)
+
+ wi, err := fs.RootWriterInfo(context.Background())
+ require.ErrorIs(t, err, cinodefs.ErrMissingWriterInfo)
+ require.Nil(t, wi)
+ })
+}
diff --git a/pkg/cinodefs/cinodefs_options.go b/pkg/cinodefs/cinodefs_options.go
new file mode 100644
index 0000000..79238fe
--- /dev/null
+++ b/pkg/cinodefs/cinodefs_options.go
@@ -0,0 +1,168 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/cinode/go/pkg/common"
+)
+
+const (
+ DefaultMaxLinksRedirects = 10
+)
+
+var (
+ ErrNegativeMaxLinksRedirects = errors.New("negative value of maximum links redirects")
+ ErrInvalidNilTimeFunc = errors.New("nil time function")
+ ErrInvalidNilRandSource = errors.New("nil random source")
+)
+
+type Option interface {
+ apply(ctx context.Context, fs *cinodeFS) error
+}
+
+type errOption struct{ err error }
+
+func (e errOption) apply(ctx context.Context, fs *cinodeFS) error { return e.err }
+
+type optionFunc func(ctx context.Context, fs *cinodeFS) error
+
+func (f optionFunc) apply(ctx context.Context, fs *cinodeFS) error {
+ return f(ctx, fs)
+}
+
+func MaxLinkRedirects(maxLinkRedirects int) Option {
+ if maxLinkRedirects < 0 {
+ return errOption{ErrNegativeMaxLinksRedirects}
+ }
+ return optionFunc(func(ctx context.Context, fs *cinodeFS) error {
+ fs.maxLinkRedirects = maxLinkRedirects
+ return nil
+ })
+}
+
+func RootEntrypoint(ep *Entrypoint) Option {
+ return optionFunc(func(ctx context.Context, fs *cinodeFS) error {
+ fs.rootEP = &nodeUnloaded{ep: ep}
+ return nil
+ })
+}
+
+func RootEntrypointString(eps string) Option {
+ ep, err := EntrypointFromString(eps)
+ if err != nil {
+ return errOption{err}
+ }
+ return RootEntrypoint(ep)
+}
+
+func RootWriterInfo(wi *WriterInfo) Option {
+ if wi == nil {
+ return errOption{fmt.Errorf(
+ "%w: nil",
+ ErrInvalidWriterInfoData,
+ )}
+ }
+ bn, err := common.BlobNameFromBytes(wi.wi.BlobName)
+ if err != nil {
+ return errOption{fmt.Errorf(
+ "%w: %w",
+ ErrInvalidWriterInfoData,
+ err,
+ )}
+ }
+
+ key := common.BlobKeyFromBytes(wi.wi.Key)
+ ep := EntrypointFromBlobNameAndKey(bn, key)
+
+ return optionFunc(func(ctx context.Context, fs *cinodeFS) error {
+ fs.rootEP = &nodeUnloaded{ep: ep}
+ fs.c.authInfos[bn.String()] = common.AuthInfoFromBytes(wi.wi.AuthInfo)
+ return nil
+ })
+}
+
+func RootWriterInfoString(wis string) Option {
+ wi, err := WriterInfoFromString(wis)
+ if err != nil {
+ return errOption{err}
+ }
+
+ return RootWriterInfo(wi)
+}
+
+func TimeFunc(f func() time.Time) Option {
+ if f == nil {
+ return errOption{ErrInvalidNilTimeFunc}
+ }
+ return optionFunc(func(ctx context.Context, fs *cinodeFS) error {
+ fs.timeFunc = f
+ return nil
+ })
+}
+
+func RandSource(r io.Reader) Option {
+ if r == nil {
+ return errOption{ErrInvalidNilRandSource}
+ }
+ return optionFunc(func(ctx context.Context, fs *cinodeFS) error {
+ fs.randSource = r
+ return nil
+ })
+}
+
+// NewRootDynamicLink option can be used to create completely new, random
+// dynamic link as the root
+func NewRootDynamicLink() Option {
+ return optionFunc(func(ctx context.Context, fs *cinodeFS) error {
+ newLinkEntrypoint, _, err := fs.generateNewDynamicLinkEntrypoint()
+ if err != nil {
+ return err
+ }
+
+ // Generate a simple dummy structure consisting of a root link
+ // and an empty directory, all the entries are in-memory upon
+ // creation and have to be flushed first to generate any
+ // blobs
+ fs.rootEP = &nodeLink{
+ ep: newLinkEntrypoint,
+ dState: dsSubDirty,
+ target: &nodeDirectory{
+ entries: map[string]node{},
+ dState: dsDirty,
+ },
+ }
+ return nil
+ })
+}
+
+// NewRootStaticDirectory option can be used to create a new, empty
+// static directory as the root
+func NewRootStaticDirectory() Option {
+ return optionFunc(func(ctx context.Context, fs *cinodeFS) error {
+ fs.rootEP = &nodeDirectory{
+ entries: map[string]node{},
+ dState: dsDirty,
+ }
+ return nil
+ })
+}
diff --git a/pkg/cinodefs/cinodefs_options_bb_test.go b/pkg/cinodefs/cinodefs_options_bb_test.go
new file mode 100644
index 0000000..6baa2e8
--- /dev/null
+++ b/pkg/cinodefs/cinodefs_options_bb_test.go
@@ -0,0 +1,115 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs_test
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "testing/iotest"
+
+ "github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/datastore"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInvalidCinodeFSOptions(t *testing.T) {
+ t.Run("no blenc", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), nil)
+ require.ErrorIs(t, err, cinodefs.ErrInvalidBE)
+ require.Nil(t, cfs)
+ })
+
+ be := blenc.FromDatastore(datastore.InMemory())
+
+ t.Run("no root info", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be)
+ require.ErrorIs(t, err, cinodefs.ErrMissingRootInfo)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("negative max links redirects", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.NewRootStaticDirectory(),
+ cinodefs.MaxLinkRedirects(-1),
+ )
+ require.ErrorIs(t, err, cinodefs.ErrNegativeMaxLinksRedirects)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("invalid entrypoint string", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.RootEntrypointString(""),
+ )
+ require.ErrorIs(t, err, cinodefs.ErrInvalidEntrypointData)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("invalid writer info string", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.RootWriterInfoString(""),
+ )
+ require.ErrorIs(t, err, cinodefs.ErrInvalidWriterInfoData)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("invalid nil writer info", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.RootWriterInfo(nil),
+ )
+ require.ErrorIs(t, err, cinodefs.ErrInvalidWriterInfoData)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("invalid writer info", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.RootWriterInfo(&cinodefs.WriterInfo{}),
+ )
+ require.ErrorIs(t, err, cinodefs.ErrInvalidWriterInfoData)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("invalid time func", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.TimeFunc(nil),
+ )
+ require.ErrorIs(t, err, cinodefs.ErrInvalidNilTimeFunc)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("invalid nil random source", func(t *testing.T) {
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.RandSource(nil),
+ )
+ require.ErrorIs(t, err, cinodefs.ErrInvalidNilRandSource)
+ require.Nil(t, cfs)
+ })
+
+ t.Run("invalid random source", func(t *testing.T) {
+ // Error will manifest itself while random data source
+ // is needed which only takes place when new random
+ // dynamic link is requested
+ injectedErr := errors.New("random source error")
+ cfs, err := cinodefs.New(context.Background(), be,
+ cinodefs.RandSource(iotest.ErrReader(injectedErr)),
+ cinodefs.NewRootDynamicLink(),
+ )
+ require.ErrorIs(t, err, injectedErr)
+ require.Nil(t, cfs)
+ })
+}
diff --git a/pkg/cinodefs/cinodefs_traverse.go b/pkg/cinodefs/cinodefs_traverse.go
new file mode 100644
index 0000000..9c54074
--- /dev/null
+++ b/pkg/cinodefs/cinodefs_traverse.go
@@ -0,0 +1,72 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+)
+
+type traverseGoalFunc func(
+ ctx context.Context,
+ reachedEntrypoint node,
+ isWriteable bool,
+) (
+ replacementEntrypoint node,
+ changeResult dirtyState,
+ err error,
+)
+
+type traverseOptions struct {
+ createNodes bool
+ doNotCache bool
+ maxLinkRedirects int
+}
+
+// Generic graph traversal function, it follows the given path; once the endpoint
+// is reached, it executes the given callback function.
+func (fs *cinodeFS) traverseGraph(
+ ctx context.Context,
+ path []string,
+ opts traverseOptions,
+ whenReached traverseGoalFunc,
+) error {
+ for _, p := range path {
+ if p == "" {
+ return ErrEmptyName
+ }
+ }
+
+ opts.maxLinkRedirects = fs.maxLinkRedirects
+
+ changedEntrypoint, _, err := fs.rootEP.traverse(
+ ctx, // context
+ &fs.c, // graph context
+ path, // path
+ 0, // pathPosition - start at the beginning
+ 0, // linkDepth - we don't come from any link
+ true, // isWritable - root is always writable
+ opts, // traverseOptions
+ whenReached, // callback
+ )
+ if err != nil {
+ return err
+ }
+ if !opts.doNotCache {
+ fs.rootEP = changedEntrypoint
+ }
+ return nil
+}
diff --git a/pkg/cinodefs/context.go b/pkg/cinodefs/context.go
new file mode 100644
index 0000000..99576ee
--- /dev/null
+++ b/pkg/cinodefs/context.go
@@ -0,0 +1,164 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs/internal/protobuf"
+ "github.com/cinode/go/pkg/common"
+ "google.golang.org/protobuf/proto"
+)
+
+var (
+ ErrMissingKeyInfo = errors.New("missing key info")
+ ErrMissingWriterInfo = errors.New("missing writer info")
+)
+
+type graphContext struct {
+ // blenc layer used in the graph
+ be blenc.BE
+
+ // known writer info data
+ authInfos map[string]*common.AuthInfo
+}
+
+// Get symmetric encryption key for given entrypoint.
+//
+// Note: Currently the key will be stored inside entrypoint data,
+// but more advanced methods of obtaining the key may be added
+// through this function in the future.
+func (c *graphContext) keyFromEntrypoint(
+ ctx context.Context,
+ ep *Entrypoint,
+) (*common.BlobKey, error) {
+ if ep.ep.KeyInfo == nil ||
+ ep.ep.KeyInfo.Key == nil {
+ return nil, ErrMissingKeyInfo
+ }
+ return common.BlobKeyFromBytes(ep.ep.GetKeyInfo().GetKey()), nil
+}
+
+// open io.ReadCloser for data behind given entrypoint
+func (c *graphContext) getDataReader(
+ ctx context.Context,
+ ep *Entrypoint,
+) (
+ io.ReadCloser,
+ error,
+) {
+ key, err := c.keyFromEntrypoint(ctx, ep)
+ if err != nil {
+ return nil, err
+ }
+ rc, err := c.be.Open(ctx, ep.BlobName(), key)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open blob: %w", err)
+ }
+ return rc, nil
+}
+
+// return data behind entrypoint
+func (c *graphContext) readProtobufMessage(
+ ctx context.Context,
+ ep *Entrypoint,
+ msg proto.Message,
+) error {
+ rc, err := c.getDataReader(ctx, ep)
+ if err != nil {
+ return err
+ }
+ defer rc.Close()
+
+ data, err := io.ReadAll(rc)
+ if err != nil {
+ return fmt.Errorf("failed to read blob: %w", err)
+ }
+
+ err = proto.Unmarshal(data, msg)
+ if err != nil {
+ return fmt.Errorf("malformed data: %w", err)
+ }
+
+ return nil
+}
+
+func (c *graphContext) createProtobufMessage(
+ ctx context.Context,
+ blobType common.BlobType,
+ msg proto.Message,
+) (
+ *Entrypoint,
+ error,
+) {
+ data, err := proto.Marshal(msg)
+ if err != nil {
+ return nil, fmt.Errorf("serialization failed: %w", err)
+ }
+
+ bn, key, ai, err := c.be.Create(ctx, blobType, bytes.NewReader(data))
+ if err != nil {
+ return nil, fmt.Errorf("write failed: %w", err)
+ }
+
+ if ai != nil {
+ c.authInfos[bn.String()] = ai
+ }
+
+ return &Entrypoint{
+ bn: bn,
+ ep: protobuf.Entrypoint{
+ BlobName: bn.Bytes(),
+ KeyInfo: &protobuf.KeyInfo{
+ Key: key.Bytes(),
+ },
+ },
+ }, nil
+}
+
+func (c *graphContext) updateProtobufMessage(
+ ctx context.Context,
+ ep *Entrypoint,
+ msg proto.Message,
+) error {
+ wi, found := c.authInfos[ep.BlobName().String()]
+ if !found {
+ return ErrMissingWriterInfo
+ }
+
+ key, err := c.keyFromEntrypoint(ctx, ep)
+ if err != nil {
+ return err
+ }
+
+ data, err := proto.Marshal(msg)
+ if err != nil {
+ return fmt.Errorf("serialization failed: %w", err)
+ }
+
+ err = c.be.Update(ctx, ep.BlobName(), wi, key, bytes.NewReader(data))
+ if err != nil {
+ return fmt.Errorf("write failed: %w", err)
+ }
+
+ return nil
+}
diff --git a/pkg/cinodefs/entrypoint.go b/pkg/cinodefs/entrypoint.go
new file mode 100644
index 0000000..adae7cb
--- /dev/null
+++ b/pkg/cinodefs/entrypoint.go
@@ -0,0 +1,138 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/cinode/go/pkg/blobtypes"
+ "github.com/cinode/go/pkg/cinodefs/internal/protobuf"
+ "github.com/cinode/go/pkg/common"
+ "github.com/cinode/go/pkg/utilities/golang"
+ "github.com/jbenet/go-base58"
+ "google.golang.org/protobuf/proto"
+)
+
+var (
+ ErrInvalidEntrypointData = errors.New("invalid entrypoint data")
+ ErrInvalidEntrypointDataParse = fmt.Errorf("%w: protobuf parse error", ErrInvalidEntrypointData)
+ ErrInvalidEntrypointDataLinkMimetype = fmt.Errorf("%w: link can not have mimetype set", ErrInvalidEntrypointData)
+ ErrInvalidEntrypointDataNil = fmt.Errorf("%w: nil data", ErrInvalidEntrypointData)
+ ErrInvalidEntrypointTime = errors.New("time validation failed")
+ ErrExpired = fmt.Errorf("%w: entry expired", ErrInvalidEntrypointTime)
+ ErrNotYetValid = fmt.Errorf("%w: entry not yet valid", ErrInvalidEntrypointTime)
+)
+
+type Entrypoint struct {
+ ep protobuf.Entrypoint
+ bn *common.BlobName
+}
+
+func EntrypointFromString(s string) (*Entrypoint, error) {
+ if len(s) == 0 {
+ return nil, fmt.Errorf("%w: empty string", ErrInvalidEntrypointData)
+ }
+
+ b := base58.Decode(s)
+ if len(b) == 0 {
+ return nil, fmt.Errorf("%w: not a base58 string", ErrInvalidEntrypointData)
+ }
+
+ return EntrypointFromBytes(b)
+}
+
+func EntrypointFromBytes(b []byte) (*Entrypoint, error) {
+ ep := &Entrypoint{}
+
+ err := proto.Unmarshal(b, &ep.ep)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %s", ErrInvalidEntrypointDataParse, err)
+ }
+
+ err = expandEntrypointProto(ep)
+ if err != nil {
+ return nil, err
+ }
+
+ return ep, nil
+}
+
+func entrypointFromProtobuf(data *protobuf.Entrypoint) (*Entrypoint, error) {
+ if data == nil {
+ return nil, ErrInvalidEntrypointDataNil
+ }
+
+ ep := &Entrypoint{}
+ proto.Merge(&ep.ep, data)
+ err := expandEntrypointProto(ep)
+ if err != nil {
+ return nil, err
+ }
+ return ep, nil
+}
+
+func expandEntrypointProto(ep *Entrypoint) error {
+ // Extract blob name from entrypoint
+ bn, err := common.BlobNameFromBytes(ep.ep.BlobName)
+ if err != nil {
+ return fmt.Errorf("%w: %w", ErrInvalidEntrypointData, err)
+ }
+ ep.bn = bn
+
+ // Links must not have mimetype set
+ if ep.IsLink() && ep.ep.MimeType != "" {
+ return ErrInvalidEntrypointDataLinkMimetype
+ }
+
+ return nil
+}
+
+func EntrypointFromBlobNameAndKey(bn *common.BlobName, key *common.BlobKey) *Entrypoint {
+ return setEntrypointBlobNameAndKey(bn, key, &Entrypoint{})
+}
+
+func setEntrypointBlobNameAndKey(bn *common.BlobName, key *common.BlobKey, ep *Entrypoint) *Entrypoint {
+ ep.bn = bn
+ ep.ep.BlobName = bn.Bytes()
+ ep.ep.KeyInfo = &protobuf.KeyInfo{Key: key.Bytes()}
+ return ep
+}
+
+func (e *Entrypoint) String() string {
+ return base58.Encode(e.Bytes())
+}
+
+func (e *Entrypoint) Bytes() []byte {
+ return golang.Must(proto.Marshal(&e.ep))
+}
+
+func (e *Entrypoint) BlobName() *common.BlobName {
+ return e.bn
+}
+
+func (e *Entrypoint) IsLink() bool {
+ return e.bn.Type() == blobtypes.DynamicLink
+}
+
+func (e *Entrypoint) IsDir() bool {
+ return e.ep.MimeType == CinodeDirMimeType
+}
+
+func (e *Entrypoint) MimeType() string {
+ return e.ep.MimeType
+}
diff --git a/pkg/cinodefs/entrypoint_bb_test.go b/pkg/cinodefs/entrypoint_bb_test.go
new file mode 100644
index 0000000..cebe0e0
--- /dev/null
+++ b/pkg/cinodefs/entrypoint_bb_test.go
@@ -0,0 +1,77 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs_test
+
+import (
+ "testing"
+
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/internal/protobuf"
+ "github.com/cinode/go/testvectors/testblobs"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/proto"
+)
+
+func TestEntrypointFromStringFailures(t *testing.T) {
+ for _, d := range []struct {
+ s string
+ errContains string
+ }{
+ {"", "empty string"},
+ {"not-a-base64-string!!!!!!!!", "not a base58 string"},
+ {"aaaaaaaa", "protobuf parse error"},
+ } {
+ t.Run(d.s, func(t *testing.T) {
+ wi, err := cinodefs.EntrypointFromString(d.s)
+ require.ErrorIs(t, err, cinodefs.ErrInvalidEntrypointData)
+ require.ErrorContains(t, err, d.errContains)
+ require.Nil(t, wi)
+ })
+ }
+}
+
+func TestInvalidEntrypointData(t *testing.T) {
+ for _, d := range []struct {
+ n string
+ p *protobuf.Entrypoint
+ errContains string
+ }{
+ {
+ "invalid blob name",
+ &protobuf.Entrypoint{},
+ "invalid blob name",
+ },
+ {
+ "mime type set for link",
+ &protobuf.Entrypoint{
+ BlobName: testblobs.DynamicLink.BlobName.Bytes(),
+ MimeType: "test-mimetype",
+ },
+ "link can not have mimetype set",
+ },
+ } {
+ t.Run(d.n, func(t *testing.T) {
+ bytes, err := proto.Marshal(d.p)
+ require.NoError(t, err)
+
+ ep, err := cinodefs.EntrypointFromBytes(bytes)
+ require.ErrorIs(t, err, cinodefs.ErrInvalidEntrypointData)
+ require.ErrorContains(t, err, d.errContains)
+ require.Nil(t, ep)
+ })
+ }
+}
diff --git a/pkg/cinodefs/entrypoint_options.go b/pkg/cinodefs/entrypoint_options.go
new file mode 100644
index 0000000..be6d24f
--- /dev/null
+++ b/pkg/cinodefs/entrypoint_options.go
@@ -0,0 +1,43 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+)
+
+type EntrypointOption interface {
+ apply(ctx context.Context, ep *Entrypoint)
+}
+
+type entrypointOptionBasicFunc func(ep *Entrypoint)
+
+func (f entrypointOptionBasicFunc) apply(ctx context.Context, ep *Entrypoint) { f(ep) }
+
+func SetMimeType(mimeType string) EntrypointOption {
+ return entrypointOptionBasicFunc(func(ep *Entrypoint) {
+ ep.ep.MimeType = mimeType
+ })
+}
+
+func entrypointFromOptions(ctx context.Context, opts ...EntrypointOption) *Entrypoint {
+ ep := &Entrypoint{}
+ for _, o := range opts {
+ o.apply(ctx, ep)
+ }
+ return ep
+}
diff --git a/pkg/cinodefs/httphandler/http.go b/pkg/cinodefs/httphandler/http.go
new file mode 100644
index 0000000..0a1d4c1
--- /dev/null
+++ b/pkg/cinodefs/httphandler/http.go
@@ -0,0 +1,104 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package httphandler
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/cinode/go/pkg/cinodefs"
+ "golang.org/x/exp/slog"
+)
+
+type Handler struct {
+ FS cinodefs.FS
+ IndexFile string
+ Log *slog.Logger
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ log := h.Log.With(
+ slog.String("RemoteAddr", r.RemoteAddr),
+ slog.String("URL", r.URL.String()),
+ slog.String("Method", r.Method),
+ )
+
+ switch r.Method {
+ case "GET":
+ h.serveGet(w, r, log)
+ return
+ default:
+ log.Error("Method not allowed")
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+}
+
+func (h *Handler) serveGet(w http.ResponseWriter, r *http.Request, log *slog.Logger) {
+ path := r.URL.Path
+ if strings.HasSuffix(path, "/") {
+ path += h.IndexFile
+ }
+
+ pathList := strings.Split(strings.TrimPrefix(path, "/"), "/")
+ fileEP, err := h.FS.FindEntry(r.Context(), pathList)
+ switch {
+ case errors.Is(err, cinodefs.ErrEntryNotFound),
+ errors.Is(err, cinodefs.ErrNotADirectory):
+ log.Warn("Not found")
+ http.NotFound(w, r)
+ return
+ case errors.Is(err, cinodefs.ErrModifiedDirectory):
+ // Can't get the entrypoint, but since it's a directory
+ // (only with unsaved changes), redirect to the directory itself
+ // that will in the end load the index file if present.
+ http.Redirect(w, r, r.URL.Path+"/", http.StatusTemporaryRedirect)
+ return
+ case h.handleHttpError(err, w, log, "Error finding entrypoint"):
+ return
+ }
+
+ if fileEP.IsDir() {
+ http.Redirect(w, r, r.URL.Path+"/", http.StatusTemporaryRedirect)
+ return
+ }
+
+ rc, err := h.FS.OpenEntrypointData(r.Context(), fileEP)
+ if h.handleHttpError(err, w, log, "Error opening file") {
+ return
+ }
+ defer rc.Close()
+
+ w.Header().Set("Content-Type", fileEP.MimeType())
+ _, err = io.Copy(w, rc)
+ h.handleHttpError(err, w, log, "Error sending file")
+}
+
+func (h *Handler) handleHttpError(err error, w http.ResponseWriter, log *slog.Logger, logMsg string) bool {
+ if err != nil {
+ log.Error(logMsg, "err", err)
+ http.Error(w,
+ fmt.Sprintf("%s: %v", http.StatusText(http.StatusInternalServerError), err),
+ http.StatusInternalServerError,
+ )
+ return true
+ }
+ return false
+}
diff --git a/pkg/cinodefs/httphandler/http_test.go b/pkg/cinodefs/httphandler/http_test.go
new file mode 100644
index 0000000..25b5f9f
--- /dev/null
+++ b/pkg/cinodefs/httphandler/http_test.go
@@ -0,0 +1,245 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package httphandler
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "testing/iotest"
+
+ "github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/common"
+ "github.com/cinode/go/pkg/datastore"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "golang.org/x/exp/slog"
+)
+
+type mockDatastore struct {
+ datastore.DS
+ openFunc func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error)
+}
+
+func (m *mockDatastore) Open(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
+ if m.openFunc != nil {
+ return m.openFunc(ctx, name)
+ }
+ return m.DS.Open(ctx, name)
+}
+
+type HandlerTestSuite struct {
+ suite.Suite
+
+ ds mockDatastore
+ fs cinodefs.FS
+ handler *Handler
+ server *httptest.Server
+ logData *bytes.Buffer
+}
+
+func TestHandlerTestSuite(t *testing.T) {
+ suite.Run(t, &HandlerTestSuite{})
+}
+
+func (s *HandlerTestSuite) SetupTest() {
+ s.ds = mockDatastore{DS: datastore.InMemory()}
+ fs, err := cinodefs.New(
+ context.Background(),
+ blenc.FromDatastore(&s.ds),
+ cinodefs.NewRootStaticDirectory(),
+ )
+ require.NoError(s.T(), err)
+ s.fs = fs
+
+ s.logData = bytes.NewBuffer(nil)
+ log := slog.New(slog.NewJSONHandler(
+ s.logData,
+ &slog.HandlerOptions{Level: slog.LevelDebug},
+ ))
+
+ s.handler = &Handler{
+ FS: fs,
+ IndexFile: "index.html",
+ Log: log,
+ }
+ s.server = httptest.NewServer(s.handler)
+ s.T().Cleanup(s.server.Close)
+}
+
+func (s *HandlerTestSuite) setEntry(t *testing.T, data string, path ...string) {
+ _, err := s.fs.SetEntryFile(
+ context.Background(),
+ path,
+ strings.NewReader(data),
+ )
+ require.NoError(t, err)
+}
+
+func (s *HandlerTestSuite) getEntry(t *testing.T, path string) (string, string, int) {
+ resp, err := http.Get(s.server.URL + path)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ data, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ return string(data), resp.Header.Get("content-type"), resp.StatusCode
+}
+
+func (s *HandlerTestSuite) getData(t *testing.T, path string) string {
+ data, _, code := s.getEntry(t, path)
+ require.Equal(t, http.StatusOK, code)
+ return data
+}
+
+func (s *HandlerTestSuite) TestSuccessfulFileDownload() {
+ s.setEntry(s.T(), "hello", "file.txt")
+ readBack := s.getData(s.T(), "/file.txt")
+ require.Equal(s.T(), "hello", readBack)
+}
+
+func (s *HandlerTestSuite) TestNonGetRequest() {
+ t := s.T()
+ resp, err := http.Post(s.server.URL, "text/plain", strings.NewReader("Hello world!"))
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ require.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode)
+}
+
+func (s *HandlerTestSuite) TestNotFound() {
+ _, err := s.fs.SetEntryFile(context.Background(), []string{"hello.txt"}, strings.NewReader("hello"))
+ require.NoError(s.T(), err)
+
+ _, _, code := s.getEntry(s.T(), "/no-hello.txt")
+ require.Equal(s.T(), http.StatusNotFound, code)
+
+ _, _, code = s.getEntry(s.T(), "/hello.txt/world")
+ require.Equal(s.T(), http.StatusNotFound, code)
+}
+
+func (s *HandlerTestSuite) TestReadIndexFile() {
+ s.setEntry(s.T(), "hello", "dir", "index.html")
+
+ // Repeat twice, once before and once after flush
+ for i := 0; i < 2; i++ {
+ readBack := s.getData(s.T(), "/dir")
+ require.Equal(s.T(), "hello", readBack)
+
+ err := s.fs.Flush(context.Background())
+ require.NoError(s.T(), err)
+ }
+}
+
+func (s *HandlerTestSuite) TestReadErrors() {
+ // Strictly controlled list of blob ids accessed, if at any time blob names
+ // would change, that would mean change in blob hashing algorithm
+ const bNameDir = "KAJgH9GYbmHxp4MUZvLswDh4t2TjTfVECAMmmv7MAzSZF"
+ const bNameFile = "pKFmwKyCeLeHjFRiwhGaajuhupPg5tS61tcL6F7sjBHRW"
+
+ s.setEntry(s.T(), "hello", "file.txt")
+
+ err := s.fs.Flush(context.Background())
+ require.NoError(s.T(), err)
+
+ s.T().Run("dir read error", func(t *testing.T) {
+ mockErr := errors.New("mock error dir")
+ s.ds.openFunc = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
+ switch n := name.String(); n {
+ case bNameDir:
+ return nil, mockErr
+ case bNameFile:
+ return s.ds.DS.Open(ctx, name)
+ default:
+ panic("Unrecognized blob: " + n)
+ }
+ }
+ defer func() { s.ds.openFunc = nil }()
+
+ _, _, code := s.getEntry(t, "/file.txt")
+ require.Equal(t, http.StatusInternalServerError, code)
+ require.Contains(t, s.logData.String(), mockErr.Error())
+ })
+
+ s.T().Run("file open error", func(t *testing.T) {
+ mockErr := errors.New("mock error file open")
+ s.ds.openFunc = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
+ switch n := name.String(); n {
+ case bNameDir:
+ return s.ds.DS.Open(ctx, name)
+ case bNameFile:
+ return nil, mockErr
+ default:
+ panic("Unrecognized blob: " + n)
+ }
+ }
+ defer func() { s.ds.openFunc = nil }()
+
+ _, _, code := s.getEntry(t, "/file.txt")
+ require.Equal(t, http.StatusInternalServerError, code)
+ require.Contains(t, s.logData.String(), mockErr.Error())
+ })
+
+ s.T().Run("file read error with error header", func(t *testing.T) {
+ mockErr := errors.New("mock error file read with headers")
+ s.ds.openFunc = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
+ switch n := name.String(); n {
+ case bNameDir:
+ return s.ds.DS.Open(ctx, name)
+ case bNameFile:
+ return io.NopCloser(iotest.ErrReader(mockErr)), nil
+ default:
+ panic("Unrecognized blob: " + n)
+ }
+ }
+ defer func() { s.ds.openFunc = nil }()
+
+ _, _, code := s.getEntry(t, "/file.txt")
+ require.Equal(t, http.StatusInternalServerError, code)
+ require.Contains(t, s.logData.String(), mockErr.Error())
+ })
+
+ s.T().Run("file read error with partially sent data", func(t *testing.T) {
+ mockErr := errors.New("mock error file read without headers")
+ s.ds.openFunc = func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
+ switch n := name.String(); n {
+ case bNameDir:
+ return s.ds.DS.Open(ctx, name)
+ case bNameFile:
+ return io.NopCloser(io.MultiReader(
+ strings.NewReader("hello world!"),
+ iotest.ErrReader(mockErr),
+ )), nil
+ default:
+ panic("Unrecognized blob: " + n)
+ }
+ }
+ defer func() { s.ds.openFunc = nil }()
+
+ content, _, _ := s.getEntry(t, "/file.txt")
+ // Since headers were already sent, there's no way to report back an error,
+ // we can only check if logs contain some error information
+ require.Contains(t, s.logData.String(), mockErr.Error())
+ require.Contains(t, content, http.StatusText(http.StatusInternalServerError))
+ })
+}
diff --git a/pkg/protobuf/protobuf.pb.go b/pkg/cinodefs/internal/protobuf/protobuf.pb.go
similarity index 100%
rename from pkg/protobuf/protobuf.pb.go
rename to pkg/cinodefs/internal/protobuf/protobuf.pb.go
diff --git a/pkg/protobuf/protobuf.proto b/pkg/cinodefs/internal/protobuf/protobuf.proto
similarity index 100%
rename from pkg/protobuf/protobuf.proto
rename to pkg/cinodefs/internal/protobuf/protobuf.proto
diff --git a/pkg/cinodefs/node.go b/pkg/cinodefs/node.go
new file mode 100644
index 0000000..6bbe9b2
--- /dev/null
+++ b/pkg/cinodefs/node.go
@@ -0,0 +1,81 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+//
+// cached entries:
+// * unloaded entry - we only have entrypoint data
+// * directory - either clean (with existing entrypoint) or dirty (modified entries, not yet flushed)
+//  * link - either clean (with target stored) or dirty (target changed but not yet flushed)
+// * file - entrypoint to static blob
+//
+// node states:
+// * if unloaded entry - contains entrypoint to the element, from entrypoint it can be deduced if this
+// is a dynamic link (from blob name) or directory (from mime type), this node does not need flushing
+// * node is dirty directly - the node was modified, its entrypoint can not be deduced before the node
+// is flushed, some modifications are kept in memory and can still be lost
+// * sub-nodes are dirty - the node itself is not dirty but some sub-nodes are. The node itself can have
+// entrypoint deduced because it will not change, but some sub-nodes will need flushing to persist the
+// data. Such situation is caused by dynamic links - the target can require flushing but the link itself
+// will preserve its entrypoint.
+//
+
+import (
+ "context"
+)
+
+type dirtyState byte
+
+const (
+ // node and its sub-nodes are all clear, this sub-graph does not require flushing and is fully persisted
+ dsClean dirtyState = 0
+
+ // node is dirty, requires flushing to persist data
+ dsDirty dirtyState = 1
+
+ // node is itself clean, but some sub-nodes are dirty, flushing will be forwarded to sub-nodes
+ dsSubDirty dirtyState = 2
+)
+
+// node is a base interface required by all cached entries
+type node interface {
+ // returns dirty state of this entrypoint
+ dirty() dirtyState
+
+ // flush this entrypoint
+ flush(ctx context.Context, gc *graphContext) (node, *Entrypoint, error)
+
+ // traverse node
+ traverse(
+ ctx context.Context,
+ gc *graphContext,
+ path []string,
+ pathPosition int,
+ linkDepth int,
+ isWritable bool,
+ opts traverseOptions,
+ whenReached traverseGoalFunc,
+ ) (
+ replacementNode node,
+ state dirtyState,
+ err error,
+ )
+
+ // get current entrypoint value, do not flush before, if node is not flushed
+ // it must return appropriate error
+ entrypoint() (*Entrypoint, error)
+}
diff --git a/pkg/cinodefs/node_directory.go b/pkg/cinodefs/node_directory.go
new file mode 100644
index 0000000..8ba8c1e
--- /dev/null
+++ b/pkg/cinodefs/node_directory.go
@@ -0,0 +1,242 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+ "sort"
+
+ "github.com/cinode/go/pkg/blobtypes"
+ "github.com/cinode/go/pkg/cinodefs/internal/protobuf"
+ "github.com/cinode/go/pkg/utilities/golang"
+)
+
+// nodeDirectory holds a directory entry loaded into memory
+type nodeDirectory struct {
+ entries map[string]node
+ stored *Entrypoint // current entrypoint, will be nil if directory was modified
+	dState  dirtyState  // dsDirty / dsSubDirty if this node or any sub-node needs flushing
+}
+
+func (d *nodeDirectory) dirty() dirtyState {
+ return d.dState
+}
+
+func (d *nodeDirectory) flush(ctx context.Context, gc *graphContext) (node, *Entrypoint, error) {
+ if d.dState == dsClean {
+ // all clear, nothing to flush here or in sub-trees
+ return d, d.stored, nil
+ }
+
+ if d.dState == dsSubDirty {
+		// Some sub-nodes are dirty, need to propagate the flush to them
+ flushedEntries := make(map[string]node, len(d.entries))
+ for name, entry := range d.entries {
+ target, _, err := entry.flush(ctx, gc)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ flushedEntries[name] = target
+ }
+
+ // directory itself was not modified and does not need flush, don't bother
+ // saving it to datastore
+ return &nodeDirectory{
+ entries: flushedEntries,
+ stored: d.stored,
+ dState: dsClean,
+ }, d.stored, nil
+ }
+
+ golang.Assert(d.dState == dsDirty, "ensure correct dirtiness state")
+
+ // Directory has changed, have to recalculate its blob and save it in data store
+ dir := protobuf.Directory{
+ Entries: make([]*protobuf.Directory_Entry, 0, len(d.entries)),
+ }
+ flushedEntries := make(map[string]node, len(d.entries))
+ for name, entry := range d.entries {
+ target, targetEP, err := entry.flush(ctx, gc)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ flushedEntries[name] = target
+ dir.Entries = append(dir.Entries, &protobuf.Directory_Entry{
+ Name: name,
+ Ep: &targetEP.ep,
+ })
+ }
+
+ // Sort by name - that way we gain deterministic order during
+	// serialization of the directory
+ sort.Slice(dir.Entries, func(i, j int) bool {
+ return dir.Entries[i].Name < dir.Entries[j].Name
+ })
+
+ ep, err := gc.createProtobufMessage(ctx, blobtypes.Static, &dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ ep.ep.MimeType = CinodeDirMimeType
+
+ return &nodeDirectory{
+ entries: flushedEntries,
+ stored: ep,
+ dState: dsClean,
+ }, ep, nil
+}
+
+func (c *nodeDirectory) traverse(
+ ctx context.Context,
+ gc *graphContext,
+ path []string,
+ pathPosition int,
+ linkDepth int,
+ isWritable bool,
+ opts traverseOptions,
+ whenReached traverseGoalFunc,
+) (
+ node,
+ dirtyState,
+ error,
+) {
+ if pathPosition == len(path) {
+ return whenReached(ctx, c, isWritable)
+ }
+
+ subNode, found := c.entries[path[pathPosition]]
+ if !found {
+ if !opts.createNodes {
+ return nil, 0, ErrEntryNotFound
+ }
+ if !isWritable {
+ return nil, 0, ErrMissingWriterInfo
+ }
+ // create new sub-path
+ newNode, err := c.traverseRecursiveNewPath(
+ ctx,
+ path,
+ pathPosition+1,
+ opts,
+ whenReached,
+ )
+ if err != nil {
+ return nil, 0, err
+ }
+ c.entries[path[pathPosition]] = newNode
+ c.dState = dsDirty
+ return c, dsDirty, nil
+ }
+
+ // found path entry, descend to sub-node
+ replacement, replacementState, err := subNode.traverse(
+ ctx,
+ gc,
+ path,
+ pathPosition+1,
+ 0,
+ isWritable,
+ opts,
+ whenReached,
+ )
+ if err != nil {
+ return nil, 0, err
+ }
+ if opts.doNotCache {
+ return c, dsClean, nil
+ }
+
+ c.entries[path[pathPosition]] = replacement
+ if replacementState == dsDirty {
+ // child is dirty, this propagates down to the current node
+ c.dState = dsDirty
+ return c, dsDirty, nil
+ }
+
+ if replacementState == dsSubDirty {
+ // child itself is not dirty, but some sub-node is, sub-dirtiness
+ // propagates to the current node, but if the directory is
+ // already directly dirty (stronger dirtiness), keep it as it is
+ if c.dState != dsDirty {
+ c.dState = dsSubDirty
+ }
+ return c, dsSubDirty, nil
+ }
+
+ golang.Assert(replacementState == dsClean, "ensure correct dirtiness state")
+ // leave current state as it is
+ return c, dsClean, nil
+
+}
+
+func (c *nodeDirectory) traverseRecursiveNewPath(
+ ctx context.Context,
+ path []string,
+ pathPosition int,
+ opts traverseOptions,
+ whenReached traverseGoalFunc,
+) (
+ node,
+ error,
+) {
+ if len(path) == pathPosition {
+ replacement, _, err := whenReached(ctx, nil, true)
+ return replacement, err
+ }
+
+ sub, err := c.traverseRecursiveNewPath(
+ ctx,
+ path,
+ pathPosition+1,
+ opts,
+ whenReached,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &nodeDirectory{
+ entries: map[string]node{
+ path[pathPosition]: sub,
+ },
+ dState: dsDirty,
+ }, nil
+}
+
+func (c *nodeDirectory) entrypoint() (*Entrypoint, error) {
+ if c.dState == dsDirty {
+ return nil, ErrModifiedDirectory
+ }
+
+ golang.Assert(
+ c.dState == dsClean || c.dState == dsSubDirty,
+ "ensure dirtiness state is valid",
+ )
+
+ return c.stored, nil
+}
+
+func (c *nodeDirectory) deleteEntry(name string) bool {
+ if _, hasEntry := c.entries[name]; !hasEntry {
+ return false
+ }
+ delete(c.entries, name)
+ c.dState = dsDirty
+ return true
+}
diff --git a/pkg/cinodefs/node_file.go b/pkg/cinodefs/node_file.go
new file mode 100644
index 0000000..77347e5
--- /dev/null
+++ b/pkg/cinodefs/node_file.go
@@ -0,0 +1,60 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+)
+
+// nodeFile is a leaf node representing a single file, identified by its entrypoint.
+type nodeFile struct {
+	ep *Entrypoint
+}
+
+func (c *nodeFile) dirty() dirtyState {
+ return dsClean
+}
+
+func (c *nodeFile) flush(ctx context.Context, gc *graphContext) (node, *Entrypoint, error) {
+ return c, c.ep, nil
+}
+
+func (c *nodeFile) traverse(
+ ctx context.Context,
+ gc *graphContext,
+ path []string,
+ pathPosition int,
+ linkDepth int,
+ isWritable bool,
+ opts traverseOptions,
+ whenReached traverseGoalFunc,
+) (
+ node,
+ dirtyState,
+ error,
+) {
+ if pathPosition == len(path) {
+ return whenReached(ctx, c, isWritable)
+ }
+
+ // We're supposed to traverse into sub-path but it's not a directory
+ return nil, 0, ErrNotADirectory
+}
+
+func (c *nodeFile) entrypoint() (*Entrypoint, error) {
+ return c.ep, nil
+}
diff --git a/pkg/cinodefs/node_link.go b/pkg/cinodefs/node_link.go
new file mode 100644
index 0000000..140228b
--- /dev/null
+++ b/pkg/cinodefs/node_link.go
@@ -0,0 +1,127 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+
+ "github.com/cinode/go/pkg/utilities/golang"
+)
+
+// nodeLink is a link node loaded into memory.
+type nodeLink struct {
+	ep     *Entrypoint // entrypoint of the link itself
+	target node        // target for the link
+	dState dirtyState  // dsClean or dsSubDirty only — the link's own entrypoint never changes
+}
+
+func (c *nodeLink) dirty() dirtyState {
+ return c.dState
+}
+
+func (c *nodeLink) flush(ctx context.Context, gc *graphContext) (node, *Entrypoint, error) {
+ if c.dState == dsClean {
+ // all clear
+ return c, c.ep, nil
+ }
+
+ golang.Assert(c.dState == dsSubDirty, "link can be clean or sub-dirty")
+ target, targetEP, err := c.target.flush(ctx, gc)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = gc.updateProtobufMessage(ctx, c.ep, &targetEP.ep)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ret := &nodeLink{
+ ep: c.ep,
+ target: target,
+ dState: dsClean,
+ }
+
+ return ret, ret.ep, nil
+}
+
+func (c *nodeLink) traverse(
+ ctx context.Context,
+ gc *graphContext,
+ path []string,
+ pathPosition int,
+ linkDepth int,
+ isWritable bool,
+ opts traverseOptions,
+ whenReached traverseGoalFunc,
+) (
+ node,
+ dirtyState,
+ error,
+) {
+ if linkDepth >= opts.maxLinkRedirects {
+ return nil, 0, ErrTooManyRedirects
+ }
+
+ // Note: we don't stop here even if we've reached the end of
+ // traverse path, delegate traversal to target node instead
+
+ // crossing link border, whether sub-graph is writeable is determined
+ // by availability of corresponding writer info
+ _, hasAuthInfo := gc.authInfos[c.ep.bn.String()]
+
+ newTarget, targetState, err := c.target.traverse(
+ ctx,
+ gc,
+ path,
+ pathPosition,
+ linkDepth+1,
+ hasAuthInfo,
+ opts,
+ whenReached,
+ )
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if opts.doNotCache {
+ return c, dsClean, nil
+ }
+
+ c.target = newTarget
+ if targetState == dsClean {
+ // Nothing to do
+ //
+ // Note: this path will happen once we keep clean nodes
+ // in the memory for caching purposes
+ return c, dsClean, nil
+ }
+
+ golang.Assert(
+ targetState == dsDirty || targetState == dsSubDirty,
+ "ensure correct dirtiness state",
+ )
+
+ // sub-dirty propagates normally, dirty becomes sub-dirty
+ // because link's entrypoint never changes
+ c.dState = dsSubDirty
+ return c, dsSubDirty, nil
+}
+
+func (c *nodeLink) entrypoint() (*Entrypoint, error) {
+ return c.ep, nil
+}
diff --git a/pkg/cinodefs/node_unloaded.go b/pkg/cinodefs/node_unloaded.go
new file mode 100644
index 0000000..cd39294
--- /dev/null
+++ b/pkg/cinodefs/node_unloaded.go
@@ -0,0 +1,135 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/cinode/go/pkg/cinodefs/internal/protobuf"
+)
+
+type nodeUnloaded struct {
+ ep *Entrypoint
+}
+
+func (c *nodeUnloaded) dirty() dirtyState {
+ return dsClean
+}
+
+func (c *nodeUnloaded) flush(ctx context.Context, gc *graphContext) (node, *Entrypoint, error) {
+ return c, c.ep, nil
+}
+
+func (c *nodeUnloaded) traverse(
+ ctx context.Context,
+ gc *graphContext,
+ path []string,
+ pathPosition int,
+ linkDepth int,
+ isWritable bool,
+ opts traverseOptions,
+ whenReached traverseGoalFunc,
+) (
+ node,
+ dirtyState,
+ error,
+) {
+ loaded, err := c.load(ctx, gc)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return loaded.traverse(
+ ctx,
+ gc,
+ path,
+ pathPosition,
+ linkDepth,
+ isWritable,
+ opts,
+ whenReached,
+ )
+}
+
+func (c *nodeUnloaded) load(ctx context.Context, gc *graphContext) (node, error) {
+ // Data is behind some entrypoint, try to load it
+ if c.ep.IsLink() {
+ return c.loadEntrypointLink(ctx, gc)
+ }
+
+ if c.ep.IsDir() {
+ return c.loadEntrypointDir(ctx, gc)
+ }
+
+ return &nodeFile{ep: c.ep}, nil
+}
+
+func (c *nodeUnloaded) loadEntrypointLink(ctx context.Context, gc *graphContext) (node, error) {
+ targetEP := &Entrypoint{}
+ err := gc.readProtobufMessage(ctx, c.ep, &targetEP.ep)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %w", ErrCantOpenLink, err)
+ }
+
+ err = expandEntrypointProto(targetEP)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %w", ErrCantOpenLink, err)
+ }
+
+ return &nodeLink{
+ ep: c.ep,
+ target: &nodeUnloaded{ep: targetEP},
+ dState: dsClean,
+ }, nil
+}
+
+func (c *nodeUnloaded) loadEntrypointDir(ctx context.Context, gc *graphContext) (node, error) {
+ msg := &protobuf.Directory{}
+ err := gc.readProtobufMessage(ctx, c.ep, msg)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %w", ErrCantOpenDir, err)
+ }
+
+ dir := make(map[string]node, len(msg.Entries))
+
+ for _, entry := range msg.Entries {
+ if entry.Name == "" {
+ return nil, fmt.Errorf("%w: %w", ErrCantOpenDir, ErrEmptyName)
+ }
+ if _, exists := dir[entry.Name]; exists {
+ return nil, fmt.Errorf("%w: %s", ErrCantOpenDirDuplicateEntry, entry.Name)
+ }
+
+ ep, err := entrypointFromProtobuf(entry.Ep)
+ if err != nil {
+ return nil, fmt.Errorf("%w: %w", ErrCantOpenDir, err)
+ }
+
+ dir[entry.Name] = &nodeUnloaded{ep: ep}
+ }
+
+ return &nodeDirectory{
+ stored: c.ep,
+ entries: dir,
+ dState: dsClean,
+ }, nil
+}
+
+func (c *nodeUnloaded) entrypoint() (*Entrypoint, error) {
+ return c.ep, nil
+}
diff --git a/pkg/cinodefs/uploader/directory.go b/pkg/cinodefs/uploader/directory.go
new file mode 100644
index 0000000..6500832
--- /dev/null
+++ b/pkg/cinodefs/uploader/directory.go
@@ -0,0 +1,219 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uploader
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "html/template"
+ "io/fs"
+ "path"
+
+ _ "embed"
+
+ "github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/utilities/golang"
+ "golang.org/x/exp/slog"
+)
+
+const (
+	CinodeDirMimeType = "application/cinode-dir" // NOTE(review): appears unused — compilePath uses cinodefs.CinodeDirMimeType; confirm and consider removing the duplicate
+)
+
+var (
+ ErrNotFound = blenc.ErrNotFound
+ ErrNotADirectory = errors.New("entry is not a directory")
+ ErrNotAFile = errors.New("entry is not a file")
+ ErrNotADirectoryOrAFile = errors.New("entry is neither a directory nor a regular file")
+)
+
+func UploadStaticDirectory(
+ ctx context.Context,
+ fsys fs.FS,
+ cfs cinodefs.FS,
+ opts ...Option,
+) error {
+ c := dirCompiler{
+ ctx: ctx,
+ fsys: fsys,
+ cfs: cfs,
+ log: slog.Default(),
+ }
+ for _, opt := range opts {
+ opt(&c)
+ }
+
+ _, err := c.compilePath(ctx, ".", c.basePath)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type Option func(d *dirCompiler)
+
+func BasePath(path ...string) Option {
+ return Option(func(d *dirCompiler) {
+ d.basePath = path
+ })
+}
+
+func CreateIndexFile(indexFile string) Option {
+ return Option(func(d *dirCompiler) {
+ d.createIndexFile = true
+ d.indexFileName = indexFile
+ })
+}
+
+type dirCompiler struct {
+ ctx context.Context
+ fsys fs.FS
+ cfs cinodefs.FS
+ log *slog.Logger
+ basePath []string
+ createIndexFile bool
+ indexFileName string
+}
+
+type dirEntry struct {
+ Name string
+ MimeType string
+ IsDir bool
+ Size int64
+}
+
+func (d *dirCompiler) compilePath(
+	ctx context.Context,
+	srcPath string,
+	destPath []string,
+) (*dirEntry, error) {
+	st, err := fs.Stat(d.fsys, srcPath)
+	if err != nil {
+		d.log.ErrorContext(ctx, "failed to stat path", "path", srcPath, "err", err)
+		return nil, fmt.Errorf("couldn't check path: %w", err)
+	}
+
+	var name string
+	if len(destPath) > 0 {
+		name = destPath[len(destPath)-1]
+	}
+
+	if st.IsDir() {
+		size, err := d.compileDir(ctx, srcPath, destPath)
+		if err != nil {
+			return nil, err
+		}
+		return &dirEntry{
+			Name:     name,
+			MimeType: cinodefs.CinodeDirMimeType,
+			IsDir:    true,
+			Size:     int64(size),
+		}, nil
+	}
+
+	if st.Mode().IsRegular() {
+		mime, err := d.compileFile(ctx, srcPath, destPath)
+		if err != nil {
+			return nil, err
+		}
+		return &dirEntry{
+			Name:     name,
+			MimeType: mime,
+			IsDir:    false,
+			Size:     st.Size(),
+		}, nil
+	}
+
+	d.log.ErrorContext(ctx, "path is neither dir nor a regular file", "path", srcPath)
+	return nil, fmt.Errorf("%w: %v", ErrNotADirectoryOrAFile, srcPath)
+}
+
+func (d *dirCompiler) compileFile(ctx context.Context, srcPath string, dstPath []string) (string, error) {
+ d.log.InfoContext(ctx, "compiling file", "path", srcPath)
+ fl, err := d.fsys.Open(srcPath)
+ if err != nil {
+ d.log.ErrorContext(ctx, "failed to open file", "path", srcPath, "err", err)
+ return "", fmt.Errorf("couldn't open file %v: %w", srcPath, err)
+ }
+ defer fl.Close()
+
+ ep, err := d.cfs.SetEntryFile(ctx, dstPath, fl)
+ if err != nil {
+ return "", fmt.Errorf("failed to upload file %v: %w", srcPath, err)
+ }
+
+ return ep.MimeType(), nil
+}
+
+func (d *dirCompiler) compileDir(ctx context.Context, srcPath string, dstPath []string) (int, error) {
+	fileList, err := fs.ReadDir(d.fsys, srcPath)
+	if err != nil {
+		d.log.ErrorContext(ctx, "couldn't read contents of dir", "path", srcPath, "err", err)
+		return 0, fmt.Errorf("couldn't read contents of dir %v: %w", srcPath, err)
+	}
+
+	entries := make([]*dirEntry, 0, len(fileList))
+	hasIndex := false
+
+	for _, e := range fileList {
+		entry, err := d.compilePath(
+			ctx,
+			path.Join(srcPath, e.Name()),
+			append(append([]string(nil), dstPath...), e.Name()), // clone dstPath — plain append would share one backing array across sibling iterations
+		)
+		if err != nil {
+			return 0, err
+		}
+
+		if entry.Name == d.indexFileName {
+			hasIndex = true
+		} else {
+			entries = append(entries, entry)
+		}
+	}
+
+	if d.createIndexFile && !hasIndex {
+		buf := bytes.NewBuffer(nil)
+		err = dirIndexTemplate.Execute(buf, map[string]any{
+			"entries":   entries,
+			"indexName": d.indexFileName,
+		})
+		golang.Assert(err == nil, "template execution must not fail")
+
+		_, err = d.cfs.SetEntryFile(ctx,
+			append(append([]string(nil), dstPath...), d.indexFileName), // clone for the same aliasing reason as above
+			bytes.NewReader(buf.Bytes()),
+		)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	return len(fileList), nil
+}
+
+//go:embed templates/dir.html
+var _dirIndexTemplateStr string
+var dirIndexTemplate = golang.Must(
+ template.
+ New("dir").
+ Parse(_dirIndexTemplateStr),
+)
diff --git a/pkg/cinodefs/uploader/directory_test.go b/pkg/cinodefs/uploader/directory_test.go
new file mode 100644
index 0000000..d6162f3
--- /dev/null
+++ b/pkg/cinodefs/uploader/directory_test.go
@@ -0,0 +1,286 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uploader_test
+
+import (
+ "context"
+ "errors"
+ "io"
+ "io/fs"
+ "strings"
+ "testing"
+ "testing/fstest"
+
+ "github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/uploader"
+ "github.com/cinode/go/pkg/datastore"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type DirectoryTestSuite struct {
+ suite.Suite
+
+ cfs cinodefs.FS
+}
+
+func TestDirectoryTestSuite(t *testing.T) {
+ suite.Run(t, &DirectoryTestSuite{})
+}
+
+func (s *DirectoryTestSuite) SetupTest() {
+ cfs, err := cinodefs.New(
+ context.Background(),
+ blenc.FromDatastore(datastore.InMemory()),
+ cinodefs.NewRootStaticDirectory(),
+ )
+ require.NoError(s.T(), err)
+ s.cfs = cfs
+}
+
+func (s *DirectoryTestSuite) singleFileFs() fstest.MapFS {
+ return fstest.MapFS{
+ "file.txt": &fstest.MapFile{Data: []byte("hello")},
+ }
+}
+
+type wrapFS struct {
+ fs.FS
+
+ openFunc func(path string) (fs.File, error)
+ statFunc func(name string) (fs.FileInfo, error)
+ readDirFunc func(name string) ([]fs.DirEntry, error)
+}
+
+func (w *wrapFS) Open(path string) (fs.File, error) {
+ if w.openFunc != nil {
+ return w.openFunc(path)
+ }
+ return w.FS.Open(path)
+}
+
+func (w *wrapFS) Stat(name string) (fs.FileInfo, error) {
+ if w.statFunc != nil {
+ return w.statFunc(name)
+ }
+ return fs.Stat(w.FS, name)
+}
+
+func (w *wrapFS) ReadDir(name string) ([]fs.DirEntry, error) {
+ if w.readDirFunc != nil {
+ return w.readDirFunc(name)
+ }
+ return fs.ReadDir(w.FS, name)
+}
+
+func (s *DirectoryTestSuite) uploadFS(t *testing.T, fs fs.FS, opts ...uploader.Option) {
+ err := uploader.UploadStaticDirectory(
+ context.Background(),
+ fs,
+ s.cfs,
+ opts...,
+ )
+ require.NoError(t, err)
+}
+
+func (s *DirectoryTestSuite) readContent(t *testing.T, path ...string) (string, error) {
+ rc, err := s.cfs.OpenEntryData(context.Background(), path)
+ if err != nil {
+ return "", err
+ }
+ defer rc.Close()
+ data, err := io.ReadAll(rc)
+ return string(data), err
+}
+
+func (s *DirectoryTestSuite) TestSingleFileUploadDefaultOptions() {
+ s.uploadFS(s.T(), s.singleFileFs())
+
+ readBack, err := s.readContent(s.T(), "file.txt")
+ require.NoError(s.T(), err)
+ require.Equal(s.T(), "hello", readBack)
+}
+
+func (s *DirectoryTestSuite) TestSingleFileUploadBasePath() {
+ s.uploadFS(s.T(), s.singleFileFs(), uploader.BasePath("sub", "dir"))
+
+ readBack, err := s.readContent(s.T(), "sub", "dir", "file.txt")
+ require.NoError(s.T(), err)
+ require.Equal(s.T(), "hello", readBack)
+
+ _, err = s.readContent(s.T(), "file.txt")
+ require.ErrorIs(s.T(), err, cinodefs.ErrEntryNotFound)
+}
+
+func (s *DirectoryTestSuite) TestSingleFileUploadWithIndexFile() {
+ s.uploadFS(s.T(), s.singleFileFs(), uploader.CreateIndexFile("index.html"))
+
+ readBack, err := s.readContent(s.T(), "index.html")
+ require.NoError(s.T(), err)
+ require.True(s.T(), strings.HasPrefix(readBack, "
+
+
+
+ Directory Listing
+
+
+
+
+ Directory Listing
+
+
+ |
+ Name |
+ Size |
+ MimeType |
+
+ {{- if eq (len .entries) 0 }}
+
+ — Empty — |
+
+ {{- else }}
+ {{- range .entries }}{{- if .IsDir }}
+
+ [DIR] |
+ {{ .Name }} |
+ {{ .Size }} entries |
+ {{ .MimeType }} |
+
+ {{- end }}{{- end }}
+ {{- range .entries }}{{- if not .IsDir }}
+
+ |
+ {{ .Name }} |
+ {{ .Size }} bytes |
+ {{ .MimeType }} |
+
+ {{- end }}{{- end }}
+ {{- end }}
+
+
+
+
diff --git a/pkg/cinodefs/writerinfo.go b/pkg/cinodefs/writerinfo.go
new file mode 100644
index 0000000..e9e562c
--- /dev/null
+++ b/pkg/cinodefs/writerinfo.go
@@ -0,0 +1,79 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/cinode/go/pkg/cinodefs/internal/protobuf"
+ "github.com/cinode/go/pkg/common"
+ "github.com/cinode/go/pkg/utilities/golang"
+ "github.com/jbenet/go-base58"
+ "google.golang.org/protobuf/proto"
+)
+
+var (
+ ErrInvalidWriterInfoData = errors.New("invalid writer info data")
+ ErrInvalidWriterInfoDataParse = fmt.Errorf("%w: protobuf parse error", ErrInvalidWriterInfoData)
+)
+
+type WriterInfo struct {
+ wi protobuf.WriterInfo
+}
+
+func (wi *WriterInfo) Bytes() []byte {
+ return golang.Must(proto.Marshal(&wi.wi))
+}
+
+func (wi *WriterInfo) String() string {
+ return base58.Encode(wi.Bytes())
+}
+
+func WriterInfoFromString(s string) (*WriterInfo, error) {
+ if len(s) == 0 {
+ return nil, fmt.Errorf("%w: empty string", ErrInvalidWriterInfoData)
+ }
+
+ b := base58.Decode(s)
+ if len(b) == 0 {
+ return nil, fmt.Errorf("%w: not a base58 string", ErrInvalidWriterInfoData)
+ }
+
+ return WriterInfoFromBytes(b)
+}
+
+// WriterInfoFromBytes deserializes writer info from its protobuf byte form;
+// a parse failure is wrapped with ErrInvalidWriterInfoDataParse and preserves
+// the underlying error in the chain (the file already relies on Go 1.20+
+// multi-%w wrapping elsewhere).
+func WriterInfoFromBytes(b []byte) (*WriterInfo, error) {
+	wi := WriterInfo{}
+
+	err := proto.Unmarshal(b, &wi.wi)
+	if err != nil {
+		return nil, fmt.Errorf("%w: %w", ErrInvalidWriterInfoDataParse, err)
+	}
+
+	return &wi, nil
+}
+
+func writerInfoFromBlobNameKeyAndAuthInfo(bn *common.BlobName, key *common.BlobKey, authInfo *common.AuthInfo) *WriterInfo {
+ return &WriterInfo{
+ wi: protobuf.WriterInfo{
+ BlobName: bn.Bytes(),
+ Key: key.Bytes(),
+ AuthInfo: authInfo.Bytes(),
+ },
+ }
+}
diff --git a/pkg/cinodefs/writerinfo_bb_test.go b/pkg/cinodefs/writerinfo_bb_test.go
new file mode 100644
index 0000000..72d2b34
--- /dev/null
+++ b/pkg/cinodefs/writerinfo_bb_test.go
@@ -0,0 +1,42 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinodefs_test
+
+import (
+ "testing"
+
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/stretchr/testify/require"
+)
+
+func TestWriterInfoFromStringFailures(t *testing.T) {
+ for _, d := range []struct {
+ s string
+ errContains string
+ }{
+ {"", "empty string"},
+ {"not-a-base64-string!!!!!!!!", "not a base58 string"},
+ {"aaaaaaaa", "protobuf parse error"},
+ } {
+ t.Run(d.s, func(t *testing.T) {
+ wi, err := cinodefs.WriterInfoFromString(d.s)
+ require.ErrorIs(t, err, cinodefs.ErrInvalidWriterInfoData)
+ require.ErrorContains(t, err, d.errContains)
+ require.Nil(t, wi)
+ })
+ }
+}
diff --git a/pkg/cmd/cinode_web_proxy/integration_test.go b/pkg/cmd/cinode_web_proxy/integration_test.go
index 34acc0f..a76bdde 100644
--- a/pkg/cmd/cinode_web_proxy/integration_test.go
+++ b/pkg/cmd/cinode_web_proxy/integration_test.go
@@ -29,17 +29,17 @@ import (
"time"
"github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/uploader"
"github.com/cinode/go/pkg/cmd/cinode_web_proxy"
"github.com/cinode/go/pkg/datastore"
- "github.com/cinode/go/pkg/structure"
- "github.com/jbenet/go-base58"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slog"
)
func TestIntegration(t *testing.T) {
- // Prepare test filesystem
+ os.Clearenv()
+ // Prepare test filesystem
testFS := fstest.MapFS{
"index.html": &fstest.MapFile{
Data: []byte("Hello world!"),
@@ -64,18 +64,27 @@ func TestIntegration(t *testing.T) {
ds, err := datastore.InRawFileSystem(dir)
require.NoError(t, err)
- ep, err := structure.UploadStaticDirectory(
+ cfs, err := cinodefs.New(
context.Background(),
- slog.Default(),
- testFS,
blenc.FromDatastore(ds),
+ cinodefs.NewRootStaticDirectory(),
+ )
+ require.NoError(t, err)
+
+ err = uploader.UploadStaticDirectory(
+ context.Background(),
+ testFS,
+ cfs,
)
require.NoError(t, err)
- epBytes, err := ep.ToBytes()
+ err = cfs.Flush(context.Background())
+ require.NoError(t, err)
+
+ ep, err := cfs.RootEntrypoint()
require.NoError(t, err)
- t.Setenv("CINODE_ENTRYPOINT", base58.Encode(epBytes))
+ t.Setenv("CINODE_ENTRYPOINT", ep.String())
runAndValidateCinodeProxy := func() {
ctx, cancel := context.WithCancel(context.Background())
diff --git a/pkg/cmd/cinode_web_proxy/root.go b/pkg/cmd/cinode_web_proxy/root.go
index d8cf606..c3d4658 100644
--- a/pkg/cmd/cinode_web_proxy/root.go
+++ b/pkg/cmd/cinode_web_proxy/root.go
@@ -25,15 +25,16 @@ import (
"os"
"runtime"
"sort"
+ "strconv"
"strings"
"time"
"github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/httphandler"
"github.com/cinode/go/pkg/datastore"
- "github.com/cinode/go/pkg/protobuf"
- "github.com/cinode/go/pkg/structure"
+ "github.com/cinode/go/pkg/utilities/golang"
"github.com/cinode/go/pkg/utilities/httpserver"
- "github.com/jbenet/go-base58"
"golang.org/x/exp/slog"
)
@@ -60,13 +61,9 @@ func executeWithConfig(ctx context.Context, cfg *config) error {
additionalDSs = append(additionalDSs, ds)
}
- entrypointRaw := base58.Decode(cfg.entrypoint)
- if len(entrypointRaw) == 0 {
- return errors.New("could not decode base58 entrypoint data")
- }
- entrypoint, err := protobuf.EntryPointFromBytes(entrypointRaw)
+ entrypoint, err := cinodefs.EntrypointFromString(cfg.entrypoint)
if err != nil {
- return fmt.Errorf("could not unmarshal entrypoint data: %w", err)
+ return fmt.Errorf("could not parse entrypoint data: %w", err)
}
log := slog.Default()
@@ -74,6 +71,8 @@ func executeWithConfig(ctx context.Context, cfg *config) error {
log.Info("Server listening for connections",
"address", fmt.Sprintf("http://localhost:%d", cfg.port),
)
+ log.Info("Main datastore", "addr", cfg.mainDSLocation)
+ log.Info("Additional datastores", "addrs", cfg.additionalDSLocations)
log.Info("System info",
"goos", runtime.GOOS,
@@ -82,7 +81,8 @@ func executeWithConfig(ctx context.Context, cfg *config) error {
"cpus", runtime.NumCPU(),
)
- handler := setupCinodeProxy(mainDS, additionalDSs, entrypoint)
+ handler := setupCinodeProxy(ctx, mainDS, additionalDSs, entrypoint)
+
return httpserver.RunGracefully(ctx,
handler,
httpserver.ListenPort(cfg.port),
@@ -91,21 +91,28 @@ func executeWithConfig(ctx context.Context, cfg *config) error {
}
func setupCinodeProxy(
+ ctx context.Context,
mainDS datastore.DS,
additionalDSs []datastore.DS,
- entrypoint *protobuf.Entrypoint,
+ entrypoint *cinodefs.Entrypoint,
) http.Handler {
- fs := structure.CinodeFS{
- BE: blenc.FromDatastore(
- datastore.NewMultiSource(mainDS, time.Hour, additionalDSs...),
+ fs := golang.Must(cinodefs.New(
+ ctx,
+ blenc.FromDatastore(
+ datastore.NewMultiSource(
+ mainDS,
+ time.Hour,
+ additionalDSs...,
+ ),
),
- RootEntrypoint: entrypoint,
- MaxLinkRedirects: 10,
- }
+ cinodefs.RootEntrypoint(entrypoint),
+ cinodefs.MaxLinkRedirects(10),
+ ))
- return &structure.HTTPHandler{
- FS: &fs,
+ return &httphandler.Handler{
+ FS: fs,
IndexFile: "index.html",
+ Log: slog.Default(),
}
}
@@ -152,7 +159,19 @@ func getConfig() (*config, error) {
cfg.additionalDSLocations = append(cfg.additionalDSLocations, location)
}
- cfg.port = 8080
+ port := os.Getenv("CINODE_LISTEN_PORT")
+ if port == "" {
+ cfg.port = 8080
+ } else {
+ portNum, err := strconv.Atoi(port)
+ if err == nil && (portNum < 0 || portNum > 65535) {
+ err = fmt.Errorf("not in range 0..65535")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid listen port %s: %w", port, err)
+ }
+ cfg.port = portNum
+ }
return &cfg, nil
}
diff --git a/pkg/cmd/cinode_web_proxy/root_test.go b/pkg/cmd/cinode_web_proxy/root_test.go
index 68c336a..f55f8ba 100644
--- a/pkg/cmd/cinode_web_proxy/root_test.go
+++ b/pkg/cmd/cinode_web_proxy/root_test.go
@@ -30,18 +30,19 @@ import (
"github.com/cinode/go/pkg/blenc"
"github.com/cinode/go/pkg/blobtypes"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/uploader"
"github.com/cinode/go/pkg/common"
"github.com/cinode/go/pkg/datastore"
"github.com/cinode/go/pkg/internal/utilities/cipherfactory"
- "github.com/cinode/go/pkg/protobuf"
- "github.com/cinode/go/pkg/structure"
"github.com/cinode/go/testvectors/testblobs"
"github.com/jbenet/go-base58"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slog"
)
func TestGetConfig(t *testing.T) {
+ os.Clearenv()
+
t.Run("default config", func(t *testing.T) {
cfg, err := getConfig()
require.ErrorContains(t, err, "ENTRYPOINT")
@@ -102,6 +103,25 @@ func TestGetConfig(t *testing.T) {
"additional3",
})
})
+
+ t.Run("set listen port", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "12345")
+ cfg, err := getConfig()
+ require.NoError(t, err)
+ require.Equal(t, 12345, cfg.port)
+ })
+
+ t.Run("invalid port - not a number", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "123-45")
+ _, err := getConfig()
+ require.ErrorContains(t, err, "invalid listen port")
+ })
+
+ t.Run("invalid port - outside range", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "-1")
+ _, err := getConfig()
+ require.ErrorContains(t, err, "invalid listen port")
+ })
}
func TestWebProxyHandlerInvalidEntrypoint(t *testing.T) {
@@ -111,16 +131,13 @@ func TestWebProxyHandlerInvalidEntrypoint(t *testing.T) {
)
require.NoError(t, err)
+ key := cipherfactory.NewKeyGenerator(blobtypes.Static).Generate()
+
handler := setupCinodeProxy(
+ context.Background(),
datastore.InMemory(),
[]datastore.DS{},
- &protobuf.Entrypoint{
- BlobName: n,
- MimeType: structure.CinodeDirMimeType,
- KeyInfo: &protobuf.KeyInfo{
- Key: cipherfactory.NewKeyGenerator(blobtypes.Static).Generate(),
- },
- },
+ cinodefs.EntrypointFromBlobNameAndKey(n, key),
)
server := httptest.NewServer(handler)
@@ -148,7 +165,7 @@ func TestWebProxyHandlerSimplePage(t *testing.T) {
ds := datastore.InMemory()
be := blenc.FromDatastore(ds)
- ep := func() *protobuf.Entrypoint {
+ ep := func() *cinodefs.Entrypoint {
dir := t.TempDir()
for name, content := range map[string]string{
@@ -162,12 +179,25 @@ func TestWebProxyHandlerSimplePage(t *testing.T) {
require.NoError(t, err)
}
- ep, err := structure.UploadStaticDirectory(context.Background(), slog.Default(), os.DirFS(dir), be)
+ fs, err := cinodefs.New(context.Background(), be, cinodefs.NewRootDynamicLink())
+ require.NoError(t, err)
+
+ err = uploader.UploadStaticDirectory(
+ context.Background(),
+ os.DirFS(dir),
+ fs,
+ )
+ require.NoError(t, err)
+
+ err = fs.Flush(context.Background())
+ require.NoError(t, err)
+
+ ep, err := fs.RootEntrypoint()
require.NoError(t, err)
return ep
}()
- handler := setupCinodeProxy(ds, []datastore.DS{}, ep)
+ handler := setupCinodeProxy(context.Background(), ds, []datastore.DS{}, ep)
server := httptest.NewServer(handler)
defer server.Close()
@@ -224,7 +254,7 @@ func TestExecuteWithConfig(t *testing.T) {
mainDSLocation: "memory://",
entrypoint: "!@#$",
})
- require.ErrorContains(t, err, "decode")
+ require.ErrorContains(t, err, "could not parse")
})
t.Run("invalid entrypoint bytes", func(t *testing.T) {
@@ -232,12 +262,11 @@ func TestExecuteWithConfig(t *testing.T) {
mainDSLocation: "memory://",
entrypoint: base58.Encode([]byte("1234567890")),
})
- require.ErrorContains(t, err, "unmarshal")
+ require.ErrorContains(t, err, "could not parse")
})
t.Run("successful run", func(t *testing.T) {
- epBytes, err := testblobs.DynamicLink.Entrypoint().ToBytes()
- require.NoError(t, err)
+ ep := testblobs.DynamicLink.Entrypoint()
ctx, cancel := context.WithCancel(context.Background())
go func() {
@@ -245,31 +274,38 @@ func TestExecuteWithConfig(t *testing.T) {
cancel()
}()
- err = executeWithConfig(ctx, &config{
+ err := executeWithConfig(ctx, &config{
mainDSLocation: "memory://",
- entrypoint: base58.Encode(epBytes),
+ entrypoint: ep.String(),
})
require.NoError(t, err)
})
}
func TestExecute(t *testing.T) {
+ os.Clearenv()
+
t.Run("valid configuration", func(t *testing.T) {
- epBytes, err := testblobs.DynamicLink.Entrypoint().ToBytes()
- require.NoError(t, err)
+ ep := testblobs.DynamicLink.Entrypoint()
- t.Setenv("CINODE_ENTRYPOINT", base58.Encode(epBytes))
+ t.Setenv("CINODE_ENTRYPOINT", ep.String())
+ t.Setenv("CINODE_LISTEN_PORT", "0")
ctx, cancel := context.WithCancel(context.Background())
go func() {
time.Sleep(10 * time.Millisecond)
cancel()
}()
- err = Execute(ctx)
+ err := Execute(ctx)
require.NoError(t, err)
})
t.Run("invalid configuration", func(t *testing.T) {
- err := Execute(context.Background())
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ time.Sleep(10 * time.Millisecond)
+ cancel()
+ }()
+ err := Execute(ctx)
require.ErrorContains(t, err, "CINODE_ENTRYPOINT")
})
}
diff --git a/pkg/cmd/public_node/root.go b/pkg/cmd/public_node/root.go
index 9060291..e326a73 100644
--- a/pkg/cmd/public_node/root.go
+++ b/pkg/cmd/public_node/root.go
@@ -25,6 +25,7 @@ import (
"os"
"runtime"
"sort"
+ "strconv"
"strings"
"time"
@@ -34,10 +35,14 @@ import (
)
func Execute(ctx context.Context) error {
- return executeWithConfig(ctx, getConfig())
+ cfg, err := getConfig()
+ if err != nil {
+ return err
+ }
+ return executeWithConfig(ctx, cfg)
}
-func executeWithConfig(ctx context.Context, cfg config) error {
+func executeWithConfig(ctx context.Context, cfg *config) error {
handler, err := buildHttpHandler(cfg)
if err != nil {
return err
@@ -60,7 +65,7 @@ func executeWithConfig(ctx context.Context, cfg config) error {
)
}
-func buildHttpHandler(cfg config) (http.Handler, error) {
+func buildHttpHandler(cfg *config) (http.Handler, error) {
mainDS, err := datastore.FromLocation(cfg.mainDSLocation)
if err != nil {
return nil, fmt.Errorf("could not create main datastore: %w", err)
@@ -140,7 +145,7 @@ type config struct {
uploadPassword string
}
-func getConfig() config {
+func getConfig() (*config, error) {
cfg := config{
log: slog.Default(),
}
@@ -164,9 +169,22 @@ func getConfig() config {
cfg.additionalDSLocations = append(cfg.additionalDSLocations, location)
}
- cfg.port = 8080
+ port := os.Getenv("CINODE_LISTEN_PORT")
+ if port == "" {
+ cfg.port = 8080
+ } else {
+ portNum, err := strconv.Atoi(port)
+ if err == nil && (portNum < 0 || portNum > 65535) {
+ err = fmt.Errorf("not in range 0..65535")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid listen port %s: %w", port, err)
+ }
+ cfg.port = portNum
+ }
+
cfg.uploadUsername = os.Getenv("CINODE_UPLOAD_USERNAME")
cfg.uploadPassword = os.Getenv("CINODE_UPLOAD_PASSWORD")
- return cfg
+ return &cfg, nil
}
diff --git a/pkg/cmd/public_node/root_test.go b/pkg/cmd/public_node/root_test.go
index a74a419..105a0b2 100644
--- a/pkg/cmd/public_node/root_test.go
+++ b/pkg/cmd/public_node/root_test.go
@@ -19,6 +19,7 @@ package public_node
import (
"context"
"net/http/httptest"
+ "os"
"testing"
"time"
@@ -28,8 +29,11 @@ import (
)
func TestGetConfig(t *testing.T) {
+ os.Clearenv()
+
t.Run("default config", func(t *testing.T) {
- cfg := getConfig()
+ cfg, err := getConfig()
+ require.NoError(t, err)
require.Equal(t, "memory://", cfg.mainDSLocation)
require.Empty(t, cfg.additionalDSLocations)
require.Equal(t, 8080, cfg.port)
@@ -37,7 +41,8 @@ func TestGetConfig(t *testing.T) {
t.Run("set main datastore", func(t *testing.T) {
t.Setenv("CINODE_MAIN_DATASTORE", "testdatastore")
- cfg := getConfig()
+ cfg, err := getConfig()
+ require.NoError(t, err)
require.Equal(t, cfg.mainDSLocation, "testdatastore")
})
@@ -47,7 +52,8 @@ func TestGetConfig(t *testing.T) {
t.Setenv("CINODE_ADDITIONAL_DATASTORE_2", "additional2")
t.Setenv("CINODE_ADDITIONAL_DATASTORE_1", "additional1")
- cfg := getConfig()
+ cfg, err := getConfig()
+ require.NoError(t, err)
require.Equal(t, cfg.additionalDSLocations, []string{
"additional",
"additional1",
@@ -55,11 +61,30 @@ func TestGetConfig(t *testing.T) {
"additional3",
})
})
+
+ t.Run("set listen port", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "12345")
+ cfg, err := getConfig()
+ require.NoError(t, err)
+ require.Equal(t, 12345, cfg.port)
+ })
+
+ t.Run("invalid port - not a number", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "123-45")
+ _, err := getConfig()
+ require.ErrorContains(t, err, "invalid listen port")
+ })
+
+ t.Run("invalid port - outside range", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "-1")
+ _, err := getConfig()
+ require.ErrorContains(t, err, "invalid listen port")
+ })
}
func TestBuildHttpHandler(t *testing.T) {
t.Run("Successfully created handler", func(t *testing.T) {
- h, err := buildHttpHandler(config{
+ h, err := buildHttpHandler(&config{
mainDSLocation: t.TempDir(),
additionalDSLocations: []string{
t.TempDir(),
@@ -89,7 +114,7 @@ func TestBuildHttpHandler(t *testing.T) {
const VALID_PASSWORD = "secret"
const INVALID_PASSWORD = "plaintext"
- h, err := buildHttpHandler(config{
+ h, err := buildHttpHandler(&config{
mainDSLocation: t.TempDir(),
additionalDSLocations: []string{
t.TempDir(),
@@ -125,7 +150,7 @@ func TestBuildHttpHandler(t *testing.T) {
})
t.Run("invalid main datastore", func(t *testing.T) {
- h, err := buildHttpHandler(config{
+ h, err := buildHttpHandler(&config{
mainDSLocation: "",
})
require.ErrorContains(t, err, "could not create main datastore")
@@ -133,7 +158,7 @@ func TestBuildHttpHandler(t *testing.T) {
})
t.Run("invalid additional datastore", func(t *testing.T) {
- h, err := buildHttpHandler(config{
+ h, err := buildHttpHandler(&config{
mainDSLocation: "memory://",
additionalDSLocations: []string{""},
})
@@ -149,7 +174,7 @@ func TestExecuteWithConfig(t *testing.T) {
time.Sleep(10 * time.Millisecond)
cancel()
}()
- err := executeWithConfig(ctx, config{
+ err := executeWithConfig(ctx, &config{
mainDSLocation: "memory://",
log: slog.Default(),
})
@@ -157,13 +182,15 @@ func TestExecuteWithConfig(t *testing.T) {
})
t.Run("invalid configuration", func(t *testing.T) {
- err := executeWithConfig(context.Background(), config{})
+ err := executeWithConfig(context.Background(), &config{})
require.ErrorContains(t, err, "datastore")
})
}
func TestExecute(t *testing.T) {
t.Run("valid configuration", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "0")
+
ctx, cancel := context.WithCancel(context.Background())
go func() {
time.Sleep(10 * time.Millisecond)
@@ -178,4 +205,10 @@ func TestExecute(t *testing.T) {
err := Execute(context.Background())
require.ErrorContains(t, err, "datastore")
})
+
+ t.Run("invalid configuration - port", func(t *testing.T) {
+ t.Setenv("CINODE_LISTEN_PORT", "-1")
+ err := Execute(context.Background())
+ require.ErrorContains(t, err, "listen port")
+ })
}
diff --git a/pkg/cmd/static_datastore/compile.go b/pkg/cmd/static_datastore/compile.go
index ef2e6eb..b0d1bdf 100644
--- a/pkg/cmd/static_datastore/compile.go
+++ b/pkg/cmd/static_datastore/compile.go
@@ -19,45 +19,42 @@ package static_datastore
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"log"
"os"
+ "strings"
"github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/uploader"
"github.com/cinode/go/pkg/datastore"
- "github.com/cinode/go/pkg/protobuf"
- "github.com/cinode/go/pkg/structure"
- "github.com/jbenet/go-base58"
"github.com/spf13/cobra"
- "golang.org/x/exp/slog"
)
func compileCmd() *cobra.Command {
-
- var srcDir, dstDir string
- var useStaticBlobs bool
- var useRawFilesystem bool
+ var o compileFSOptions
var rootWriterInfoStr string
var rootWriterInfoFile string
+ var useRawFilesystem bool
cmd := &cobra.Command{
- Use: "compile --source --destination ",
+ Use: "compile --source <src_dir> --destination <dst_location>",
Short: "Compile datastore from static files",
- Long: `
-The compile command can be used to create an encrypted datastore from
-a content with static files that can then be used to serve through a
-simple http server.
-`,
- Run: func(cmd *cobra.Command, args []string) {
- if srcDir == "" || dstDir == "" {
- cmd.Help()
- return
+ Long: strings.Join([]string{
+ "The compile command can be used to create an encrypted datastore from",
+ "a content with static files that can then be used to serve through a",
+ "simple http server.",
+ }, "\n"),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if o.srcDir == "" || o.dstLocation == "" {
+ return cmd.Help()
}
- enc := json.NewEncoder(os.Stdout)
+ enc := json.NewEncoder(cmd.OutOrStdout())
enc.SetIndent("", " ")
- fatalResult := func(format string, args ...interface{}) {
+ fatalResult := func(format string, args ...interface{}) error {
msg := fmt.Sprintf(format, args...)
enc.Encode(map[string]string{
@@ -65,114 +62,182 @@ simple http server.
"msg": msg,
})
- log.Fatalf(msg)
+ cmd.SilenceUsage = true
+ cmd.SilenceErrors = true
+ return errors.New(msg)
}
- var wi *protobuf.WriterInfo
if len(rootWriterInfoFile) > 0 {
data, err := os.ReadFile(rootWriterInfoFile)
if err != nil {
- fatalResult("Couldn't read data from the writer info file at '%s': %v", rootWriterInfoFile, err)
+ return fatalResult("Couldn't read data from the writer info file at '%s': %v", rootWriterInfoFile, err)
}
if len(data) == 0 {
- fatalResult("Writer info file at '%s' is empty", rootWriterInfoFile)
+ return fatalResult("Writer info file at '%s' is empty", rootWriterInfoFile)
}
rootWriterInfoStr = string(data)
}
if len(rootWriterInfoStr) > 0 {
- _wi, err := protobuf.WriterInfoFromBytes(base58.Decode(rootWriterInfoStr))
+ wi, err := cinodefs.WriterInfoFromString(rootWriterInfoStr)
if err != nil {
- fatalResult("Couldn't parse writer info: %v", err)
+ return fatalResult("Couldn't parse writer info: %v", err)
}
- wi = _wi
+ o.writerInfo = wi
}
- ep, wi, err := compileFS(srcDir, dstDir, useStaticBlobs, wi, useRawFilesystem)
- if err != nil {
- fatalResult("%s", err)
+ if useRawFilesystem {
+ // For backwards compatibility
+ o.dstLocation = "file-raw://" + o.dstLocation
}
- epBytes, err := ep.ToBytes()
+ ep, wi, err := compileFS(cmd.Context(), o)
if err != nil {
- fatalResult("Couldn't serialize entrypoint: %v", err)
+ return fatalResult("%s", err)
}
result := map[string]string{
"result": "OK",
- "entrypoint": base58.Encode(epBytes),
+ "entrypoint": ep.String(),
}
if wi != nil {
- wiBytes, err := wi.ToBytes()
- if err != nil {
- fatalResult("Couldn't serialize writer info: %v", err)
- }
-
- result["writer-info"] = base58.Encode(wiBytes)
+ result["writer-info"] = wi.String()
}
enc.Encode(result)
log.Println("DONE")
-
+ return nil
},
}
- cmd.Flags().StringVarP(&srcDir, "source", "s", "", "Source directory with content to compile")
- cmd.Flags().StringVarP(&dstDir, "destination", "d", "", "Destination directory for blobs")
- cmd.Flags().BoolVarP(&useStaticBlobs, "static", "t", false, "If set to true, compile only the static dataset, do not create or update dynamic link")
- cmd.Flags().BoolVarP(&useRawFilesystem, "raw-filesystem", "r", false, "If set to true, use raw filesystem instead of the optimized one, can be used to create dataset for a standard http server")
- cmd.Flags().StringVarP(&rootWriterInfoStr, "writer-info", "w", "", "Writer info for the root dynamic link, if neither writer info nor writer info file is specified, a random writer info will be generated and printed out")
- cmd.Flags().StringVarP(&rootWriterInfoFile, "writer-info-file", "f", "", "Name of the file containing writer info for the root dynamic link, if neither writer info nor writer info file is specified, a random writer info will be generated and printed out")
+ cmd.Flags().StringVarP(
+ &o.srcDir, "source", "s", "",
+ "Source directory with content to compile",
+ )
+ cmd.Flags().StringVarP(
+ &o.dstLocation, "destination", "d", "",
+ "location of destination datastore for blobs, can be a directory "+
+ "or an url prefixed with file://, file-raw://, http://, https://",
+ )
+ cmd.Flags().BoolVarP(
+ &o.static, "static", "t", false,
+ "if set to true, compile only the static dataset, do not create or update dynamic link",
+ )
+ cmd.Flags().BoolVarP(
+ &useRawFilesystem, "raw-filesystem", "r", false,
+ "if set to true, use raw filesystem instead of the optimized one, "+
+ "can be used to create dataset for a standard http server",
+ )
+ cmd.Flags().MarkDeprecated(
+ "raw-filesystem",
+ "use file-raw:// destination prefix instead",
+ )
+ cmd.Flags().StringVarP(
+ &rootWriterInfoStr, "writer-info", "w", "",
+ "writer info for the root dynamic link, if neither writer info nor writer info file is specified, "+
+ "a random writer info will be generated and printed out",
+ )
+ cmd.Flags().StringVarP(
+ &rootWriterInfoFile, "writer-info-file", "f", "",
+ "name of the file containing writer info for the root dynamic link, "+
+ "if neither writer info nor writer info file is specified, "+
+ "a random writer info will be generated and printed out",
+ )
+ cmd.Flags().StringVar(
+ &o.indexFile, "index-file", "index.html",
+ "name of the index file",
+ )
+ cmd.Flags().BoolVar(
+ &o.generateIndexFiles, "generate-index-files", false,
+ "automatically generate index html files with directory listing if index file is not present",
+ )
+ cmd.Flags().BoolVar(
+ &o.append, "append", false,
+ "append file in existing datastore leaving existing unchanged files as is",
+ )
return cmd
}
+type compileFSOptions struct {
+ srcDir string
+ dstLocation string
+ static bool
+ writerInfo *cinodefs.WriterInfo
+ generateIndexFiles bool
+ indexFile string
+ append bool
+}
+
func compileFS(
- srcDir, dstDir string,
- static bool,
- writerInfo *protobuf.WriterInfo,
- useRawFS bool,
+ ctx context.Context,
+ o compileFSOptions,
) (
- *protobuf.Entrypoint,
- *protobuf.WriterInfo,
+ *cinodefs.Entrypoint,
+ *cinodefs.WriterInfo,
error,
) {
- var retWi *protobuf.WriterInfo
-
- ds, err := func() (datastore.DS, error) {
- if useRawFS {
- return datastore.InRawFileSystem(dstDir)
- }
- return datastore.InFileSystem(dstDir)
- }()
+ ds, err := datastore.FromLocation(o.dstLocation)
if err != nil {
return nil, nil, fmt.Errorf("could not open datastore: %w", err)
}
- be := blenc.FromDatastore(ds)
+ opts := []cinodefs.Option{}
+ if o.static {
+ opts = append(opts, cinodefs.NewRootStaticDirectory())
+ } else if o.writerInfo == nil {
+ opts = append(opts, cinodefs.NewRootDynamicLink())
+ } else {
+ opts = append(opts, cinodefs.RootWriterInfo(o.writerInfo))
+ }
- ep, err := structure.UploadStaticDirectory(
- context.Background(),
- slog.Default(),
- os.DirFS(srcDir),
- be,
+ fs, err := cinodefs.New(
+ ctx,
+ blenc.FromDatastore(ds),
+ opts...,
)
if err != nil {
- return nil, nil, fmt.Errorf("couldn't upload directory content: %w", err)
+ return nil, nil, fmt.Errorf("couldn't create cinode filesystem instance: %w", err)
}
- if !static {
- if writerInfo == nil {
- ep, retWi, err = structure.CreateLink(context.Background(), be, ep)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to update root link: %w", err)
- }
- } else {
- ep, err = structure.UpdateLink(context.Background(), be, writerInfo, ep)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to update root link: %w", err)
- }
+ if !o.append {
+ err = fs.ResetDir(ctx, []string{})
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to reset the root directory: %w", err)
}
}
- return ep, retWi, nil
+ var genOpts []uploader.Option
+ if o.generateIndexFiles {
+ genOpts = append(genOpts, uploader.CreateIndexFile(o.indexFile))
+ }
+
+ err = uploader.UploadStaticDirectory(
+ ctx,
+ os.DirFS(o.srcDir),
+ fs,
+ genOpts...,
+ )
+ if err != nil {
+ return nil, nil, fmt.Errorf("couldn't upload directory content: %w", err)
+ }
+
+ err = fs.Flush(ctx)
+ if err != nil {
+ return nil, nil, fmt.Errorf("couldn't flush after directory upload: %w", err)
+ }
+
+ ep, err := fs.RootEntrypoint()
+ if err != nil {
+ return nil, nil, fmt.Errorf("couldn't get root entrypoint from cinodefs instance: %w", err)
+ }
+
+ wi, err := fs.RootWriterInfo(ctx)
+ if errors.Is(err, cinodefs.ErrNotALink) {
+ return ep, nil, nil
+ }
+ if err != nil {
+ return nil, nil, fmt.Errorf("couldn't get root writer info from cinodefs instance: %w", err)
+ }
+
+ return ep, wi, nil
}
diff --git a/pkg/cmd/static_datastore/root.go b/pkg/cmd/static_datastore/root.go
index ddd435a..38b9514 100644
--- a/pkg/cmd/static_datastore/root.go
+++ b/pkg/cmd/static_datastore/root.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,8 +17,7 @@ limitations under the License.
package static_datastore
import (
- "fmt"
- "os"
+ "context"
"github.com/spf13/cobra"
)
@@ -50,9 +49,6 @@ node is stored in a plaintext in a file called 'entrypoint.txt'.
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
-func Execute() {
- if err := rootCmd().Execute(); err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
+func Execute(ctx context.Context) error {
+ return rootCmd().ExecuteContext(ctx)
}
diff --git a/pkg/cmd/static_datastore/static_datastore_test.go b/pkg/cmd/static_datastore/static_datastore_test.go
index e9b3855..f1d3aeb 100644
--- a/pkg/cmd/static_datastore/static_datastore_test.go
+++ b/pkg/cmd/static_datastore/static_datastore_test.go
@@ -18,6 +18,8 @@ package static_datastore
import (
"bytes"
+ "context"
+ "encoding/json"
"io"
"net/http"
"net/http/httptest"
@@ -26,11 +28,14 @@ import (
"testing"
"github.com/cinode/go/pkg/blenc"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/cinodefs/httphandler"
"github.com/cinode/go/pkg/datastore"
- "github.com/cinode/go/pkg/protobuf"
- "github.com/cinode/go/pkg/structure"
+ "github.com/cinode/go/pkg/utilities/golang"
+ "github.com/spf13/cobra"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
+ "golang.org/x/exp/slog"
)
type datasetFile struct {
@@ -45,7 +50,6 @@ type CompileAndReadTestSuite struct {
}
func TestCompileAndReadTestSuite(t *testing.T) {
-
s := &CompileAndReadTestSuite{
initialTestDataset: []datasetFile{
{
@@ -97,113 +101,258 @@ func TestCompileAndReadTestSuite(t *testing.T) {
suite.Run(t, s)
}
+type testOutputParser struct {
+ Result string `json:"result"`
+ Msg string `json:"msg"`
+ WI string `json:"writer-info"`
+ EP string `json:"entrypoint"`
+}
+
func (s *CompileAndReadTestSuite) uploadDatasetToDatastore(
+ t *testing.T,
dataset []datasetFile,
datastoreDir string,
- wi *protobuf.WriterInfo,
-) (*protobuf.WriterInfo, *protobuf.Entrypoint) {
+ extraArgs ...string,
+) (wi *cinodefs.WriterInfo, ep *cinodefs.Entrypoint) {
+ dir := t.TempDir()
+
+ for _, td := range dataset {
+ err := os.MkdirAll(filepath.Join(dir, filepath.Dir(td.fName)), 0777)
+ s.Require().NoError(err)
- var ep *protobuf.Entrypoint
- s.T().Run("prepare dataset", func(t *testing.T) {
+ err = os.WriteFile(filepath.Join(dir, td.fName), []byte(td.contents), 0600)
+ s.Require().NoError(err)
+ }
+
+ buf := bytes.NewBuffer(nil)
- dir := t.TempDir()
+ args := []string{
+ "compile",
+ "-s", dir,
+ "-d", datastoreDir,
+ }
+ args = append(args, extraArgs...)
- for _, td := range dataset {
- err := os.MkdirAll(filepath.Join(dir, filepath.Dir(td.fName)), 0777)
- s.Require().NoError(err)
+ cmd := rootCmd()
+ cmd.SetArgs(args)
+ cmd.SetOut(buf)
- err = os.WriteFile(filepath.Join(dir, td.fName), []byte(td.contents), 0600)
- s.Require().NoError(err)
- }
+ err := cmd.Execute()
+ require.NoError(t, err)
- retEp, retWi, err := compileFS(dir, datastoreDir, false, wi, false)
- require.NoError(t, err)
- wi = retWi
- ep = retEp
- })
+ output := testOutputParser{}
+ err = json.Unmarshal(buf.Bytes(), &output)
+ require.NoError(t, err)
+ require.Equal(t, "OK", output.Result)
+
+ if output.WI != "" {
+ wi = golang.Must(cinodefs.WriterInfoFromString(output.WI))
+ }
+ ep = golang.Must(cinodefs.EntrypointFromString(output.EP))
return wi, ep
}
func (s *CompileAndReadTestSuite) validateDataset(
+ t *testing.T,
dataset []datasetFile,
- ep *protobuf.Entrypoint,
+ ep *cinodefs.Entrypoint,
datastoreDir string,
) {
ds, err := datastore.InFileSystem(datastoreDir)
s.Require().NoError(err)
- fs := structure.CinodeFS{
- BE: blenc.FromDatastore(ds),
- RootEntrypoint: ep,
- MaxLinkRedirects: 10,
- }
+ fs, err := cinodefs.New(
+ context.Background(),
+ blenc.FromDatastore(ds),
+ cinodefs.RootEntrypoint(ep),
+ cinodefs.MaxLinkRedirects(10),
+ )
+ s.Require().NoError(err)
- testServer := httptest.NewServer(&structure.HTTPHandler{
- FS: &fs,
+ testServer := httptest.NewServer(&httphandler.Handler{
+ FS: fs,
IndexFile: "index.html",
+ Log: slog.Default(),
})
defer testServer.Close()
for _, td := range dataset {
- s.Run(td.fName, func() {
+ t.Run(td.fName, func(t *testing.T) {
res, err := http.Get(testServer.URL + td.fName)
- s.Require().NoError(err)
+ require.NoError(t, err)
defer res.Body.Close()
data, err := io.ReadAll(res.Body)
- s.Require().NoError(err)
- s.Require().Equal([]byte(td.contents), data)
+ require.NoError(t, err)
+ require.Equal(t, []byte(td.contents), data)
res, err = http.Post(testServer.URL+td.fName, "plain/text", bytes.NewReader([]byte("test")))
- s.Require().NoError(err)
+ require.NoError(t, err)
defer res.Body.Close()
- s.Require().Equal(http.StatusMethodNotAllowed, res.StatusCode)
+ require.Equal(t, http.StatusMethodNotAllowed, res.StatusCode)
res, err = http.Get(testServer.URL + td.fName + ".notfound")
- s.Require().NoError(err)
+ require.NoError(t, err)
defer res.Body.Close()
- s.Require().Equal(http.StatusNotFound, res.StatusCode)
+ require.Equal(t, http.StatusNotFound, res.StatusCode)
})
}
- s.Run("Default to index.html", func() {
+ t.Run("Default to index.html", func(t *testing.T) {
res, err := http.Get(testServer.URL + "/")
- s.Require().NoError(err)
+ require.NoError(t, err)
defer res.Body.Close()
data, err := io.ReadAll(res.Body)
- s.Require().NoError(err)
+ require.NoError(t, err)
- s.Require().Equal([]byte("Index"), data)
+ require.Equal(t, []byte("Index"), data)
})
}
func (s *CompileAndReadTestSuite) TestCompileAndRead() {
- datastore := s.T().TempDir()
+ t := s.T()
+ datastore := t.TempDir()
// Create and test initial dataset
- wi, ep := s.uploadDatasetToDatastore(s.initialTestDataset, datastore, nil)
- s.validateDataset(s.initialTestDataset, ep, datastore)
+ wi, ep := s.uploadDatasetToDatastore(t, s.initialTestDataset, datastore)
+ s.validateDataset(t, s.initialTestDataset, ep, datastore)
+
+ t.Run("Re-upload same dataset", func(t *testing.T) {
+ s.uploadDatasetToDatastore(t, s.initialTestDataset, datastore,
+ "--writer-info", wi.String(),
+ )
+ s.validateDataset(t, s.initialTestDataset, ep, datastore)
+ })
+
+ t.Run("Upload modified dataset but for different root link", func(t *testing.T) {
+ _, updatedEP := s.uploadDatasetToDatastore(t, s.updatedTestDataset, datastore)
+ s.validateDataset(t, s.updatedTestDataset, updatedEP, datastore)
+ s.Require().NotEqual(ep, updatedEP)
+
+ // After restoring the original entrypoint dataset should be back to the initial one
+ s.validateDataset(t, s.initialTestDataset, ep, datastore)
+ })
+
+ t.Run("Update the original entrypoint with the new dataset", func(t *testing.T) {
+ _, epOrigWriterInfo := s.uploadDatasetToDatastore(t, s.updatedTestDataset, datastore,
+ "--writer-info", wi.String(),
+ )
+ s.validateDataset(t, s.updatedTestDataset, epOrigWriterInfo, datastore)
+
+ // Entrypoint must stay the same
+ require.EqualValues(t, ep, epOrigWriterInfo)
+ })
+
+ s.T().Run("Upload data with static entrypoint", func(t *testing.T) {
+ wiStatic, epStatic := s.uploadDatasetToDatastore(t, s.initialTestDataset, datastore,
+ "--static",
+ )
+ s.validateDataset(t, s.initialTestDataset, epStatic, datastore)
+ require.Nil(t, wiStatic)
+ })
- // Re-upload same dataset
- s.uploadDatasetToDatastore(s.initialTestDataset, datastore, wi)
- s.validateDataset(s.initialTestDataset, ep, datastore)
+ s.T().Run("Read writer info from file", func(t *testing.T) {
+ wiFile := filepath.Join(t.TempDir(), "epfile")
+ require.NoError(t, os.WriteFile(wiFile, []byte(wi.String()), 0777))
- // Upload modified dataset but for different root link
- _, updatedEP := s.uploadDatasetToDatastore(s.updatedTestDataset, datastore, nil)
- s.validateDataset(s.updatedTestDataset, updatedEP, datastore)
- s.Require().NotEqual(ep, updatedEP)
+ _, ep := s.uploadDatasetToDatastore(t, s.initialTestDataset, datastore,
+ "--writer-info-file", wiFile,
+ )
+ s.validateDataset(t, s.initialTestDataset, ep, datastore)
+ })
+
+}
- // After restoring the original entrypoint dataset should be back to the initial one
- s.validateDataset(s.initialTestDataset, ep, datastore)
+func testExecCommand(cmd *cobra.Command, args []string) (output, stderr []byte, err error) {
+ outputBuff := bytes.NewBuffer(nil)
+ stderrBuff := bytes.NewBuffer(nil)
+ cmd.SetOutput(outputBuff)
+ cmd.SetErr(stderrBuff)
+ cmd.SetArgs(args)
+ err = cmd.Execute()
+ return outputBuff.Bytes(), stderrBuff.Bytes(), err
+}
- // Update the original entrypoint with the new dataset
- _, epOrigWriterInfo := s.uploadDatasetToDatastore(s.updatedTestDataset, datastore, wi)
- s.validateDataset(s.updatedTestDataset, epOrigWriterInfo, datastore)
+func testExec(args []string) (output, stderr []byte, err error) {
+ return testExecCommand(rootCmd(), args)
+}
- // Entrypoint must stay the same
- s.Require().EqualValues(ep, epOrigWriterInfo)
+func TestHelpCalls(t *testing.T) {
+ for _, d := range []struct {
+ name string
+ args []string
+ }{
+ {"no args", []string{}},
+ {"not enough compile args", []string{"compile"}},
+ } {
+ t.Run(d.name, func(t *testing.T) {
+ cmd := rootCmd()
+ helpCalled := false
+ cmd.SetHelpFunc(func(c *cobra.Command, s []string) { helpCalled = true })
+ cmd.SetArgs(d.args)
+ err := cmd.Execute()
+ require.NoError(t, err)
+ require.True(t, helpCalled)
+ })
+ }
+}
+
+func TestInvalidOptions(t *testing.T) {
+ tempDir := t.TempDir()
+ emptyFile := filepath.Join(tempDir, "empty")
+
+ err := os.WriteFile(emptyFile, []byte{}, 0777)
+ require.NoError(t, err)
+
+ for _, d := range []struct {
+ name string
+ args []string
+ errorContains string
+ }{
+ {
+ name: "invalid root writer info",
+ args: []string{
+ "compile",
+ "--source", t.TempDir(),
+ "--destination", t.TempDir(),
+ "--writer-info", "not-a-valid-writer-info",
+ },
+ errorContains: "Couldn't parse writer info:",
+ },
+ {
+ name: "invalid root writer info file",
+ args: []string{
+ "compile",
+ "--source", t.TempDir(),
+ "--destination", t.TempDir(),
+ "--writer-info-file", "/invalid/file/name/with/writer/info",
+ },
+ errorContains: "no such file or directory",
+ },
+ {
+ name: "empty root writer info file",
+ args: []string{
+ "compile",
+ "--source", t.TempDir(),
+ "--destination", t.TempDir(),
+ "--writer-info-file", emptyFile,
+ },
+ errorContains: "is empty",
+ },
+ } {
+ t.Run(d.name, func(t *testing.T) {
+ output, _, err := testExec(d.args)
+ require.ErrorContains(t, err, d.errorContains)
+
+ parsedOutput := testOutputParser{}
+ err = json.Unmarshal(output, &parsedOutput)
+ require.NoError(t, err)
+ require.Equal(t, "ERROR", parsedOutput.Result)
+ require.Contains(t, parsedOutput.Msg, d.errorContains)
+ })
+ }
}
diff --git a/pkg/common/auth_info.go b/pkg/common/auth_info.go
new file mode 100644
index 0000000..6891bc3
--- /dev/null
+++ b/pkg/common/auth_info.go
@@ -0,0 +1,29 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import "crypto/subtle"
+
+// AuthInfo is an opaque data that is necessary to perform update of an existing blob.
+//
+// Currently used only for dynamic links, auth info contains all the necessary information
+// to update the content of the blob. The representation is specific to the blob type
+type AuthInfo struct{ data []byte }
+
+func AuthInfoFromBytes(ai []byte) *AuthInfo { return &AuthInfo{data: copyBytes(ai)} }
+func (a *AuthInfo) Bytes() []byte { return copyBytes(a.data) }
+func (a *AuthInfo) Equal(a2 *AuthInfo) bool { return subtle.ConstantTimeCompare(a.data, a2.data) == 1 }
diff --git a/pkg/common/auth_info_test.go b/pkg/common/auth_info_test.go
new file mode 100644
index 0000000..a875052
--- /dev/null
+++ b/pkg/common/auth_info_test.go
@@ -0,0 +1,31 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestAuthInfo(t *testing.T) {
+ authInfoBytes := []byte{1, 2, 3}
+ authInfo := AuthInfoFromBytes(authInfoBytes)
+ require.Equal(t, authInfoBytes, authInfo.Bytes())
+ require.True(t, authInfo.Equal(AuthInfoFromBytes(authInfoBytes)))
+ require.Nil(t, new(AuthInfo).Bytes())
+}
diff --git a/pkg/common/blob_keys.go b/pkg/common/blob_keys.go
new file mode 100644
index 0000000..60e1adb
--- /dev/null
+++ b/pkg/common/blob_keys.go
@@ -0,0 +1,42 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import "crypto/subtle"
+
+func copyBytes(b []byte) []byte {
+ if b == nil {
+ return nil
+ }
+ ret := make([]byte, len(b))
+ copy(ret, b)
+ return ret
+}
+
+// Key with cipher type
+type BlobKey struct{ key []byte }
+
+func BlobKeyFromBytes(key []byte) *BlobKey { return &BlobKey{key: copyBytes(key)} }
+func (k *BlobKey) Bytes() []byte { return copyBytes(k.key) }
+func (k *BlobKey) Equal(k2 *BlobKey) bool { return subtle.ConstantTimeCompare(k.key, k2.key) == 1 }
+
+// IV
+type BlobIV struct{ iv []byte }
+
+func BlobIVFromBytes(iv []byte) *BlobIV { return &BlobIV{iv: copyBytes(iv)} }
+func (i *BlobIV) Bytes() []byte { return copyBytes(i.iv) }
+func (i *BlobIV) Equal(i2 *BlobIV) bool { return subtle.ConstantTimeCompare(i.iv, i2.iv) == 1 }
diff --git a/pkg/common/blob_keys_test.go b/pkg/common/blob_keys_test.go
new file mode 100644
index 0000000..28b32df
--- /dev/null
+++ b/pkg/common/blob_keys_test.go
@@ -0,0 +1,39 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestBlobKey(t *testing.T) {
+ keyBytes := []byte{1, 2, 3}
+ key := BlobKeyFromBytes(keyBytes)
+ require.Equal(t, keyBytes, key.Bytes())
+ require.True(t, key.Equal(BlobKeyFromBytes(keyBytes)))
+ require.Nil(t, new(BlobKey).Bytes())
+}
+
+func TestBlobIV(t *testing.T) {
+ ivBytes := []byte{1, 2, 3}
+ iv := BlobIVFromBytes(ivBytes)
+ require.Equal(t, ivBytes, iv.Bytes())
+ require.True(t, iv.Equal(BlobIVFromBytes(ivBytes)))
+ require.Nil(t, new(BlobIV).Bytes())
+}
diff --git a/pkg/common/blob_name.go b/pkg/common/blob_name.go
index c6d57b0..d664c69 100644
--- a/pkg/common/blob_name.go
+++ b/pkg/common/blob_name.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
+ "crypto/subtle"
"errors"
base58 "github.com/jbenet/go-base58"
@@ -32,52 +33,65 @@ var (
// The type of the blob is not stored directly. Instead it is mixed
// with the hash of the blob to make sure that all bytes in the blob name
// are randomly distributed.
-type BlobName []byte
+type BlobName struct {
+ bn []byte
+}
// BlobNameFromHashAndType generates the name of a blob from some hash (e.g. sha256 of blob's content)
// and given blob type
-func BlobNameFromHashAndType(hash []byte, t BlobType) (BlobName, error) {
+func BlobNameFromHashAndType(hash []byte, t BlobType) (*BlobName, error) {
if len(hash) == 0 || len(hash) > 0x7E {
return nil, ErrInvalidBlobName
}
- ret := make([]byte, len(hash)+1)
+ bn := make([]byte, len(hash)+1)
- copy(ret[1:], hash)
+ copy(bn[1:], hash)
scrambledTypeByte := byte(t.t)
for _, b := range hash {
scrambledTypeByte ^= b
}
- ret[0] = scrambledTypeByte
+ bn[0] = scrambledTypeByte
- return BlobName(ret), nil
+ return &BlobName{bn: bn}, nil
}
// BlobNameFromString decodes base58-encoded string into blob name
-func BlobNameFromString(s string) (BlobName, error) {
- decoded := base58.Decode(s)
- if len(decoded) == 0 || len(decoded) > 0x7F {
+func BlobNameFromString(s string) (*BlobName, error) {
+ return BlobNameFromBytes(base58.Decode(s))
+}
+
+func BlobNameFromBytes(n []byte) (*BlobName, error) {
+ if len(n) == 0 || len(n) > 0x7F {
return nil, ErrInvalidBlobName
}
- return BlobName(decoded), nil
+ return &BlobName{bn: copyBytes(n)}, nil
}
// Returns base58-encoded blob name
-func (b BlobName) String() string {
- return base58.Encode(b)
+func (b *BlobName) String() string {
+ return base58.Encode(b.bn)
}
// Extracts hash from blob name
-func (b BlobName) Hash() []byte {
- return b[1:]
+func (b *BlobName) Hash() []byte {
+ return b.bn[1:]
}
// Extracts blob type from the name
-func (b BlobName) Type() BlobType {
+func (b *BlobName) Type() BlobType {
ret := byte(0)
- for _, by := range b {
+ for _, by := range b.bn {
ret ^= by
}
return BlobType{t: ret}
}
+
+func (b *BlobName) Bytes() []byte {
+ return copyBytes(b.bn)
+}
+
+func (b *BlobName) Equal(b2 *BlobName) bool {
+ return subtle.ConstantTimeCompare(b.bn, b2.bn) == 1
+}
diff --git a/pkg/common/blob_name_test.go b/pkg/common/blob_name_test.go
index 28d1550..f60e8c2 100644
--- a/pkg/common/blob_name_test.go
+++ b/pkg/common/blob_name_test.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -43,7 +43,7 @@ func TestBlobName(t *testing.T) {
bn, err := BlobNameFromHashAndType(h, bt)
assert.NoError(t, err)
assert.NotEmpty(t, bn)
- assert.Greater(t, len(bn), len(h))
+ assert.Greater(t, len(bn.bn), len(h))
assert.Equal(t, h, bn.Hash())
assert.Equal(t, bt, bn.Type())
@@ -51,6 +51,13 @@ func TestBlobName(t *testing.T) {
bn2, err := BlobNameFromString(s)
require.NoError(t, err)
require.Equal(t, bn, bn2)
+ require.True(t, bn.Equal(bn2))
+
+ b := bn.Bytes()
+ bn3, err := BlobNameFromBytes(b)
+ require.NoError(t, err)
+ require.Equal(t, bn, bn3)
+ require.True(t, bn.Equal(bn3))
})
}
}
diff --git a/pkg/datastore/datastore.go b/pkg/datastore/datastore.go
index cc9591d..5767ff3 100644
--- a/pkg/datastore/datastore.go
+++ b/pkg/datastore/datastore.go
@@ -38,7 +38,7 @@ func (ds *datastore) Address() string {
return ds.s.address()
}
-func (ds *datastore) Open(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (ds *datastore) Open(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
switch name.Type() {
case blobtypes.Static:
return ds.openStatic(ctx, name)
@@ -49,7 +49,7 @@ func (ds *datastore) Open(ctx context.Context, name common.BlobName) (io.ReadClo
}
}
-func (ds *datastore) Update(ctx context.Context, name common.BlobName, updateStream io.Reader) error {
+func (ds *datastore) Update(ctx context.Context, name *common.BlobName, updateStream io.Reader) error {
switch name.Type() {
case blobtypes.Static:
return ds.updateStatic(ctx, name, updateStream)
@@ -60,11 +60,11 @@ func (ds *datastore) Update(ctx context.Context, name common.BlobName, updateStr
}
}
-func (ds *datastore) Exists(ctx context.Context, name common.BlobName) (bool, error) {
+func (ds *datastore) Exists(ctx context.Context, name *common.BlobName) (bool, error) {
return ds.s.exists(ctx, name)
}
-func (ds *datastore) Delete(ctx context.Context, name common.BlobName) error {
+func (ds *datastore) Delete(ctx context.Context, name *common.BlobName) error {
return ds.s.delete(ctx, name)
}
diff --git a/pkg/datastore/datastore_dynamic_link.go b/pkg/datastore/datastore_dynamic_link.go
index ed0375a..7ed3bbb 100644
--- a/pkg/datastore/datastore_dynamic_link.go
+++ b/pkg/datastore/datastore_dynamic_link.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ import (
"github.com/cinode/go/pkg/internal/blobtypes/dynamiclink"
)
-func (ds *datastore) openDynamicLink(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (ds *datastore) openDynamicLink(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
rc, err := ds.s.openReadStream(ctx, name)
if err != nil {
return nil, err
@@ -50,7 +50,7 @@ func (ds *datastore) openDynamicLink(ctx context.Context, name common.BlobName)
// read from - only for comparison
func (ds *datastore) newLinkGreaterThanCurrent(
ctx context.Context,
- name common.BlobName,
+ name *common.BlobName,
newLink *dynamiclink.PublicReader,
) (
bool, error,
@@ -72,7 +72,7 @@ func (ds *datastore) newLinkGreaterThanCurrent(
return newLink.GreaterThan(dl), nil
}
-func (ds *datastore) updateDynamicLink(ctx context.Context, name common.BlobName, updateStream io.Reader) error {
+func (ds *datastore) updateDynamicLink(ctx context.Context, name *common.BlobName, updateStream io.Reader) error {
ws, err := ds.s.openWriteStream(ctx, name)
if err != nil {
return err
diff --git a/pkg/datastore/datastore_static.go b/pkg/datastore/datastore_static.go
index bee30a9..05bb5bb 100644
--- a/pkg/datastore/datastore_static.go
+++ b/pkg/datastore/datastore_static.go
@@ -27,7 +27,7 @@ import (
"github.com/cinode/go/pkg/internal/utilities/validatingreader"
)
-func (ds *datastore) openStatic(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (ds *datastore) openStatic(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
rc, err := ds.s.openReadStream(ctx, name)
if err != nil {
return nil, err
@@ -47,7 +47,7 @@ func (ds *datastore) openStatic(ctx context.Context, name common.BlobName) (io.R
}, nil
}
-func (ds *datastore) updateStatic(ctx context.Context, name common.BlobName, updateStream io.Reader) error {
+func (ds *datastore) updateStatic(ctx context.Context, name *common.BlobName, updateStream io.Reader) error {
outputStream, err := ds.s.openWriteStream(ctx, name)
if err != nil {
return err
diff --git a/pkg/datastore/datastore_test.go b/pkg/datastore/datastore_test.go
index 966cc45..6b53f8a 100644
--- a/pkg/datastore/datastore_test.go
+++ b/pkg/datastore/datastore_test.go
@@ -32,10 +32,10 @@ import (
type mockStore struct {
fKind func() string
fAddress func() string
- fOpenReadStream func(ctx context.Context, name common.BlobName) (io.ReadCloser, error)
- fOpenWriteStream func(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error)
- fExists func(ctx context.Context, name common.BlobName) (bool, error)
- fDelete func(ctx context.Context, name common.BlobName) error
+ fOpenReadStream func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error)
+ fOpenWriteStream func(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error)
+ fExists func(ctx context.Context, name *common.BlobName) (bool, error)
+ fDelete func(ctx context.Context, name *common.BlobName) error
}
func (s *mockStore) kind() string {
@@ -44,16 +44,16 @@ func (s *mockStore) kind() string {
func (s *mockStore) address() string {
return s.fAddress()
}
-func (s *mockStore) openReadStream(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (s *mockStore) openReadStream(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
return s.fOpenReadStream(ctx, name)
}
-func (s *mockStore) openWriteStream(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error) {
+func (s *mockStore) openWriteStream(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error) {
return s.fOpenWriteStream(ctx, name)
}
-func (s *mockStore) exists(ctx context.Context, name common.BlobName) (bool, error) {
+func (s *mockStore) exists(ctx context.Context, name *common.BlobName) (bool, error) {
return s.fExists(ctx, name)
}
-func (s *mockStore) delete(ctx context.Context, name common.BlobName) error {
+func (s *mockStore) delete(ctx context.Context, name *common.BlobName) error {
return s.fDelete(ctx, name)
}
@@ -77,7 +77,7 @@ func TestDatastoreWriteFailure(t *testing.T) {
t.Run("error on opening write stream", func(t *testing.T) {
errRet := errors.New("error")
ds := &datastore{s: &mockStore{
- fOpenWriteStream: func(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error) {
+ fOpenWriteStream: func(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error) {
return nil, errRet
},
}}
@@ -92,7 +92,7 @@ func TestDatastoreWriteFailure(t *testing.T) {
closeCalled := false
cancelCalled := false
ds := &datastore{s: &mockStore{
- fOpenWriteStream: func(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error) {
+ fOpenWriteStream: func(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error) {
return &mockWriteCloseCanceller{
fWrite: func(b []byte) (int, error) {
require.False(t, closeCalled)
@@ -111,7 +111,7 @@ func TestDatastoreWriteFailure(t *testing.T) {
},
}, nil
},
- fOpenReadStream: func(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+ fOpenReadStream: func(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
return nil, ErrNotFound
},
}}
diff --git a/pkg/datastore/interface.go b/pkg/datastore/interface.go
index 3f24a68..06a7e61 100644
--- a/pkg/datastore/interface.go
+++ b/pkg/datastore/interface.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -65,20 +65,20 @@ type DS interface {
// If a non-nil error is returned, the writer will be nil. Otherwise it
// is necessary to call the `Close` on the returned reader once done
// with the reader.
- Open(ctx context.Context, name common.BlobName) (io.ReadCloser, error)
+ Open(ctx context.Context, name *common.BlobName) (io.ReadCloser, error)
// Update retrieves an update for given blob. The data is read from given
// reader until it returns either EOF, ending successful save, or any other
// error which will cancel the save - in such case this error will be
// returned from this function. If the data does not pass validation,
// ErrInvalidData will be returned.
- Update(ctx context.Context, name common.BlobName, r io.Reader) error
+ Update(ctx context.Context, name *common.BlobName, r io.Reader) error
// Exists does check whether blob of given name exists in the datastore.
// Partially written blobs are equal to non-existing ones. Boolean value
// returned indicates whether the blob exists or not, non-nil error indicates
// that there was an error while trying to check blob's existence.
- Exists(ctx context.Context, name common.BlobName) (bool, error)
+ Exists(ctx context.Context, name *common.BlobName) (bool, error)
// Delete tries to remove blob with given name from the datastore.
// If blob does not exist (which includes partially written blobs)
@@ -87,5 +87,5 @@ type DS interface {
// read the blob data. After the `Delete` call succeeds, trying to read
// the blob with the `Open` should end up with an ErrNotFound error
// until the blob is updated again with a successful `Update` call.
- Delete(ctx context.Context, name common.BlobName) error
+ Delete(ctx context.Context, name *common.BlobName) error
}
diff --git a/pkg/datastore/multi_source.go b/pkg/datastore/multi_source.go
index a47cabb..e2084b7 100644
--- a/pkg/datastore/multi_source.go
+++ b/pkg/datastore/multi_source.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -75,25 +75,25 @@ func (m *multiSourceDatastore) Address() string {
return "multi-source://"
}
-func (m *multiSourceDatastore) Open(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (m *multiSourceDatastore) Open(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
m.fetch(ctx, name)
return m.main.Open(ctx, name)
}
-func (m *multiSourceDatastore) Update(ctx context.Context, name common.BlobName, r io.Reader) error {
+func (m *multiSourceDatastore) Update(ctx context.Context, name *common.BlobName, r io.Reader) error {
return m.main.Update(ctx, name, r)
}
-func (m *multiSourceDatastore) Exists(ctx context.Context, name common.BlobName) (bool, error) {
+func (m *multiSourceDatastore) Exists(ctx context.Context, name *common.BlobName) (bool, error) {
m.fetch(ctx, name)
return m.main.Exists(ctx, name)
}
-func (m *multiSourceDatastore) Delete(ctx context.Context, name common.BlobName) error {
+func (m *multiSourceDatastore) Delete(ctx context.Context, name *common.BlobName) error {
return m.main.Delete(ctx, name)
}
-func (m *multiSourceDatastore) fetch(ctx context.Context, name common.BlobName) {
+func (m *multiSourceDatastore) fetch(ctx context.Context, name *common.BlobName) {
// TODO:
// if not found locally, go over all additional sources and check if exists,
// for dynamic content, perform merge operation if found in more than one,
diff --git a/pkg/datastore/multi_source_test.go b/pkg/datastore/multi_source_test.go
index a6faa7b..a06badb 100644
--- a/pkg/datastore/multi_source_test.go
+++ b/pkg/datastore/multi_source_test.go
@@ -31,7 +31,7 @@ import (
func TestMultiSourceDatastore(t *testing.T) {
- addBlob := func(ds DS, c string) common.BlobName {
+ addBlob := func(ds DS, c string) *common.BlobName {
hash := sha256.Sum256([]byte(c))
name, err := common.BlobNameFromHashAndType(hash[:], blobtypes.Static)
require.NoError(t, err)
@@ -40,7 +40,7 @@ func TestMultiSourceDatastore(t *testing.T) {
return name
}
- fetchBlob := func(ds DS, n common.BlobName) string {
+ fetchBlob := func(ds DS, n *common.BlobName) string {
rc, err := ds.Open(context.Background(), n)
require.NoError(t, err)
@@ -53,7 +53,7 @@ func TestMultiSourceDatastore(t *testing.T) {
return string(data)
}
- ensureNotFound := func(ds DS, n common.BlobName) {
+ ensureNotFound := func(ds DS, n *common.BlobName) {
_, err := ds.Open(context.Background(), n)
require.ErrorIs(t, err, ErrNotFound)
}
diff --git a/pkg/datastore/storage.go b/pkg/datastore/storage.go
index f4c6545..d9d3bfa 100644
--- a/pkg/datastore/storage.go
+++ b/pkg/datastore/storage.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -31,8 +31,8 @@ type WriteCloseCanceller interface {
type storage interface {
kind() string
address() string
- openReadStream(ctx context.Context, name common.BlobName) (io.ReadCloser, error)
- openWriteStream(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error)
- exists(ctx context.Context, name common.BlobName) (bool, error)
- delete(ctx context.Context, name common.BlobName) error
+ openReadStream(ctx context.Context, name *common.BlobName) (io.ReadCloser, error)
+ openWriteStream(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error)
+ exists(ctx context.Context, name *common.BlobName) (bool, error)
+ delete(ctx context.Context, name *common.BlobName) error
}
diff --git a/pkg/datastore/storage_filesystem.go b/pkg/datastore/storage_filesystem.go
index 4bea382..7472584 100644
--- a/pkg/datastore/storage_filesystem.go
+++ b/pkg/datastore/storage_filesystem.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -52,7 +52,7 @@ func (fs *fileSystem) address() string {
return filePrefix + fs.path
}
-func (fs *fileSystem) openReadStream(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (fs *fileSystem) openReadStream(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
rc, err := os.Open(fs.getFileName(name, fsSuffixCurrent))
if os.IsNotExist(err) {
return nil, ErrNotFound
@@ -60,7 +60,7 @@ func (fs *fileSystem) openReadStream(ctx context.Context, name common.BlobName)
return rc, err
}
-func (fs *fileSystem) createTemporaryWriteStream(name common.BlobName) (*os.File, error) {
+func (fs *fileSystem) createTemporaryWriteStream(name *common.BlobName) (*os.File, error) {
tempName := fs.getFileName(name, fsSuffixUpload)
// Ensure dir exists
@@ -121,7 +121,7 @@ func (w *fileSystemWriteCloser) Close() error {
return nil
}
-func (fs *fileSystem) openWriteStream(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error) {
+func (fs *fileSystem) openWriteStream(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error) {
fl, err := fs.createTemporaryWriteStream(name)
if err != nil {
@@ -134,7 +134,7 @@ func (fs *fileSystem) openWriteStream(ctx context.Context, name common.BlobName)
}, nil
}
-func (fs *fileSystem) exists(ctx context.Context, name common.BlobName) (bool, error) {
+func (fs *fileSystem) exists(ctx context.Context, name *common.BlobName) (bool, error) {
_, err := os.Stat(fs.getFileName(name, fsSuffixCurrent))
if os.IsNotExist(err) {
return false, nil
@@ -145,7 +145,7 @@ func (fs *fileSystem) exists(ctx context.Context, name common.BlobName) (bool, e
return true, nil
}
-func (fs *fileSystem) delete(ctx context.Context, name common.BlobName) error {
+func (fs *fileSystem) delete(ctx context.Context, name *common.BlobName) error {
err := os.Remove(fs.getFileName(name, fsSuffixCurrent))
if os.IsNotExist(err) {
return ErrNotFound
@@ -153,7 +153,7 @@ func (fs *fileSystem) delete(ctx context.Context, name common.BlobName) error {
return err
}
-func (fs *fileSystem) getFileName(name common.BlobName, suffix string) string {
+func (fs *fileSystem) getFileName(name *common.BlobName, suffix string) string {
fNameParts := []string{fs.path}
nameStr := name.String()
diff --git a/pkg/datastore/storage_memory.go b/pkg/datastore/storage_memory.go
index 90cabc7..3abf51e 100644
--- a/pkg/datastore/storage_memory.go
+++ b/pkg/datastore/storage_memory.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -54,7 +54,7 @@ func (m *memory) address() string {
return memoryPrefix
}
-func (m *memory) openReadStream(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (m *memory) openReadStream(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
m.rw.RLock()
defer m.rw.RUnlock()
@@ -92,7 +92,7 @@ func (w *memoryWriteCloser) Close() error {
return nil
}
-func (m *memory) openWriteStream(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error) {
+func (m *memory) openWriteStream(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error) {
m.rw.Lock()
defer m.rw.Unlock()
@@ -111,7 +111,7 @@ func (m *memory) openWriteStream(ctx context.Context, name common.BlobName) (Wri
}, nil
}
-func (m *memory) exists(ctx context.Context, n common.BlobName) (bool, error) {
+func (m *memory) exists(ctx context.Context, n *common.BlobName) (bool, error) {
m.rw.RLock()
defer m.rw.RUnlock()
@@ -122,7 +122,7 @@ func (m *memory) exists(ctx context.Context, n common.BlobName) (bool, error) {
return true, nil
}
-func (m *memory) delete(ctx context.Context, n common.BlobName) error {
+func (m *memory) delete(ctx context.Context, n *common.BlobName) error {
m.rw.Lock()
defer m.rw.Unlock()
diff --git a/pkg/datastore/storage_raw_filesystem.go b/pkg/datastore/storage_raw_filesystem.go
index 7575403..a7f8253 100644
--- a/pkg/datastore/storage_raw_filesystem.go
+++ b/pkg/datastore/storage_raw_filesystem.go
@@ -50,7 +50,7 @@ func (fs *rawFileSystem) address() string {
return rawFilePrefix + fs.path
}
-func (fs *rawFileSystem) openReadStream(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (fs *rawFileSystem) openReadStream(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
rc, err := os.Open(filepath.Join(fs.path, name.String()))
if os.IsNotExist(err) {
return nil, ErrNotFound
@@ -81,7 +81,7 @@ func (w *rawFilesystemWriter) Cancel() {
os.Remove(w.file.Name())
}
-func (fs *rawFileSystem) openWriteStream(ctx context.Context, name common.BlobName) (WriteCloseCanceller, error) {
+func (fs *rawFileSystem) openWriteStream(ctx context.Context, name *common.BlobName) (WriteCloseCanceller, error) {
tempNum := atomic.AddUint64(&fs.tempFileNum, 1)
tempFileName := filepath.Join(fs.path, fmt.Sprintf("tempfile_%d", tempNum))
@@ -97,7 +97,7 @@ func (fs *rawFileSystem) openWriteStream(ctx context.Context, name common.BlobNa
}, nil
}
-func (fs *rawFileSystem) exists(ctx context.Context, name common.BlobName) (bool, error) {
+func (fs *rawFileSystem) exists(ctx context.Context, name *common.BlobName) (bool, error) {
_, err := os.Stat(filepath.Join(fs.path, name.String()))
if os.IsNotExist(err) {
return false, nil
@@ -108,7 +108,7 @@ func (fs *rawFileSystem) exists(ctx context.Context, name common.BlobName) (bool
return true, nil
}
-func (fs *rawFileSystem) delete(ctx context.Context, name common.BlobName) error {
+func (fs *rawFileSystem) delete(ctx context.Context, name *common.BlobName) error {
err := os.Remove(filepath.Join(fs.path, name.String()))
if os.IsNotExist(err) {
return ErrNotFound
diff --git a/pkg/datastore/storage_test.go b/pkg/datastore/storage_test.go
index 1a3f10b..aeed0ef 100644
--- a/pkg/datastore/storage_test.go
+++ b/pkg/datastore/storage_test.go
@@ -121,7 +121,7 @@ func TestStorageSaveOpenCancelSuccess(t *testing.T) {
func TestStorageDelete(t *testing.T) {
for _, st := range allTestStorages(t) {
t.Run(st.kind(), func(t *testing.T) {
- blobNames := []common.BlobName{}
+ blobNames := []*common.BlobName{}
blobDatas := [][]byte{}
t.Run("generate test data", func(t *testing.T) {
diff --git a/pkg/datastore/utils_autogen_for_test.go b/pkg/datastore/utils_autogen_for_test.go
index 91a5491..10a3c2b 100644
--- a/pkg/datastore/utils_autogen_for_test.go
+++ b/pkg/datastore/utils_autogen_for_test.go
@@ -26,64 +26,65 @@ import (
"github.com/cinode/go/pkg/blobtypes"
"github.com/cinode/go/pkg/common"
"github.com/cinode/go/pkg/internal/blobtypes/dynamiclink"
+ "github.com/cinode/go/pkg/utilities/golang"
"github.com/jbenet/go-base58"
)
var testBlobs = []struct {
- name common.BlobName
+ name *common.BlobName
data []byte
expected []byte
}{
// Static blobs
{
- common.BlobName(base58.Decode("KDc2ijtWc9mGxb5hP29YSBgkMLH8wCWnVimpvP3M6jdAk")),
+ golang.Must(common.BlobNameFromString("KDc2ijtWc9mGxb5hP29YSBgkMLH8wCWnVimpvP3M6jdAk")),
base58.Decode("3A836b"),
base58.Decode("3A836b"),
},
{
- common.BlobName(base58.Decode("BG8WaXMAckEfbCuoiHpx2oMAS4zAaPqAqrgf5Q3YNzmHx")),
+ golang.Must(common.BlobNameFromString("BG8WaXMAckEfbCuoiHpx2oMAS4zAaPqAqrgf5Q3YNzmHx")),
base58.Decode("AXG4Ffv"),
base58.Decode("AXG4Ffv"),
},
{
- common.BlobName(base58.Decode("2GLoj4Bk7SvjQngCT85gxWRu2DXCCjs9XWKsSpM85Wq3Ve")),
+ golang.Must(common.BlobNameFromString("2GLoj4Bk7SvjQngCT85gxWRu2DXCCjs9XWKsSpM85Wq3Ve")),
base58.Decode(""),
base58.Decode(""),
},
{
- common.BlobName(base58.Decode("251SEdnHjwyvUqX1EZnuKruta4yHMkTDed7LGoi3nUJwhx")),
+ golang.Must(common.BlobNameFromString("251SEdnHjwyvUqX1EZnuKruta4yHMkTDed7LGoi3nUJwhx")),
base58.Decode("1DhLfjA9ij9QFBh7J8ysnN3uvGcsNQa7vaxKEwbYEMSEXuZbgyCtUAn5M4QxmgLVnCJ6cARY5Ry2EJVXxn48D837xGxRp1M2rRnz9BHVGw2sc9Ee1DkLmsurGoKX1Evt2iuMhNQyNGh2CrsHWxdGTvZVhpHShmKRziHZEDybK4ZaJh9RvTEngYQkeHAtC3J3TW6dbpaNWBNLD6YdU5xPcaE3AUPMnk4CM1dD8XMBRQekZguNJHNZwNQCXRQodVyGLVRzi1dkTG2odnrcbZ4i3oNxyJyz"),
base58.Decode("4wNoVjVdtJ5FKtD3ZmHW4bvTiWgZFmwmps9JEJxDdinXscjMWjjeTQo2Hzwkg6GnFp1kmNoSZR9d5hXnG4qHi6mx2KqM7SVJ"),
},
{
- common.BlobName(base58.Decode("27vP1JG4VJNZvQJ4Zfhy3H5xKugurbh89B7rKTcStM9guB")),
+ golang.Must(common.BlobNameFromString("27vP1JG4VJNZvQJ4Zfhy3H5xKugurbh89B7rKTcStM9guB")),
base58.Decode("1eE2wp1836WtQmEbjdavggJvFPU7dZbQQH5EBS2LwBL2rYjArM9mjvWCrAbpZDkLFx7dQ5FyejnHD1EbwofDDLa1zNmN94qws1UfhNM4KCBT4oijCfPbJHobp7h5tcZQwMZy1gA3jTQBRvem2ioNuSFwqKRwbVJs9S21QFB86XuuUggNmj6sfAsDKwvE4M5EQxSkDft3CFiUX6XUMgCJUAreBRoT32wz7ncNbFaETMscFTTjFUYYiUFuv6fQESbfDCV3rfcSmxSLbLqm2u2Pd83cnzqfH"),
base58.Decode("8Ya88xk8C7tnYXAKJL9t1bCoBUur9dLr44SwhchsBh7UQb7TmZihVpffndCxLmhH9YjMrQj442YhiW3Hr2bBUR4vCcn6VdJLK"),
},
{
- common.BlobName(base58.Decode("e3T1HcdDLc73NHed2SFu5XHUQx5KDwgdAYTMmmEk2Ekqm")),
+ golang.Must(common.BlobNameFromString("e3T1HcdDLc73NHed2SFu5XHUQx5KDwgdAYTMmmEk2Ekqm")),
base58.Decode("1yULPpEx3gjpKNBLCEzb2oj2xRcdGfr88CztgfYfEipBGiJCijqWBEEhXTaReU6CBcbt61h2DeGoZhgAfTiEwppGkJWCJrtmkSiLiib8UhupERptC3U2j6BKDg8PLwHq113WKJWM4tr2c3WxTXTSosjk7fBhuz3GJgqdYLecBfnKMGUqw8XkBf2Lth2REAw4ccZmmYn21x1W1tFdVCe4cAzAEqc5adJC3j3prPsYvL8QSqBZE5nQcnvfGekTUqn7HDZbZvqFN3TKc8HSVK9YUQ"),
base58.Decode("MpaLZEfQpasGN1khuvpTC6CFnJucjVmzRfZwaxJkti1uQAetXnvDL8PmrFHZkr7XX1GtKaQqB2P6M2KZjCYCTfxMZi"),
},
}
var dynamicLinkPropagationData = []struct {
- name common.BlobName
+ name *common.BlobName
data []byte
expected []byte
}{
{
- common.BlobName(base58.Decode("GUnL66Lyv2Qs4baxPhy59kF4dsB9HWakvTjMBjNGFLT6g")),
+ golang.Must(common.BlobNameFromString("GUnL66Lyv2Qs4baxPhy59kF4dsB9HWakvTjMBjNGFLT6g")),
base58.Decode("17Jk3QJMCABypJWuAivYwMi43gN1KxPVy3qg1e4HYQFe8BCQPVm6GX8auaFjXhwZZQhxaHjDirXH6Ze59irpWSkBicnqigPcd6j5H9AjnPHTHRKhyLSSX5kqkVRiwSRvTojGvx6oeMqj2hyhK9LxStjtYVW7WKxoCwATgQbkUWRszH2Eff3bHND8RbknhfZDSvSmXxSR8h6tMTErcV8dGyPYUysdV6Gd9bEK8bjRs6NxhCLpQ55dvZcwEi6i7rqo2WQWhY7HMMhmKhggvLXcReaUMTByq"),
base58.Decode("PnB1W5tcQdkzYrnvE8Z1BAsBgv9kVgdeZMp78WYxnJJKi2RDPHgx9VvzYZ1hzGhVxBetGfuxwdstH8E9oNiUQ6JDNPWYZAXE7"),
},
{
- common.BlobName(base58.Decode("GUnL66Lyv2Qs4baxPhy59kF4dsB9HWakvTjMBjNGFLT6g")),
+ golang.Must(common.BlobNameFromString("GUnL66Lyv2Qs4baxPhy59kF4dsB9HWakvTjMBjNGFLT6g")),
base58.Decode("17Jk3QJMCABypJWuAivYwMi43gN1KxPVy3qg1e4HYQFe8BCQPVm6GX8cxHJfhdYkZjq51cCNcGKTXirXxYcaGA5XdykeezU9P6jE72kmmpLNthhndMuE9oz7p725mWqPYMbMiw4Qp54oiRWdxEvh3yKRvjRA7MFK9ZJKGY1evFGbqsaMAE715aRYvP3yNjE7FaNwkKbAn1xJm4ojF4qjtaNN5zxHRgQfdZYLgybbsYJ3TJUNMxxNPkqu2CsiieeKJpJce8U5g3HAP6jAKSiXMBcmBfGm8"),
base58.Decode("mgbcvX3FFDqwigwuybL2misVJSLjXzZs9bumic8rFSHCD9nMqbmsTxNWnRpoVn3E2GKQaFcUdUzhMax1oiq5X9abrKYqXYMtN"),
},
{
- common.BlobName(base58.Decode("GUnL66Lyv2Qs4baxPhy59kF4dsB9HWakvTjMBjNGFLT6g")),
+ golang.Must(common.BlobNameFromString("GUnL66Lyv2Qs4baxPhy59kF4dsB9HWakvTjMBjNGFLT6g")),
base58.Decode("17Jk3QJMCABypJWuAivYwMi43gN1KxPVy3qg1e4HYQFe8BCQPVm6GX8bbhTds8inbAV58TPXDmM19FuZEFGP1B3w9gBPoTfVQUfrhmB2A4uBrcKSxFBMNT8djhviFuunpME39ZSEZp3KS4w1jms7gKnoG237vs4vnNn4uRVF6pj5oorff4VxECGVektdbkiU2BcAUQUHbkqkcw3f3sX5Rtw5Ckv5mBzaa4zqUtLiK7eYp8Wqc5Au7mzTuXvPDpWbX85hz7EnDsuHQEoZAeFCFeWdzZSgS"),
base58.Decode("WZpzxEiTLyv42JwAfYCTo7TckS1bLY6XmuoJWoqz8BVzYNqUSvDf58KJR6tjuEegLRYCkiprPskdP7PMFP6wazLxed8JEPAsC"),
},
@@ -92,14 +93,14 @@ var dynamicLinkPropagationData = []struct {
func TestDatasetGeneration(t *testing.T) {
t.SkipNow()
- dumpBlob := func(name, content []byte, expected []byte) {
+ dumpBlob := func(name *common.BlobName, content []byte, expected []byte) {
fmt.Printf(""+
" {\n"+
- " common.BlobName(base58.Decode(\"%s\")),\n"+
+ " golang.Must(common.BlobNameFromString(\"%s\")),\n"+
" base58.Decode(\"%s\"),\n"+
" base58.Decode(\"%s\"),\n"+
" },\n",
- base58.Encode(name),
+ name.String(),
base58.Encode(content),
base58.Encode(expected),
)
diff --git a/pkg/datastore/utils_for_test.go b/pkg/datastore/utils_for_test.go
index a7926e4..afc8f50 100644
--- a/pkg/datastore/utils_for_test.go
+++ b/pkg/datastore/utils_for_test.go
@@ -25,7 +25,7 @@ import (
"github.com/cinode/go/pkg/common"
)
-var emptyBlobNameStatic = func() common.BlobName {
+var emptyBlobNameStatic = func() *common.BlobName {
bn, err := common.BlobNameFromHashAndType(sha256.New().Sum(nil), blobtypes.Static)
if err != nil {
panic(err)
@@ -33,7 +33,7 @@ var emptyBlobNameStatic = func() common.BlobName {
return bn
}()
-var emptyBlobNameDynamicLink = func() common.BlobName {
+var emptyBlobNameDynamicLink = func() *common.BlobName {
bn, err := common.BlobNameFromHashAndType(sha256.New().Sum(nil), blobtypes.DynamicLink)
if err != nil {
panic(err)
@@ -41,7 +41,7 @@ var emptyBlobNameDynamicLink = func() common.BlobName {
return bn
}()
-var emptyBlobNamesOfAllTypes = []common.BlobName{
+var emptyBlobNamesOfAllTypes = []*common.BlobName{
emptyBlobNameStatic,
emptyBlobNameDynamicLink,
}
diff --git a/pkg/datastore/webconnector.go b/pkg/datastore/webconnector.go
index f015f39..3017e54 100644
--- a/pkg/datastore/webconnector.go
+++ b/pkg/datastore/webconnector.go
@@ -83,7 +83,7 @@ func (w *webConnector) Address() string {
return w.baseURL
}
-func (w *webConnector) Open(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (w *webConnector) Open(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
switch name.Type() {
case blobtypes.Static:
return w.openStatic(ctx, name)
@@ -94,7 +94,7 @@ func (w *webConnector) Open(ctx context.Context, name common.BlobName) (io.ReadC
}
}
-func (w *webConnector) openStatic(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (w *webConnector) openStatic(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
req, err := http.NewRequestWithContext(
ctx,
http.MethodGet,
@@ -125,7 +125,7 @@ func (w *webConnector) openStatic(ctx context.Context, name common.BlobName) (io
}, nil
}
-func (w *webConnector) openDynamicLink(ctx context.Context, name common.BlobName) (io.ReadCloser, error) {
+func (w *webConnector) openDynamicLink(ctx context.Context, name *common.BlobName) (io.ReadCloser, error) {
req, err := http.NewRequestWithContext(
ctx,
http.MethodGet,
@@ -156,7 +156,7 @@ func (w *webConnector) openDynamicLink(ctx context.Context, name common.BlobName
return io.NopCloser(bytes.NewReader(buff.Bytes())), nil
}
-func (w *webConnector) Update(ctx context.Context, name common.BlobName, r io.Reader) error {
+func (w *webConnector) Update(ctx context.Context, name *common.BlobName, r io.Reader) error {
req, err := http.NewRequestWithContext(
ctx,
http.MethodPut,
@@ -178,7 +178,7 @@ func (w *webConnector) Update(ctx context.Context, name common.BlobName, r io.Re
return w.errCheck(res)
}
-func (w *webConnector) Exists(ctx context.Context, name common.BlobName) (bool, error) {
+func (w *webConnector) Exists(ctx context.Context, name *common.BlobName) (bool, error) {
req, err := http.NewRequestWithContext(
ctx,
http.MethodHead,
@@ -205,7 +205,7 @@ func (w *webConnector) Exists(ctx context.Context, name common.BlobName) (bool,
return false, err
}
-func (w *webConnector) Delete(ctx context.Context, name common.BlobName) error {
+func (w *webConnector) Delete(ctx context.Context, name *common.BlobName) error {
req, err := http.NewRequestWithContext(
ctx,
http.MethodDelete,
diff --git a/pkg/datastore/webinterface.go b/pkg/datastore/webinterface.go
index 60e03c0..5d756fb 100644
--- a/pkg/datastore/webinterface.go
+++ b/pkg/datastore/webinterface.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -76,7 +76,7 @@ func (i *webInterface) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
-func (i *webInterface) getName(w http.ResponseWriter, r *http.Request) (common.BlobName, error) {
+func (i *webInterface) getName(w http.ResponseWriter, r *http.Request) (*common.BlobName, error) {
// Don't allow url queries and require path to start with '/'
if r.URL.Path[0] != '/' || r.URL.RawQuery != "" {
return nil, common.ErrInvalidBlobName
@@ -90,7 +90,7 @@ func (i *webInterface) getName(w http.ResponseWriter, r *http.Request) (common.B
return bn, nil
}
-func (i *webInterface) sendName(name common.BlobName, w http.ResponseWriter, r *http.Request) {
+func (i *webInterface) sendName(name *common.BlobName, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-type", "application/json")
json.NewEncoder(w).Encode(&webNameResponse{
Name: name.String(),
diff --git a/pkg/datastore/webinterface_test.go b/pkg/datastore/webinterface_test.go
index e733108..8d2cd28 100644
--- a/pkg/datastore/webinterface_test.go
+++ b/pkg/datastore/webinterface_test.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -97,7 +97,7 @@ func TestWebInterfaceDeleteQueryString(t *testing.T) {
func TestWebIntefaceExistsFailure(t *testing.T) {
server := httptest.NewServer(WebInterface(&datastore{
s: &mockStore{
- fExists: func(ctx context.Context, name common.BlobName) (bool, error) { return false, errors.New("fail") },
+ fExists: func(ctx context.Context, name *common.BlobName) (bool, error) { return false, errors.New("fail") },
},
}))
defer server.Close()
diff --git a/pkg/internal/blobtypes/dynamiclink/public.go b/pkg/internal/blobtypes/dynamiclink/public.go
index 5d8cbbe..ded9c80 100644
--- a/pkg/internal/blobtypes/dynamiclink/public.go
+++ b/pkg/internal/blobtypes/dynamiclink/public.go
@@ -61,7 +61,7 @@ type Public struct {
nonce uint64
}
-func (d *Public) BlobName() common.BlobName {
+func (d *Public) BlobName() *common.BlobName {
hasher := sha256.New()
storeByte(hasher, reservedByteValue)
@@ -80,7 +80,7 @@ type PublicReader struct {
Public
contentVersion uint64
signature []byte
- iv []byte
+ iv *common.BlobIV
r io.Reader
}
@@ -88,7 +88,7 @@ type PublicReader struct {
//
// Invalid links are rejected - i.e. if there's any error while reading the data
// or when the validation of the link fails for whatever reason
-func FromPublicData(name common.BlobName, r io.Reader) (*PublicReader, error) {
+func FromPublicData(name *common.BlobName, r io.Reader) (*PublicReader, error) {
dl := PublicReader{
Public: Public{
publicKey: make([]byte, ed25519.PublicKeySize),
@@ -119,7 +119,7 @@ func FromPublicData(name common.BlobName, r io.Reader) (*PublicReader, error) {
return nil, err
}
- if !bytes.Equal(dl.BlobName(), name) {
+ if !dl.BlobName().Equal(name) {
return nil, ErrInvalidDynamicLinkDataBlobName
}
@@ -140,10 +140,11 @@ func FromPublicData(name common.BlobName, r io.Reader) (*PublicReader, error) {
return nil, err
}
- dl.iv, err = readDynamicSizeBuff(r, "iv")
+ iv, err := readDynamicSizeBuff(r, "iv")
if err != nil {
return nil, err
}
+ dl.iv = common.BlobIVFromBytes(iv)
// Starting from validations at this point, errors are returned while reading.
// This is to prepare for future improvements when real streaming is
@@ -201,7 +202,7 @@ func (d *PublicReader) GetPublicDataReader() io.Reader {
// Preamble - dynamic link data
storeBuff(w, d.signature)
storeUint64(w, d.contentVersion)
- storeDynamicSizeBuff(w, d.iv)
+ storeDynamicSizeBuff(w, d.iv.Bytes())
return io.MultiReader(
bytes.NewReader(w.Bytes()), // Preamble
@@ -213,7 +214,7 @@ func (d *PublicReader) toSignDataHasherPrefilled() hash.Hash {
h := sha256.New()
storeByte(h, signatureForLinkData)
- storeDynamicSizeBuff(h, d.BlobName())
+ storeDynamicSizeBuff(h, d.BlobName().Bytes())
return h
}
@@ -238,13 +239,13 @@ func (d *PublicReader) GreaterThan(d2 *PublicReader) bool {
func (d *PublicReader) ivGeneratorPrefilled() cipherfactory.IVGenerator {
ivGenerator := cipherfactory.NewIVGenerator(blobtypes.DynamicLink)
- storeDynamicSizeBuff(ivGenerator, d.BlobName())
+ storeDynamicSizeBuff(ivGenerator, d.BlobName().Bytes())
storeUint64(ivGenerator, d.contentVersion)
return ivGenerator
}
-func (d *PublicReader) validateKeyInLinkData(key cipherfactory.Key, r io.Reader) error {
+func (d *PublicReader) validateKeyInLinkData(key *common.BlobKey, r io.Reader) error {
// At the beginning of the data there's the key validation block,
// that block contains a proof that the encryption key was deterministically derived
// from the blob name (thus preventing weak key attack)
@@ -256,7 +257,7 @@ func (d *PublicReader) validateKeyInLinkData(key cipherfactory.Key, r io.Reader)
dataSeed := append(
[]byte{signatureForEncryptionKeyGeneration},
- d.BlobName()...,
+ d.BlobName().Bytes()...,
)
// Key validation block contains the signature of data seed
@@ -269,14 +270,14 @@ func (d *PublicReader) validateKeyInLinkData(key cipherfactory.Key, r io.Reader)
keyGenerator.Write(signature)
generatedKey := keyGenerator.Generate()
- if !bytes.Equal(generatedKey, key) {
+ if !generatedKey.Equal(key) {
return ErrInvalidDynamicLinkKeyMismatch
}
return nil
}
-func (d *PublicReader) GetLinkDataReader(key cipherfactory.Key) (io.Reader, error) {
+func (d *PublicReader) GetLinkDataReader(key *common.BlobKey) (io.Reader, error) {
r, err := cipherfactory.StreamCipherReader(key, d.iv, d.GetEncryptedLinkReader())
if err != nil {
@@ -305,7 +306,7 @@ func (d *PublicReader) GetLinkDataReader(key cipherfactory.Key) (io.Reader, erro
return validatingreader.CheckOnEOF(
r,
func() error {
- if !bytes.Equal(ivHasher.Generate(), d.iv) {
+ if !d.iv.Equal(ivHasher.Generate()) {
return ErrInvalidDynamicLinkIVMismatch
}
diff --git a/pkg/internal/blobtypes/dynamiclink/public_test.go b/pkg/internal/blobtypes/dynamiclink/public_test.go
index ae31481..5c030eb 100644
--- a/pkg/internal/blobtypes/dynamiclink/public_test.go
+++ b/pkg/internal/blobtypes/dynamiclink/public_test.go
@@ -37,7 +37,7 @@ func TestFromPublicData(t *testing.T) {
t.Run("Ensure we don't crash on truncated data", func(t *testing.T) {
for i := 0; i < 1000; i++ {
data := make([]byte, i)
- dl, err := FromPublicData(common.BlobName{}, bytes.NewReader(data))
+ dl, err := FromPublicData(&common.BlobName{}, bytes.NewReader(data))
require.ErrorIs(t, err, ErrInvalidDynamicLinkData)
require.Nil(t, dl)
}
@@ -45,7 +45,7 @@ func TestFromPublicData(t *testing.T) {
t.Run("Do not accept the link if reserved byte is not zero", func(t *testing.T) {
data := []byte{0xFF, 0, 0, 0}
- dl, err := FromPublicData(common.BlobName{}, bytes.NewReader(data))
+ dl, err := FromPublicData(&common.BlobName{}, bytes.NewReader(data))
require.ErrorIs(t, err, ErrInvalidDynamicLinkData)
require.ErrorIs(t, err, ErrInvalidDynamicLinkDataReservedByte)
require.Nil(t, dl)
@@ -307,7 +307,9 @@ func TestPublicReaderGetLinkDataReader(t *testing.T) {
require.NoError(t, err)
// Flip a single bit in IV
- pr.iv[len(pr.iv)/2] ^= 0x80
+ ivBytes := pr.iv.Bytes()
+ ivBytes[len(ivBytes)/2] ^= 0x80
+ pr.iv = common.BlobIVFromBytes(ivBytes)
// Because the IV is incorrect, key validation block that is encrypted will be invalid
// thus the method will complain about key, not the IV that will fail first
@@ -322,7 +324,7 @@ func TestPublicReaderGetLinkDataReader(t *testing.T) {
pr, _, err := link.UpdateLinkData(bytes.NewReader([]byte("Hello world")), 0)
require.NoError(t, err)
- _, err = pr.GetLinkDataReader(nil)
+ _, err = pr.GetLinkDataReader(&common.BlobKey{})
require.ErrorIs(t, err, cipherfactory.ErrInvalidEncryptionConfigKeyType)
})
}
diff --git a/pkg/internal/blobtypes/dynamiclink/publisher.go b/pkg/internal/blobtypes/dynamiclink/publisher.go
index 8359b6f..b296d7a 100644
--- a/pkg/internal/blobtypes/dynamiclink/publisher.go
+++ b/pkg/internal/blobtypes/dynamiclink/publisher.go
@@ -24,6 +24,7 @@ import (
"io"
"github.com/cinode/go/pkg/blobtypes"
+ "github.com/cinode/go/pkg/common"
"github.com/cinode/go/pkg/internal/utilities/cipherfactory"
)
@@ -65,14 +66,15 @@ func Create(randSource io.Reader) (*Publisher, error) {
}, nil
}
-func FromAuthInfo(authInfo []byte) (*Publisher, error) {
- if len(authInfo) != 1+ed25519.SeedSize+8 || authInfo[0] != 0 {
+func FromAuthInfo(authInfo *common.AuthInfo) (*Publisher, error) {
+ authInfoBytes := authInfo.Bytes()
+ if len(authInfoBytes) != 1+ed25519.SeedSize+8 || authInfoBytes[0] != 0 {
return nil, ErrInvalidDynamicLinkAuthInfo
}
- privKey := ed25519.NewKeyFromSeed(authInfo[1 : 1+ed25519.SeedSize])
+ privKey := ed25519.NewKeyFromSeed(authInfoBytes[1 : 1+ed25519.SeedSize])
pubKey := privKey.Public().(ed25519.PublicKey)
- nonce := binary.BigEndian.Uint64(authInfo[1+ed25519.SeedSize:])
+ nonce := binary.BigEndian.Uint64(authInfoBytes[1+ed25519.SeedSize:])
return &Publisher{
Public: Public{
@@ -98,18 +100,18 @@ func ReNonce(p *Publisher, randSource io.Reader) (*Publisher, error) {
}, nil
}
-func (dl *Publisher) AuthInfo() []byte {
+func (dl *Publisher) AuthInfo() *common.AuthInfo {
var ret [1 + ed25519.SeedSize + 8]byte
ret[0] = reservedByteValue
copy(ret[1:], dl.privKey.Seed())
binary.BigEndian.PutUint64(ret[1+ed25519.SeedSize:], dl.nonce)
- return ret[:]
+ return common.AuthInfoFromBytes(ret[:])
}
-func (dl *Publisher) calculateEncryptionKey() ([]byte, []byte) {
+func (dl *Publisher) calculateEncryptionKey() (*common.BlobKey, []byte) {
dataSeed := append(
[]byte{signatureForEncryptionKeyGeneration},
- dl.BlobName()...,
+ dl.BlobName().Bytes()...,
)
signature := ed25519.Sign(dl.privKey, dataSeed)
@@ -122,7 +124,12 @@ func (dl *Publisher) calculateEncryptionKey() ([]byte, []byte) {
return key, signature
}
-func (dl *Publisher) UpdateLinkData(r io.Reader, version uint64) (*PublicReader, []byte, error) {
+func (dl *Publisher) EncryptionKey() *common.BlobKey {
+ key, _ := dl.calculateEncryptionKey()
+ return key
+}
+
+func (dl *Publisher) UpdateLinkData(r io.Reader, version uint64) (*PublicReader, *common.BlobKey, error) {
encryptionKey, kvb := dl.calculateEncryptionKey()
// key validation block precedes the link data
@@ -160,7 +167,7 @@ func (dl *Publisher) UpdateLinkData(r io.Reader, version uint64) (*PublicReader,
signatureHasher := pr.toSignDataHasherPrefilled()
storeUint64(signatureHasher, pr.contentVersion)
- storeDynamicSizeBuff(signatureHasher, pr.iv)
+ storeDynamicSizeBuff(signatureHasher, pr.iv.Bytes())
signatureHasher.Write(encryptedLinkBuff.Bytes())
pr.signature = ed25519.Sign(dl.privKey, signatureHasher.Sum(nil))
diff --git a/pkg/internal/blobtypes/dynamiclink/publisher_test.go b/pkg/internal/blobtypes/dynamiclink/publisher_test.go
index 7f15ca9..2e6b8f6 100644
--- a/pkg/internal/blobtypes/dynamiclink/publisher_test.go
+++ b/pkg/internal/blobtypes/dynamiclink/publisher_test.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"testing"
"testing/iotest"
+ "github.com/cinode/go/pkg/common"
"github.com/stretchr/testify/require"
)
@@ -71,15 +72,17 @@ func TestFromAuthInfo(t *testing.T) {
})
t.Run("Invalid auth info", func(t *testing.T) {
- for i := 0; i < len(authInfo)-1; i++ {
- dl2, err := FromAuthInfo(authInfo[:i])
+ authInfoBytes := authInfo.Bytes()
+ for i := 0; i < len(authInfoBytes)-1; i++ {
+ brokenAuthInfo := common.AuthInfoFromBytes(authInfoBytes[:i])
+ dl2, err := FromAuthInfo(brokenAuthInfo)
require.ErrorIs(t, err, ErrInvalidDynamicLinkAuthInfo)
require.Nil(t, dl2)
}
})
}
-func TestRenonc(t *testing.T) {
+func TestReNonce(t *testing.T) {
dl1, err := Create(rand.Reader)
require.NoError(t, err)
diff --git a/pkg/internal/blobtypes/dynamiclink/vectors_test.go b/pkg/internal/blobtypes/dynamiclink/vectors_test.go
index 8280ec5..8a41646 100644
--- a/pkg/internal/blobtypes/dynamiclink/vectors_test.go
+++ b/pkg/internal/blobtypes/dynamiclink/vectors_test.go
@@ -27,7 +27,6 @@ import (
"testing"
"github.com/cinode/go/pkg/common"
- "github.com/cinode/go/pkg/internal/utilities/cipherfactory"
"github.com/stretchr/testify/require"
)
@@ -68,8 +67,12 @@ func TestVectors(t *testing.T) {
t.Run(testCase.Name, func(t *testing.T) {
t.Run("validate public scope", func(t *testing.T) {
err := func() error {
+ bn, err := common.BlobNameFromBytes(testCase.BlobName)
+ if err != nil {
+ return err
+ }
pr, err := FromPublicData(
- common.BlobName(testCase.BlobName),
+ bn,
bytes.NewReader(testCase.UpdateDataset),
)
if err != nil {
@@ -95,16 +98,22 @@ func TestVectors(t *testing.T) {
t.Run("validate private scope", func(t *testing.T) {
err := func() error {
+ bn, err := common.BlobNameFromBytes(testCase.BlobName)
+ if err != nil {
+ return err
+ }
pr, err := FromPublicData(
- common.BlobName(testCase.BlobName),
+ bn,
bytes.NewReader(testCase.UpdateDataset),
)
if err != nil {
return err
}
- dr, err := pr.GetLinkDataReader(cipherfactory.Key(testCase.EncryptionKey))
+ dr, err := pr.GetLinkDataReader(
+ common.BlobKeyFromBytes(testCase.EncryptionKey),
+ )
if err != nil {
return err
}
diff --git a/pkg/internal/utilities/cipherfactory/cipher_factory.go b/pkg/internal/utilities/cipherfactory/cipher_factory.go
index 7d17dcb..6d8f3c0 100644
--- a/pkg/internal/utilities/cipherfactory/cipher_factory.go
+++ b/pkg/internal/utilities/cipherfactory/cipher_factory.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,6 +22,7 @@ import (
"fmt"
"io"
+ "github.com/cinode/go/pkg/common"
"golang.org/x/crypto/chacha20"
)
@@ -39,7 +40,7 @@ const (
reservedByteForKeyType byte = 0
)
-func StreamCipherReader(key Key, iv IV, r io.Reader) (io.Reader, error) {
+func StreamCipherReader(key *common.BlobKey, iv *common.BlobIV, r io.Reader) (io.Reader, error) {
stream, err := _cipherForKeyIV(key, iv)
if err != nil {
return nil, err
@@ -47,7 +48,7 @@ func StreamCipherReader(key Key, iv IV, r io.Reader) (io.Reader, error) {
return &cipher.StreamReader{S: stream, R: r}, nil
}
-func StreamCipherWriter(key Key, iv IV, w io.Writer) (io.Writer, error) {
+func StreamCipherWriter(key *common.BlobKey, iv *common.BlobIV, w io.Writer) (io.Writer, error) {
stream, err := _cipherForKeyIV(key, iv)
if err != nil {
return nil, err
@@ -55,18 +56,20 @@ func StreamCipherWriter(key Key, iv IV, w io.Writer) (io.Writer, error) {
return cipher.StreamWriter{S: stream, W: w}, nil
}
-func _cipherForKeyIV(key Key, iv IV) (cipher.Stream, error) {
- if len(key) == 0 || key[0] != reservedByteForKeyType {
+func _cipherForKeyIV(key *common.BlobKey, iv *common.BlobIV) (cipher.Stream, error) {
+ keyBytes := key.Bytes()
+ if len(keyBytes) == 0 || keyBytes[0] != reservedByteForKeyType {
return nil, ErrInvalidEncryptionConfigKeyType
}
- if len(key) != chacha20.KeySize+1 {
- return nil, fmt.Errorf("%w, got %d bytes", ErrInvalidEncryptionConfigKeySize, len(key)+1)
+ if len(keyBytes) != chacha20.KeySize+1 {
+ return nil, fmt.Errorf("%w, got %d bytes", ErrInvalidEncryptionConfigKeySize, len(keyBytes))
}
- if len(iv) != chacha20.NonceSizeX {
- return nil, fmt.Errorf("%w, got %d bytes", ErrInvalidEncryptionConfigIVSize, len(iv))
+ ivBytes := iv.Bytes()
+ if len(ivBytes) != chacha20.NonceSizeX {
+ return nil, fmt.Errorf("%w, got %d bytes", ErrInvalidEncryptionConfigIVSize, len(ivBytes))
}
- return chacha20.NewUnauthenticatedCipher(key[1:], iv)
+ return chacha20.NewUnauthenticatedCipher(keyBytes[1:], ivBytes)
}
diff --git a/pkg/internal/utilities/cipherfactory/cipher_factory_test.go b/pkg/internal/utilities/cipherfactory/cipher_factory_test.go
index ea29b26..2a19954 100644
--- a/pkg/internal/utilities/cipherfactory/cipher_factory_test.go
+++ b/pkg/internal/utilities/cipherfactory/cipher_factory_test.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@ import (
"io"
"testing"
+ "github.com/cinode/go/pkg/common"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/chacha20"
)
@@ -59,13 +60,21 @@ func TestCipherForKeyIV(t *testing.T) {
},
} {
t.Run(d.desc, func(t *testing.T) {
- sr, err := StreamCipherReader(d.key, d.iv, bytes.NewReader([]byte{}))
+ sr, err := StreamCipherReader(
+ common.BlobKeyFromBytes(d.key),
+ common.BlobIVFromBytes(d.iv),
+ bytes.NewReader([]byte{}),
+ )
require.ErrorIs(t, err, d.err)
if err == nil {
require.NotNil(t, sr)
}
- sw, err := StreamCipherWriter(d.key, d.iv, bytes.NewBuffer(nil))
+ sw, err := StreamCipherWriter(
+ common.BlobKeyFromBytes(d.key),
+ common.BlobIVFromBytes(d.iv),
+ bytes.NewBuffer(nil),
+ )
require.ErrorIs(t, err, d.err)
if err == nil {
require.NotNil(t, sw)
@@ -75,8 +84,8 @@ func TestCipherForKeyIV(t *testing.T) {
}
func TestStreamCipherRoundtrip(t *testing.T) {
- key := make([]byte, chacha20.KeySize+1)
- iv := make([]byte, chacha20.NonceSizeX)
+ key := common.BlobKeyFromBytes(make([]byte, chacha20.KeySize+1))
+ iv := common.BlobIVFromBytes(make([]byte, chacha20.NonceSizeX))
data := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
buf := bytes.NewBuffer(nil)
diff --git a/pkg/internal/utilities/cipherfactory/generator.go b/pkg/internal/utilities/cipherfactory/generator.go
index 50caa71..06388b8 100644
--- a/pkg/internal/utilities/cipherfactory/generator.go
+++ b/pkg/internal/utilities/cipherfactory/generator.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2022 Bartłomiej Święcki (byo)
+Copyright © 2023 Bartłomiej Święcki (byo)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -33,7 +33,7 @@ const (
type KeyGenerator interface {
io.Writer
- Generate() Key
+ Generate() *common.BlobKey
}
type keyGenerator struct {
@@ -42,16 +42,16 @@ type keyGenerator struct {
func (g keyGenerator) Write(b []byte) (int, error) { return g.h.Write(b) }
-func (g keyGenerator) Generate() Key {
- return append(
+func (g keyGenerator) Generate() *common.BlobKey {
+ return common.BlobKeyFromBytes(append(
[]byte{reservedByteForKeyType},
g.h.Sum(nil)[:chacha20.KeySize]...,
- )
+ ))
}
type IVGenerator interface {
io.Writer
- Generate() IV
+ Generate() *common.BlobIV
}
type ivGenerator struct {
@@ -60,15 +60,14 @@ type ivGenerator struct {
func (g ivGenerator) Write(b []byte) (int, error) { return g.h.Write(b) }
-func (g ivGenerator) Generate() IV {
- return g.h.Sum(nil)[:chacha20.NonceSizeX]
+func (g ivGenerator) Generate() *common.BlobIV {
+ return common.BlobIVFromBytes(g.h.Sum(nil)[:chacha20.NonceSizeX])
}
func NewKeyGenerator(t common.BlobType) KeyGenerator {
h := sha256.New()
h.Write([]byte{preambleHashKey, reservedByteForKeyType, t.IDByte()})
return keyGenerator{h: h}
-
}
func NewIVGenerator(t common.BlobType) IVGenerator {
@@ -77,12 +76,12 @@ func NewIVGenerator(t common.BlobType) IVGenerator {
return ivGenerator{h: h}
}
-var defaultXChaCha20IV = func() IV {
+var defaultXChaCha20IV = func() *common.BlobIV {
h := sha256.New()
h.Write([]byte{preambleHashDefaultIV, reservedByteForKeyType})
- return h.Sum(nil)[:chacha20.NonceSizeX]
+ return common.BlobIVFromBytes(h.Sum(nil)[:chacha20.NonceSizeX])
}()
-func (k Key) DefaultIV() IV {
+func DefaultIV(k *common.BlobKey) *common.BlobIV {
return defaultXChaCha20IV
}
diff --git a/pkg/internal/utilities/cipherfactory/generator_test.go b/pkg/internal/utilities/cipherfactory/generator_test.go
index cd146d6..3cb5710 100644
--- a/pkg/internal/utilities/cipherfactory/generator_test.go
+++ b/pkg/internal/utilities/cipherfactory/generator_test.go
@@ -46,7 +46,7 @@ func TestGenerator(t *testing.T) {
_, err = _cipherForKeyIV(key, iv)
require.NoError(t, err)
- _, err = _cipherForKeyIV(key, key.DefaultIV())
+ _, err = _cipherForKeyIV(key, DefaultIV(key))
require.NoError(t, err)
// Check initial bytes of keys only - since key and IV are of different
@@ -55,9 +55,12 @@ func TestGenerator(t *testing.T) {
// then the generation of key and iv for the same input dataset would
// be using same hashed dataset which may be exploitable since IV
// is made public
- require.NotEqual(t, key[1:1+8], iv[:8])
- require.NotEqual(t, key[1:1+8], key.DefaultIV()[:8])
- require.NotEqual(t, iv[:8], key.DefaultIV()[:8])
+ keyBytes := key.Bytes()
+ ivBytes := iv.Bytes()
+ defIvBytes := DefaultIV(key).Bytes()
+ require.NotEqual(t, keyBytes[1:1+8], ivBytes[:8])
+ require.NotEqual(t, keyBytes[1:1+8], defIvBytes[:8])
+ require.NotEqual(t, ivBytes[:8], defIvBytes[:8])
// Note: once other key types are introduced, we should also check
// that for different key types there are different hashes
diff --git a/pkg/internal/utilities/headwriter/headwriter.go b/pkg/internal/utilities/headwriter/headwriter.go
new file mode 100644
index 0000000..4dfa6be
--- /dev/null
+++ b/pkg/internal/utilities/headwriter/headwriter.go
@@ -0,0 +1,45 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package headwriter
+
+type Writer struct {
+ limit int
+ data []byte
+}
+
+func New(limit int) Writer {
+ return Writer{
+ limit: limit,
+ data: make([]byte, 0, limit),
+ }
+}
+
+func (h *Writer) Write(b []byte) (int, error) {
+ if len(h.data) >= h.limit {
+ return len(b), nil
+ }
+
+ if len(h.data)+len(b) > h.limit {
+ h.data = append(h.data, b[:h.limit-len(h.data)]...)
+ return len(b), nil
+ }
+
+ h.data = append(h.data, b...)
+ return len(b), nil
+}
+
+func (h *Writer) Head() []byte { return h.data }
diff --git a/pkg/internal/utilities/headwriter/headwriter_test.go b/pkg/internal/utilities/headwriter/headwriter_test.go
new file mode 100644
index 0000000..d524c68
--- /dev/null
+++ b/pkg/internal/utilities/headwriter/headwriter_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package headwriter
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestHeadWriter(t *testing.T) {
+ lw := New(10)
+
+ n, err := lw.Write([]byte{0, 1, 2, 3})
+ require.NoError(t, err)
+ require.Equal(t, 4, n)
+ require.Equal(t, []byte{0, 1, 2, 3}, lw.Head())
+
+ n, err = lw.Write([]byte{})
+ require.NoError(t, err)
+ require.Equal(t, 0, n)
+ require.Equal(t, []byte{0, 1, 2, 3}, lw.Head())
+
+ n, err = lw.Write([]byte{4, 5, 6, 7})
+ require.NoError(t, err)
+ require.Equal(t, 4, n)
+ require.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7}, lw.Head())
+
+ n, err = lw.Write([]byte{8, 9, 10})
+ require.NoError(t, err)
+ require.Equal(t, 3, n)
+ require.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, lw.Head())
+
+ n, err = lw.Write([]byte{11, 12, 13, 14})
+ require.NoError(t, err)
+ require.Equal(t, 4, n)
+ require.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, lw.Head())
+}
diff --git a/pkg/protobuf/protobuf.go b/pkg/protobuf/protobuf.go
deleted file mode 100644
index 8dae6e1..0000000
--- a/pkg/protobuf/protobuf.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright © 2022 Bartłomiej Święcki (byo)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package protobuf
-
-//go:generate protoc --go_out=. protobuf.proto
-
-import (
- "errors"
- "time"
-
- "google.golang.org/protobuf/proto"
-)
-
-var (
- ErrInvalidEntrypoint = errors.New("invalid entrypoint")
- ErrInvalidEntrypointTime = errors.New("%w: time validation failed")
-)
-
-func (ep *Entrypoint) ToBytes() ([]byte, error) {
- return proto.Marshal(ep)
-}
-
-func EntryPointFromBytes(b []byte) (*Entrypoint, error) {
- ret := &Entrypoint{}
- err := proto.Unmarshal(b, ret)
- if err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-func (wi *WriterInfo) ToBytes() ([]byte, error) {
- return proto.Marshal(wi)
-}
-
-func WriterInfoFromBytes(b []byte) (*WriterInfo, error) {
- ret := &WriterInfo{}
- err := proto.Unmarshal(b, ret)
- if err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-func (ep *Entrypoint) Validate(currentTime time.Time) error {
- currentTimeMicro := currentTime.UnixMicro()
-
- if ep.GetNotValidAfterUnixMicro() != 0 &&
- currentTimeMicro > ep.GetNotValidAfterUnixMicro() {
- return ErrInvalidEntrypointTime
- }
-
- if ep.GetNotValidBeforeUnixMicro() != 0 &&
- currentTimeMicro < ep.GetNotValidBeforeUnixMicro() {
- return ErrInvalidEntrypointTime
- }
-
- return nil
-}
diff --git a/pkg/structure/cinodefs.go b/pkg/structure/cinodefs.go
deleted file mode 100644
index e9e9cd4..0000000
--- a/pkg/structure/cinodefs.go
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
-Copyright © 2023 Bartłomiej Święcki (byo)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package structure
-
-import (
- "context"
- "io"
- "strings"
- "time"
-
- "github.com/cinode/go/pkg/blenc"
- "github.com/cinode/go/pkg/common"
- "github.com/cinode/go/pkg/internal/utilities/cipherfactory"
- "github.com/cinode/go/pkg/protobuf"
- "google.golang.org/protobuf/proto"
-)
-
-type CinodeFS struct {
- BE blenc.BE
- RootEntrypoint *protobuf.Entrypoint
- MaxLinkRedirects int
- CurrentTimeF func() time.Time
-}
-
-func (d *CinodeFS) OpenContent(ctx context.Context, ep *protobuf.Entrypoint) (io.ReadCloser, error) {
- return d.BE.Open(
- ctx,
- common.BlobName(ep.BlobName),
- cipherfactory.Key(ep.GetKeyInfo().GetKey()),
- )
-}
-
-func (d *CinodeFS) FindEntrypoint(ctx context.Context, path string) (*protobuf.Entrypoint, error) {
- return d.findEntrypointInDir(ctx, d.RootEntrypoint, path, d.currentTime())
-}
-
-func (d *CinodeFS) findEntrypointInDir(
- ctx context.Context,
- ep *protobuf.Entrypoint,
- remainingPath string,
- currentTime time.Time,
-) (
- *protobuf.Entrypoint,
- error,
-) {
- ep, err := DereferenceLink(ctx, d.BE, ep, d.MaxLinkRedirects, currentTime)
- if err != nil {
- return nil, err
- }
-
- if ep.MimeType != CinodeDirMimeType {
- return nil, ErrNotADirectory
- }
-
- rc, err := d.OpenContent(ctx, ep)
- if err != nil {
- return nil, err
- }
- defer rc.Close()
-
- data, err := io.ReadAll(rc)
- if err != nil {
- return nil, err
- }
-
- dirStruct := protobuf.Directory{}
- err = proto.Unmarshal(data, &dirStruct)
- if err != nil {
- return nil, err
- }
-
- pathParts := strings.SplitN(remainingPath, "/", 2)
- entryName := pathParts[0]
- var entry *protobuf.Entrypoint
- var exists bool
- for _, dirEntry := range dirStruct.GetEntries() {
- if entryName != dirEntry.GetName() {
- continue
- }
- if exists {
- // Doubled entry - reject such directory structure
- // to avoid ambiguity-based attacks
- return nil, ErrCorruptedLinkData
- }
- exists = true
- entry = dirEntry.GetEp()
- }
- if !exists {
- return nil, ErrNotFound
- }
-
- if len(pathParts) == 1 {
- // Found the entry, no need to descend any further, only dereference the link
- entry, err = DereferenceLink(ctx, d.BE, entry, d.MaxLinkRedirects, currentTime)
- if err != nil {
- return nil, err
- }
- return entry, nil
- }
-
- return d.findEntrypointInDir(ctx, entry, pathParts[1], currentTime)
-}
-
-func (d *CinodeFS) currentTime() time.Time {
- if d.CurrentTimeF != nil {
- return d.CurrentTimeF()
- }
- return time.Now()
-}
diff --git a/pkg/structure/directory.go b/pkg/structure/directory.go
deleted file mode 100644
index 344c3ca..0000000
--- a/pkg/structure/directory.go
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
-Copyright © 2023 Bartłomiej Święcki (byo)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package structure
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "html/template"
- "io"
- "io/fs"
- "mime"
- "net/http"
- "path"
- "path/filepath"
- "sort"
-
- _ "embed"
-
- "github.com/cinode/go/pkg/blenc"
- "github.com/cinode/go/pkg/blobtypes"
- "github.com/cinode/go/pkg/common"
- "github.com/cinode/go/pkg/protobuf"
- "github.com/cinode/go/pkg/utilities/golang"
- "golang.org/x/exp/slog"
- "google.golang.org/protobuf/proto"
-)
-
-const (
- CinodeDirMimeType = "application/cinode-dir"
-)
-
-var (
- ErrNotFound = blenc.ErrNotFound
- ErrNotADirectory = errors.New("entry is not a directory")
- ErrNotAFile = errors.New("entry is not a file")
-)
-
-func UploadStaticDirectory(ctx context.Context, log *slog.Logger, fsys fs.FS, be blenc.BE) (*protobuf.Entrypoint, error) {
- c := dirCompiler{
- ctx: ctx,
- fsys: fsys,
- be: be,
- log: log,
- }
-
- return c.compilePath(ctx, ".")
-}
-
-type headWriter struct {
- limit int
- data []byte
-}
-
-func newHeadWriter(limit int) headWriter {
- return headWriter{
- limit: limit,
- data: make([]byte, limit),
- }
-}
-
-func (h *headWriter) Write(b []byte) (int, error) {
- if len(h.data) >= h.limit {
- return len(b), nil
- }
-
- if len(h.data)+len(b) > h.limit {
- h.data = append(h.data, b[:h.limit-len(h.data)]...)
- return len(b), nil
- }
-
- h.data = append(h.data, b...)
- return len(b), nil
-}
-
-type dirCompiler struct {
- ctx context.Context
- fsys fs.FS
- be blenc.BE
- log *slog.Logger
-}
-
-func (d *dirCompiler) compilePath(ctx context.Context, path string) (*protobuf.Entrypoint, error) {
- st, err := fs.Stat(d.fsys, path)
- if err != nil {
- d.log.DebugContext(ctx, "failed to stat path", "path", path, "err", err)
- return nil, fmt.Errorf("couldn't check path: %w", err)
- }
-
- if st.IsDir() {
- return d.compileDir(ctx, path)
- }
-
- if st.Mode().IsRegular() {
- return d.compileFile(ctx, path)
- }
-
- d.log.ErrorContext(ctx, "path is neither dir nor a regular file", "path", path)
- return nil, fmt.Errorf("neither dir nor a regular file: %v", path)
-}
-
-// UploadStaticBlob uploads blob to the associated datastore and returns entrypoint to that file
-//
-// if mimeType is an empty string, it will be guessed from the content defaulting to
-func UploadStaticBlob(ctx context.Context, be blenc.BE, r io.Reader, mimeType string, log *slog.Logger) (*protobuf.Entrypoint, error) {
- // Use the dataHead to store first 512 bytes of data into a buffer while uploading it to the blenc layer
- // This buffer may then be used to detect the mime type
- dataHead := newHeadWriter(512)
-
- bn, ki, _, err := be.Create(context.Background(), blobtypes.Static, io.TeeReader(r, &dataHead))
- if err != nil {
- log.ErrorContext(ctx, "failed to upload static file", "err", err)
- return nil, err
- }
-
- log.DebugContext(ctx, "static file uploaded successfully")
-
- if mimeType == "" {
- mimeType = http.DetectContentType(dataHead.data)
- log.DebugContext(ctx, "automatically detected content type", "contentType", mimeType)
- }
-
- return &protobuf.Entrypoint{
- BlobName: bn,
- KeyInfo: &protobuf.KeyInfo{Key: ki},
- MimeType: mimeType,
- }, nil
-}
-
-func (d *dirCompiler) compileFile(ctx context.Context, path string) (*protobuf.Entrypoint, error) {
- d.log.InfoContext(ctx, "compiling file", "path", path)
- fl, err := d.fsys.Open(path)
- if err != nil {
- d.log.ErrorContext(ctx, "failed to open file", "path", path, "err", err)
- return nil, fmt.Errorf("couldn't open file %v: %w", path, err)
- }
- defer fl.Close()
-
- ep, err := UploadStaticBlob(
- ctx,
- d.be,
- fl,
- mime.TypeByExtension(filepath.Ext(path)),
- d.log.With("path", path),
- )
- if err != nil {
- return nil, fmt.Errorf("failed to upload file %v: %w", path, err)
- }
-
- return ep, nil
-}
-
-func (d *dirCompiler) compileDir(ctx context.Context, p string) (*protobuf.Entrypoint, error) {
- fileList, err := fs.ReadDir(d.fsys, p)
- if err != nil {
- d.log.ErrorContext(ctx, "couldn't read contents of dir", "path", p, "err", err)
- return nil, fmt.Errorf("couldn't read contents of dir %v: %w", p, err)
- }
-
- dir := StaticDir{}
- for _, e := range fileList {
- subPath := path.Join(p, e.Name())
-
- ep, err := d.compilePath(ctx, subPath)
- if err != nil {
- return nil, err
- }
-
- dir.SetEntry(e.Name(), ep)
- }
-
- ep, err := dir.GenerateEntrypoint(context.Background(), d.be)
- if err != nil {
- d.log.ErrorContext(ctx, "failed to serialize directory", "path", p, "err", err)
- return nil, fmt.Errorf("can not serialize directory %v: %w", p, err)
- }
-
- d.log.DebugContext(ctx,
- "directory uploaded successfully", "path", p,
- "blobName", common.BlobName(ep.BlobName).String(),
- )
- return ep, nil
-}
-
-type StaticDir struct {
- entries map[string]*protobuf.Entrypoint
-}
-
-func (s *StaticDir) SetEntry(name string, ep *protobuf.Entrypoint) {
- if s.entries == nil {
- s.entries = map[string]*protobuf.Entrypoint{}
- }
- s.entries[name] = ep
-}
-
-//go:embed templates/dir.html
-var _dirIndexTemplateStr string
-var dirIndexTemplate = golang.Must(
- template.New("dir").
- Funcs(template.FuncMap{
- "isDir": func(entry *protobuf.Entrypoint) bool {
- return entry.MimeType == CinodeDirMimeType
- },
- }).
- Parse(_dirIndexTemplateStr),
-)
-
-func (s *StaticDir) GenerateIndex(ctx context.Context, log *slog.Logger, indexName string, be blenc.BE) error {
- buf := bytes.NewBuffer(nil)
- err := dirIndexTemplate.Execute(buf, map[string]any{
- "entries": s.getProtobufData().GetEntries(),
- "indexName": indexName,
- })
- if err != nil {
- return err
- }
-
- ep, err := UploadStaticBlob(ctx, be, bytes.NewReader(buf.Bytes()), "text/html", log)
- if err != nil {
- return err
- }
-
- s.entries[indexName] = ep
- return nil
-}
-
-func (s *StaticDir) getProtobufData() *protobuf.Directory {
- // Convert to protobuf format
- protoData := protobuf.Directory{
- Entries: make([]*protobuf.Directory_Entry, 0, len(s.entries)),
- }
- for name, ep := range s.entries {
- protoData.Entries = append(protoData.Entries, &protobuf.Directory_Entry{
- Name: name,
- Ep: ep,
- })
- }
-
- // Sort by name
- sort.Slice(protoData.Entries, func(i, j int) bool {
- return protoData.Entries[i].Name < protoData.Entries[j].Name
- })
-
- return &protoData
-}
-
-func (s *StaticDir) GenerateEntrypoint(ctx context.Context, be blenc.BE) (*protobuf.Entrypoint, error) {
- // TODO: Introduce various directory split strategies
- data, err := proto.Marshal(s.getProtobufData())
- if err != nil {
- return nil, err
- }
-
- bn, ki, _, err := be.Create(context.Background(), blobtypes.Static, bytes.NewReader(data))
- if err != nil {
- return nil, err
- }
-
- return &protobuf.Entrypoint{
- BlobName: bn,
- KeyInfo: &protobuf.KeyInfo{Key: ki},
- MimeType: CinodeDirMimeType,
- }, nil
-}
diff --git a/pkg/structure/http.go b/pkg/structure/http.go
deleted file mode 100644
index 176ec59..0000000
--- a/pkg/structure/http.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Copyright © 2023 Bartłomiej Święcki (byo)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package structure
-
-import (
- "errors"
- "io"
- "log"
- "net/http"
- "strings"
-)
-
-type HTTPHandler struct {
- FS *CinodeFS
- IndexFile string
-}
-
-func (h *HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method != "GET" {
- http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
- return
- }
-
- path := r.URL.Path
- if strings.HasSuffix(path, "/") {
- path += h.IndexFile
- }
- path = strings.TrimPrefix(path, "/")
-
- fileEP, err := h.FS.FindEntrypoint(r.Context(), path)
- switch {
- case errors.Is(err, ErrNotFound):
- http.NotFound(w, r)
- return
- case err != nil:
- log.Println("Error serving request:", err)
- http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
- return
- }
-
- if fileEP.MimeType == CinodeDirMimeType {
- http.Redirect(w, r, r.URL.Path+"/", http.StatusPermanentRedirect)
- return
- }
-
- w.Header().Set("Content-Type", fileEP.GetMimeType())
- rc, err := h.FS.OpenContent(r.Context(), fileEP)
- if err != nil {
- log.Printf("Error sending file: %v", err)
- }
- defer rc.Close()
-
- _, err = io.Copy(w, rc)
- if err != nil {
- log.Printf("Error sending file: %v", err)
- }
-
-}
diff --git a/pkg/structure/link.go b/pkg/structure/link.go
deleted file mode 100644
index e9d2b44..0000000
--- a/pkg/structure/link.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-Copyright © 2023 Bartłomiej Święcki (byo)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package structure
-
-import (
- "bytes"
- "context"
- "errors"
- "io"
- "time"
-
- "github.com/cinode/go/pkg/blenc"
- "github.com/cinode/go/pkg/blobtypes"
- "github.com/cinode/go/pkg/common"
- "github.com/cinode/go/pkg/internal/utilities/cipherfactory"
- "github.com/cinode/go/pkg/protobuf"
-)
-
-var (
- ErrMaxRedirectsReached = errors.New("maximum limit of dynamic link redirects reached")
- ErrCorruptedLinkData = errors.New("corrupted link data")
- ErrCorruptedDirectoryData = errors.New("corrupted directory data")
- ErrInvalidEntrypoint = protobuf.ErrInvalidEntrypoint
- ErrInvalidEntrypointTime = protobuf.ErrInvalidEntrypointTime
-)
-
-func CreateLink(ctx context.Context, be blenc.BE, ep *protobuf.Entrypoint) (*protobuf.Entrypoint, *protobuf.WriterInfo, error) {
- epBytes, err := ep.ToBytes()
- if err != nil {
- return nil, nil, err
- }
-
- name, key, authInfo, err := be.Create(ctx, blobtypes.DynamicLink, bytes.NewReader(epBytes))
- if err != nil {
- return nil, nil, err
- }
-
- return &protobuf.Entrypoint{
- BlobName: name,
- KeyInfo: &protobuf.KeyInfo{
- Key: key,
- },
- }, &protobuf.WriterInfo{
- BlobName: name,
- Key: key,
- AuthInfo: authInfo,
- }, nil
-}
-
-func UpdateLink(ctx context.Context, be blenc.BE, wi *protobuf.WriterInfo, ep *protobuf.Entrypoint) (*protobuf.Entrypoint, error) {
- epBytes, err := ep.ToBytes()
- if err != nil {
- return nil, err
- }
-
- err = be.Update(ctx, wi.BlobName, wi.AuthInfo, wi.Key, bytes.NewReader(epBytes))
- if err != nil {
- return nil, err
- }
-
- return &protobuf.Entrypoint{
- BlobName: wi.BlobName,
- KeyInfo: &protobuf.KeyInfo{
- Key: wi.Key,
- },
- }, nil
-}
-
-func DereferenceLink(
- ctx context.Context,
- be blenc.BE,
- link *protobuf.Entrypoint,
- maxRedirects int,
- currentTime time.Time,
-) (
- *protobuf.Entrypoint,
- error,
-) {
- err := link.Validate(currentTime)
- if err != nil {
- return nil, err
- }
-
- for common.BlobName(link.BlobName).Type() == blobtypes.DynamicLink {
- if maxRedirects == 0 {
- return nil, ErrMaxRedirectsReached
- }
- maxRedirects--
-
- rc, err := be.Open(
- ctx,
- common.BlobName(link.BlobName),
- cipherfactory.Key(link.GetKeyInfo().GetKey()),
- )
- if err != nil {
- return nil, err
- }
- defer rc.Close()
-
- // TODO: Constrain the buffer size
- data, err := io.ReadAll(rc)
- if err != nil {
- return nil, err
- }
-
- link, err = protobuf.EntryPointFromBytes(data)
- if err != nil {
- return nil, err
- }
-
- err = link.Validate(time.Now())
- if err != nil {
- return nil, err
- }
- }
-
- return link, nil
-}
diff --git a/pkg/structure/templates/dir.html b/pkg/structure/templates/dir.html
deleted file mode 100644
index f5ad40a..0000000
--- a/pkg/structure/templates/dir.html
+++ /dev/null
@@ -1,73 +0,0 @@
-{{/*
-Copyright © 2023 Bartłomiej Święcki (byo)
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/}}
-
-
-
-
- Directory Listing
-
-
-
-
- Directory Listing
-
-
- Dir |
- Name |
- MimeType |
-
- {{range .entries}}{{if isDir .Ep}}{{if ne .Name $.indexName}}
-
- [DIR] |
- {{.Name}} |
- {{.Ep.MimeType}} |
-
- {{end}}{{end}}{{end}}
- {{range .entries}}{{if not (isDir .Ep) }}{{if ne .Name $.indexName}}
-
- |
- {{.Name}} |
- {{.Ep.MimeType}} |
-
- {{end}}{{end}}{{end}}
-
-
-
-
diff --git a/pkg/internal/utilities/cipherfactory/types.go b/pkg/utilities/golang/assert.go
similarity index 84%
rename from pkg/internal/utilities/cipherfactory/types.go
rename to pkg/utilities/golang/assert.go
index af03f8b..220e3c0 100644
--- a/pkg/internal/utilities/cipherfactory/types.go
+++ b/pkg/utilities/golang/assert.go
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package cipherfactory
+package golang
-// Key with cipher type
-type Key []byte
-
-// IV
-type IV []byte
+func Assert(b bool, message string) {
+ if !b {
+ panic("Assertion failed: " + message)
+ }
+}
diff --git a/pkg/utilities/golang/assert_test.go b/pkg/utilities/golang/assert_test.go
new file mode 100644
index 0000000..55b3879
--- /dev/null
+++ b/pkg/utilities/golang/assert_test.go
@@ -0,0 +1,32 @@
+/*
+Copyright © 2023 Bartłomiej Święcki (byo)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package golang
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestAssert(t *testing.T) {
+ require.NotPanics(t, func() {
+ Assert(true, "must not happen")
+ })
+ require.Panics(t, func() {
+ Assert(false, "must panic")
+ })
+}
diff --git a/testvectors/testblobs/base.go b/testvectors/testblobs/base.go
index cdd730b..ee1c107 100644
--- a/testvectors/testblobs/base.go
+++ b/testvectors/testblobs/base.go
@@ -23,14 +23,14 @@ import (
"net/http"
"net/url"
- "github.com/cinode/go/pkg/protobuf"
- "github.com/jbenet/go-base58"
+ "github.com/cinode/go/pkg/cinodefs"
+ "github.com/cinode/go/pkg/common"
)
type TestBlob struct {
UpdateDataset []byte
- BlobName []byte
- EncryptionKey []byte
+ BlobName *common.BlobName
+ EncryptionKey *common.BlobKey
DecryptedDataset []byte
}
@@ -39,7 +39,7 @@ func (s *TestBlob) Put(baseUrl string) error {
}
func (s *TestBlob) PutWithAuth(baseUrl, username, password string) error {
- finalUrl, err := url.JoinPath(baseUrl, base58.Encode(s.BlobName))
+ finalUrl, err := url.JoinPath(baseUrl, s.BlobName.String())
if err != nil {
return err
}
@@ -75,7 +75,7 @@ func (s *TestBlob) PutWithAuth(baseUrl, username, password string) error {
}
func (s *TestBlob) Get(baseUrl string) ([]byte, error) {
- finalUrl, err := url.JoinPath(baseUrl, base58.Encode(s.BlobName))
+ finalUrl, err := url.JoinPath(baseUrl, s.BlobName.String())
if err != nil {
return nil, err
}
@@ -98,11 +98,9 @@ func (s *TestBlob) Get(baseUrl string) ([]byte, error) {
return body, nil
}
-func (s *TestBlob) Entrypoint() *protobuf.Entrypoint {
- return &protobuf.Entrypoint{
- BlobName: s.BlobName,
- KeyInfo: &protobuf.KeyInfo{
- Key: s.EncryptionKey,
- },
- }
+func (s *TestBlob) Entrypoint() *cinodefs.Entrypoint {
+ return cinodefs.EntrypointFromBlobNameAndKey(
+ s.BlobName,
+ s.EncryptionKey,
+ )
}
diff --git a/testvectors/testblobs/dynamiclink.go b/testvectors/testblobs/dynamiclink.go
index be15f2e..422d226 100644
--- a/testvectors/testblobs/dynamiclink.go
+++ b/testvectors/testblobs/dynamiclink.go
@@ -1,5 +1,10 @@
package testblobs
+import (
+ "github.com/cinode/go/pkg/common"
+ "github.com/cinode/go/pkg/utilities/golang"
+)
+
var DynamicLink = TestBlob{
[]byte{
0x00, 0x11, 0xDA, 0xDD, 0x0F, 0x94, 0xBE, 0xCA,
@@ -30,20 +35,20 @@ var DynamicLink = TestBlob{
0xD8, 0x3F, 0xDD, 0xB1, 0x1F, 0x22, 0x7C, 0xD9,
0x73, 0xCA, 0x26, 0x11, 0x29, 0x79, 0x03, 0xF9,
},
- []byte{
+ golang.Must(common.BlobNameFromBytes([]byte{
0x4F, 0xDA, 0x7E, 0xE6, 0xF2, 0x71, 0xB5, 0xEF,
0xFF, 0xD2, 0x05, 0x27, 0x0B, 0xBA, 0x11, 0x13,
0x13, 0xF5, 0xC9, 0x06, 0x9D, 0x6C, 0x36, 0x5F,
0x80, 0xD3, 0x50, 0xE3, 0xC5, 0x9B, 0x0E, 0x8D,
0xE6,
- },
- []byte{
+ })),
+ common.BlobKeyFromBytes([]byte{
0x00, 0x79, 0xD2, 0x68, 0xC8, 0xEB, 0xD6, 0xA1,
0xBD, 0x5D, 0xE8, 0x63, 0x1C, 0xF7, 0x73, 0x73,
0x77, 0x26, 0x99, 0x4E, 0xC7, 0x35, 0xD9, 0x81,
0xB5, 0x20, 0xA8, 0xD7, 0xD9, 0x0B, 0xD5, 0x05,
0x49,
- },
+ }),
[]byte{
0x64, 0x79, 0x6E, 0x61, 0x6D, 0x69, 0x63, 0x20,
0x6C, 0x69, 0x6E, 0x6B,