Skip to content

Commit

Permalink
Merge branch 'master' into relay-timeouts
Browse files Browse the repository at this point in the history
Signed-off-by: Cody Littley <[email protected]>
  • Loading branch information
cody-littley committed Nov 25, 2024
2 parents 46b02e2 + 0a4e852 commit b24f23d
Show file tree
Hide file tree
Showing 69 changed files with 2,437 additions and 585 deletions.
22 changes: 20 additions & 2 deletions common/aws/s3/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
"fmt"
"runtime"
"sync"

Expand Down Expand Up @@ -105,15 +106,32 @@ func NewClient(ctx context.Context, cfg commonaws.ClientConfig, logger logging.L
return ref, err
}

// PeekObjectSize returns the size in bytes of the object stored at
// bucket/key without downloading its contents, using an S3 HeadObject call.
func PeekObjectSize(ctx context.Context, s3Client *s3.Client, bucket, key string) (int64, error) {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}
	result, err := s3Client.HeadObject(ctx, input)
	if err != nil {
		return 0, fmt.Errorf("failed to head object: %w", err)
	}
	// ContentLength is a *int64 in the AWS SDK v2 response; guard before
	// dereferencing to avoid a nil pointer panic if the field is absent.
	if result.ContentLength == nil {
		return 0, fmt.Errorf("head object response for %s/%s has no content length", bucket, key)
	}
	return *result.ContentLength, nil
}

func (s *client) DownloadObject(ctx context.Context, bucket string, key string) ([]byte, error) {
size, err := PeekObjectSize(ctx, s.s3Client, bucket, key)
if err != nil {
return nil, err
}
buffer := manager.NewWriteAtBuffer(make([]byte, 0, size))

var partMiBs int64 = 10
downloader := manager.NewDownloader(s.s3Client, func(d *manager.Downloader) {
d.PartSize = partMiBs * 1024 * 1024 // 10MB per part
d.Concurrency = 3 //The number of goroutines to spin up in parallel per call to Upload when sending parts
})

buffer := manager.NewWriteAtBuffer([]byte{})
_, err := downloader.Download(ctx, buffer, &s3.GetObjectInput{
_, err = downloader.Download(ctx, buffer, &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
Expand Down
26 changes: 26 additions & 0 deletions common/read_only_map.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package common

// ReadOnlyMap wraps a plain map and exposes only read operations, signalling
// to callers that the underlying data must not be mutated.
type ReadOnlyMap[K comparable, V any] struct {
	data map[K]V
}

// NewReadOnlyMap wraps the provided map in a ReadOnlyMap. The map is not
// copied, so the caller must not modify it after construction.
func NewReadOnlyMap[K comparable, V any](data map[K]V) *ReadOnlyMap[K, V] {
	return &ReadOnlyMap[K, V]{data: data}
}

// Get looks up key, reporting via the second return value whether it is present.
func (m *ReadOnlyMap[K, V]) Get(key K) (V, bool) {
	v, found := m.data[key]
	return v, found
}

// Keys returns the map's keys in unspecified order.
func (m *ReadOnlyMap[K, V]) Keys() []K {
	out := make([]K, 0, len(m.data))
	for k := range m.data {
		out = append(out, k)
	}
	return out
}

// Len reports how many entries the map holds.
func (m *ReadOnlyMap[K, V]) Len() int {
	return len(m.data)
}
31 changes: 31 additions & 0 deletions common/read_only_map_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
package common_test

import (
"testing"

"github.com/Layr-Labs/eigenda/common"
"github.com/stretchr/testify/require"
)

// TestReadOnlyMap verifies read access, missing-key behavior, Len, and Keys
// of a ReadOnlyMap built from a small fixture map.
func TestReadOnlyMap(t *testing.T) {
	data := map[uint8]string{
		1: "one",
		2: "two",
		3: "three",
	}
	m := common.NewReadOnlyMap(data)
	for key, want := range data {
		got, ok := m.Get(key)
		require.True(t, ok)
		require.Equal(t, want, got)
	}
	missing, ok := m.Get(4)
	require.False(t, ok)
	require.Equal(t, "", missing)
	require.Equal(t, 3, m.Len())
	require.ElementsMatch(t, []uint8{1, 2, 3}, m.Keys())
}
260 changes: 245 additions & 15 deletions contracts/bindings/EigenDAServiceManager/binding.go

Large diffs are not rendered by default.

449 changes: 449 additions & 0 deletions contracts/bindings/IEigenDARelayRegistry/binding.go

Large diffs are not rendered by default.

213 changes: 180 additions & 33 deletions contracts/bindings/IEigenDAServiceManager/binding.go

Large diffs are not rendered by default.

48 changes: 24 additions & 24 deletions contracts/bindings/MockRollup/binding.go

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion contracts/compile.sh
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ function create_binding {
forge clean
forge build

contracts="PaymentVault SocketRegistry AVSDirectory DelegationManager BitmapUtils OperatorStateRetriever RegistryCoordinator BLSApkRegistry IndexRegistry StakeRegistry BN254 EigenDAServiceManager IEigenDAServiceManager MockRollup EjectionManager"
contracts="PaymentVault SocketRegistry AVSDirectory DelegationManager BitmapUtils OperatorStateRetriever RegistryCoordinator BLSApkRegistry IndexRegistry StakeRegistry BN254 EigenDAServiceManager IEigenDAServiceManager MockRollup EjectionManager IEigenDARelayRegistry"
for contract in $contracts; do
create_binding ./ $contract ./bindings
done
Expand Down
6 changes: 6 additions & 0 deletions core/chainio.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,12 @@ type Reader interface {
// GetRequiredQuorumNumbers returns set of required quorum numbers
GetRequiredQuorumNumbers(ctx context.Context, blockNumber uint32) ([]QuorumID, error)

// GetVersionedBlobParams returns the blob version parameters for the given blob version.
GetVersionedBlobParams(ctx context.Context, blobVersion uint8) (*BlobVersionParameters, error)

// GetAllVersionedBlobParams returns the blob version parameters for all registered blob versions.
GetAllVersionedBlobParams(ctx context.Context) (map[uint8]*BlobVersionParameters, error)

// GetActiveReservations returns active reservations (end timestamp > current timestamp)
GetActiveReservations(ctx context.Context, blockNumber uint32, accountIDs []string) (map[string]ActiveReservation, error)

Expand Down
6 changes: 6 additions & 0 deletions core/data.go
Original file line number Diff line number Diff line change
Expand Up @@ -612,3 +612,9 @@ type ActiveReservation struct {
type OnDemandPayment struct {
CumulativePayment *big.Int // Total amount deposited by the user
}

// BlobVersionParameters holds the per-blob-version encoding parameters read
// from the on-chain registry.
type BlobVersionParameters struct {
	// CodingRate is the erasure-coding expansion factor; chunk length is
	// derived as blobLength * CodingRate / NumChunks.
	CodingRate uint32
	// MaxNumOperators is the upper bound on operators that can be assigned
	// chunks under this version.
	MaxNumOperators uint32
	// NumChunks is the total number of chunks a blob is split into.
	NumChunks uint32
}
36 changes: 36 additions & 0 deletions core/eth/reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"crypto/ecdsa"
"math/big"
"strings"

"github.com/Layr-Labs/eigenda/common"
avsdir "github.com/Layr-Labs/eigenda/contracts/bindings/AVSDirectory"
Expand Down Expand Up @@ -580,6 +581,41 @@ func (t *Reader) GetRequiredQuorumNumbers(ctx context.Context, blockNumber uint3
return requiredQuorums, nil
}

// GetVersionedBlobParams fetches the on-chain blob parameters registered for
// the given blob version from the EigenDAServiceManager contract.
func (t *Reader) GetVersionedBlobParams(ctx context.Context, blobVersion uint8) (*core.BlobVersionParameters, error) {
	opts := &bind.CallOpts{Context: ctx}
	onchain, err := t.bindings.EigenDAServiceManager.GetBlobParams(opts, uint16(blobVersion))
	if err != nil {
		return nil, err
	}
	params := &core.BlobVersionParameters{
		CodingRate:      uint32(onchain.CodingRate),
		NumChunks:       uint32(onchain.NumChunks),
		MaxNumOperators: uint32(onchain.MaxNumOperators),
	}
	return params, nil
}

// GetAllVersionedBlobParams probes the contract for blob version parameters,
// starting at version 0 and stopping at the first version whose lookup
// reverts (i.e. is not registered). Returns an error if no versions exist.
func (t *Reader) GetAllVersionedBlobParams(ctx context.Context) (map[uint8]*core.BlobVersionParameters, error) {
	res := make(map[uint8]*core.BlobVersionParameters)
	// Iterate with an int counter: a uint8 counter would wrap 255 -> 0 and
	// loop forever if all 256 possible versions were registered.
	for version := 0; version <= 255; version++ {
		params, err := t.GetVersionedBlobParams(ctx, uint8(version))
		if err != nil {
			if strings.Contains(err.Error(), "execution reverted") {
				// No parameters registered for this version; stop probing.
				break
			}
			return nil, err
		}
		res[uint8(version)] = params
	}

	if len(res) == 0 {
		return nil, errors.New("no blob version parameters found")
	}

	return res, nil
}

func (t *Reader) GetActiveReservations(ctx context.Context, blockNumber uint32, accountIDs []string) (map[string]core.ActiveReservation, error) {
// contract is not implemented yet
return map[string]core.ActiveReservation{}, nil
Expand Down
2 changes: 1 addition & 1 deletion core/eth/writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -272,7 +272,7 @@ func (t *Writer) BuildConfirmBatchTxn(ctx context.Context, batchHeader *core.Bat
}

signedStakeForQuorums := serializeSignedStakeForQuorums(quorums)
batchH := eigendasrvmg.IEigenDAServiceManagerBatchHeader{
batchH := eigendasrvmg.BatchHeader{
BlobHeadersRoot: batchHeader.BatchRoot,
QuorumNumbers: quorumNumbers,
SignedStakeForQuorums: signedStakeForQuorums,
Expand Down
2 changes: 1 addition & 1 deletion core/mock/v2/validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func (v *MockShardValidator) ValidateBatchHeader(ctx context.Context, header *co
return args.Error(0)
}

func (v *MockShardValidator) ValidateBlobs(ctx context.Context, blobs []*corev2.BlobShard, pool common.WorkerPool, state *core.OperatorState) error {
func (v *MockShardValidator) ValidateBlobs(ctx context.Context, blobs []*corev2.BlobShard, blobVersionParams *corev2.BlobVersionParameterMap, pool common.WorkerPool, state *core.OperatorState) error {
args := v.Called()
return args.Error(0)
}
15 changes: 15 additions & 0 deletions core/mock/writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,21 @@ func (t *MockWriter) GetRequiredQuorumNumbers(ctx context.Context, blockNumber u
return result.([]uint8), args.Error(1)
}

// GetVersionedBlobParams returns the mocked blob version parameters, or nil
// plus the configured error when the mock's first return value is nil.
func (t *MockWriter) GetVersionedBlobParams(ctx context.Context, blobVersion uint8) (*core.BlobVersionParameters, error) {
	args := t.Called()
	first := args.Get(0)
	if first == nil {
		return nil, args.Error(1)
	}
	return first.(*core.BlobVersionParameters), args.Error(1)
}

// GetAllVersionedBlobParams returns the mocked map of blob version parameters.
func (t *MockWriter) GetAllVersionedBlobParams(ctx context.Context) (map[uint8]*core.BlobVersionParameters, error) {
	args := t.Called()
	// Guard against a nil first return value so a mock configured with
	// (nil, err) returns the error instead of panicking on the type
	// assertion — consistent with GetVersionedBlobParams.
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	result := args.Get(0)
	return result.(map[uint8]*core.BlobVersionParameters), args.Error(1)
}

func (t *MockWriter) PubkeyHashToOperator(ctx context.Context, operatorId core.OperatorID) (gethcommon.Address, error) {
args := t.Called()
result := args.Get(0)
Expand Down
2 changes: 1 addition & 1 deletion core/serialization.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ func (h BatchHeader) GetBatchHeaderHash() ([32]byte, error) {

// HashBatchHeader returns the hash of the BatchHeader that is used to emit the BatchConfirmed event
// ref: https://github.com/Layr-Labs/eigenda/blob/master/contracts/src/libraries/EigenDAHasher.sol#L57
func HashBatchHeader(batchHeader binding.IEigenDAServiceManagerBatchHeader) ([32]byte, error) {
func HashBatchHeader(batchHeader binding.BatchHeader) ([32]byte, error) {
// The order here has to match the field ordering of BatchHeader defined in IEigenDAServiceManager.sol
batchHeaderType, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{
{
Expand Down
2 changes: 1 addition & 1 deletion core/serialization_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ func TestBatchHeaderEncoding(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, hexutil.Encode(hash[:]), reducedBatchHeaderHash)

onchainBatchHeader := binding.IEigenDAServiceManagerBatchHeader{
onchainBatchHeader := binding.BatchHeader{
BlobHeadersRoot: batchRoot,
QuorumNumbers: []byte{0},
SignedStakeForQuorums: []byte{100},
Expand Down
39 changes: 19 additions & 20 deletions core/v2/assignment.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,24 +8,22 @@ import (
"github.com/Layr-Labs/eigenda/core"
)

func GetAssignments(state *core.OperatorState, blobVersion BlobVersion, quorum uint8) (map[core.OperatorID]Assignment, error) {

params, ok := ParametersMap[blobVersion]
if !ok {
return nil, fmt.Errorf("blob version %d not found", blobVersion)
func GetAssignments(state *core.OperatorState, blobParams *core.BlobVersionParameters, quorum uint8) (map[core.OperatorID]Assignment, error) {
if blobParams == nil {
return nil, fmt.Errorf("blob params cannot be nil")
}

ops, ok := state.Operators[quorum]
if !ok {
return nil, fmt.Errorf("no operators found for quorum %d", quorum)
}

if len(ops) > int(params.MaxNumOperators()) {
return nil, fmt.Errorf("too many operators for blob version %d", blobVersion)
if uint32(len(ops)) > blobParams.MaxNumOperators {
return nil, fmt.Errorf("too many operators (%d) to get assignments: max number of operators is %d", len(ops), blobParams.MaxNumOperators)
}

numOperators := big.NewInt(int64(len(ops)))
numChunks := big.NewInt(int64(params.NumChunks))
numChunks := big.NewInt(int64(blobParams.NumChunks))

type assignment struct {
id core.OperatorID
Expand Down Expand Up @@ -58,9 +56,9 @@ func GetAssignments(state *core.OperatorState, blobVersion BlobVersion, quorum u
mp += int(a.chunks)
}

delta := int(params.NumChunks) - mp
delta := int(blobParams.NumChunks) - mp
if delta < 0 {
return nil, fmt.Errorf("total chunks %d exceeds maximum %d", mp, params.NumChunks)
return nil, fmt.Errorf("total chunks %d exceeds maximum %d", mp, blobParams.NumChunks)
}

assignments := make(map[core.OperatorID]Assignment, len(chunkAssignments))
Expand All @@ -81,9 +79,11 @@ func GetAssignments(state *core.OperatorState, blobVersion BlobVersion, quorum u

}

func GetAssignment(state *core.OperatorState, blobVersion BlobVersion, quorum core.QuorumID, id core.OperatorID) (Assignment, error) {

assignments, err := GetAssignments(state, blobVersion, quorum)
func GetAssignment(state *core.OperatorState, blobParams *core.BlobVersionParameters, quorum core.QuorumID, id core.OperatorID) (Assignment, error) {
if blobParams == nil {
return Assignment{}, fmt.Errorf("blob params cannot be nil")
}
assignments, err := GetAssignments(state, blobParams, quorum)
if err != nil {
return Assignment{}, err
}
Expand All @@ -96,22 +96,21 @@ func GetAssignment(state *core.OperatorState, blobVersion BlobVersion, quorum co
return assignment, nil
}

func GetChunkLength(blobVersion BlobVersion, blobLength uint32) (uint32, error) {

func GetChunkLength(blobLength uint32, blobParams *core.BlobVersionParameters) (uint32, error) {
if blobLength == 0 {
return 0, fmt.Errorf("blob length must be greater than 0")
}

if blobParams == nil {
return 0, fmt.Errorf("blob params cannot be nil")
}

// Check that the blob length is a power of 2
if blobLength&(blobLength-1) != 0 {
return 0, fmt.Errorf("blob length %d is not a power of 2", blobLength)
}

if _, ok := ParametersMap[blobVersion]; !ok {
return 0, fmt.Errorf("blob version %d not found", blobVersion)
}

chunkLength := blobLength * ParametersMap[blobVersion].CodingRate / ParametersMap[blobVersion].NumChunks
chunkLength := blobLength * blobParams.CodingRate / blobParams.NumChunks
if chunkLength == 0 {
chunkLength = 1
}
Expand Down
Loading

0 comments on commit b24f23d

Please sign in to comment.