Skip to content

Commit

Permalink
not working yet
Browse files Browse the repository at this point in the history
  • Loading branch information
Ubuntu committed Dec 7, 2023
1 parent 789cc64 commit 4fc22e9
Show file tree
Hide file tree
Showing 6 changed files with 126 additions and 7 deletions.
3 changes: 3 additions & 0 deletions core/encoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,9 @@ type Encoder interface {
// VerifyBlobLength takes in the commitments and returns an error if the blob length is invalid.
VerifyBlobLength(commitments BlobCommitments) error

// VerifyBlobLengthBatched takes in a batch of commitments and returns an error if any blob length is invalid
VerifyBlobLengthBatched(commitments []BlobCommitments) error

// Decode takes in the chunks, indices, and encoding parameters and returns the decoded blob
Decode(chunks []*Chunk, indices []ChunkNumber, params EncodingParams, inputSize uint64) ([]byte, error)
}
Expand Down
17 changes: 17 additions & 0 deletions core/encoding/encoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"github.com/Layr-Labs/eigenda/core"
"github.com/Layr-Labs/eigenda/pkg/encoding/encoder"
"github.com/Layr-Labs/eigenda/pkg/encoding/kzgEncoder"
wbls "github.com/Layr-Labs/eigenda/pkg/kzg/bn254"
lru "github.com/hashicorp/golang-lru/v2"
)

Expand Down Expand Up @@ -103,6 +104,22 @@ func (e *Encoder) VerifyBlobLength(commitments core.BlobCommitments) error {

}

// VerifyBlobLengthBatched verifies the length (low-degree) proofs of a batch of
// blob commitments with a single batched pairing check, delegating the
// cryptographic work to EncoderGroup.VerifyBatchedLengthProof.
// It returns nil for an empty batch, and otherwise returns the error from the
// underlying batched verification.
func (e *Encoder) VerifyBlobLengthBatched(commitments []core.BlobCommitments) error {
	// An empty batch is trivially valid; also shields the batched verifier
	// from having to handle zero-length inputs.
	if len(commitments) == 0 {
		return nil
	}

	numBlob := len(commitments)
	commits := make([]wbls.G1Point, numBlob)
	proofs := make([]wbls.G1Point, numBlob)
	degrees := make([]uint64, numBlob)

	for i, c := range commitments {
		commits[i] = *c.Commitment.G1Point
		proofs[i] = *c.LengthProof.G1Point
		// The committed polynomial for a blob of Length symbols has degree Length-1.
		// NOTE(review): assumes Length >= 1; Length == 0 would wrap to a huge
		// degree here — confirm upstream validation guarantees this.
		degrees[i] = uint64(c.Length - 1)
	}

	return e.EncoderGroup.VerifyBatchedLengthProof(commits, proofs, degrees)
}

func (e *Encoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNumber, commitments core.BlobCommitments, params core.EncodingParams) error {

encParams := toEncParams(params)
Expand Down
7 changes: 7 additions & 0 deletions core/encoding/mock_encoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,13 @@ func (e *MockEncoder) VerifyBlobLength(commitments core.BlobCommitments) error {
return args.Error(0)
}

// VerifyBlobLengthBatched records the call on the mock, waits the configured
// artificial delay, and returns the error programmed for this invocation.
func (e *MockEncoder) VerifyBlobLengthBatched(commitments []core.BlobCommitments) error {
	args := e.Called(commitments)
	time.Sleep(e.Delay)
	return args.Error(0)
}

func (e *MockEncoder) Decode(chunks []*core.Chunk, indices []core.ChunkNumber, params core.EncodingParams, maxInputSize uint64) ([]byte, error) {
args := e.Called(chunks, indices, params, maxInputSize)
time.Sleep(e.Delay)
Expand Down
20 changes: 15 additions & 5 deletions core/validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,17 +130,21 @@ func (v *chunkValidator) UpdateOperatorID(operatorID OperatorID) {

func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *OperatorState) error {
subBatchMap := make(map[EncodingParams]*SubBatch)
lenProofs := make([]BlobCommitments, len(blobs))

for _, blob := range blobs {
for z, blob := range blobs {
if len(blob.Bundles) != len(blob.BlobHeader.QuorumInfos) {
return errors.New("number of bundles does not match number of quorums")
}

// Collect all length proofs
lenProofs[z] = blob.BlobHeader.BlobCommitments

// Validate the blob length
err := v.encoder.VerifyBlobLength(blob.BlobHeader.BlobCommitments)
if err != nil {
return err
}
//err := v.encoder.VerifyBlobLength(blob.BlobHeader.BlobCommitments)
//if err != nil {
// return err
//}
// for each quorum
for _, quorumHeader := range blob.BlobHeader.QuorumInfos {
// Check if the operator is a member of the quorum
Expand Down Expand Up @@ -182,6 +186,12 @@ func (v *chunkValidator) ValidateBatch(blobs []*BlobMessage, operatorState *Oper
}
}

// verify length proof
err := v.encoder.VerifyBlobLengthBatched(lenProofs)
if err != nil {
return err
}

// Parallelize the universal verification for each subBatch
numSubBatch := len(subBatchMap)
out := make(chan error, numSubBatch)
Expand Down
4 changes: 2 additions & 2 deletions pkg/encoding/kzgEncoder/multiframe.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ type Sample struct {
// generate a random value using Fiat Shamir transform
// we could also use pseudo-randomness generated locally, but we would have to ensure no adversary can manipulate it
// Hashing everything takes about 1ms, so Fiat Shamir transform does not incur much cost
func GenRandomness(params rs.EncodingParams, samples []Sample, m int) (bls.Fr, error) {
func GenUniversalRandomness(params rs.EncodingParams, samples []Sample) (bls.Fr, error) {
var buffer bytes.Buffer
enc := gob.NewEncoder(&buffer)

Expand Down Expand Up @@ -67,7 +67,7 @@ func (group *KzgEncoderGroup) UniversalVerify(params rs.EncodingParams, samples
n := len(samples)
fmt.Printf("Batch verify %v frames of %v symbols out of %v blobs \n", n, params.ChunkLen, m)

r, err := GenRandomness(params, samples, m)
r, err := GenUniversalRandomness(params, samples)
if err != nil {
return err
}
Expand Down
82 changes: 82 additions & 0 deletions pkg/encoding/kzgEncoder/verifier.go
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
package kzgEncoder

import (
"bytes"
"encoding/gob"
"errors"
"math"

rs "github.com/Layr-Labs/eigenda/pkg/encoding/encoder"
kzg "github.com/Layr-Labs/eigenda/pkg/kzg"
bls "github.com/Layr-Labs/eigenda/pkg/kzg/bn254"
wbls "github.com/Layr-Labs/eigenda/pkg/kzg/bn254"
)

Expand Down Expand Up @@ -80,6 +83,85 @@ func (v *KzgEncoderGroup) VerifyCommit(commit, lowDegreeProof *wbls.G1Point, deg

}

// GenLenProofRandomness derives a single field element from the list of
// commitments via the Fiat-Shamir transform: the commitments are
// gob-serialized and the resulting bytes are hashed to a field element.
// Because the challenge is bound to the commitments themselves, no party can
// choose it adversarially.
func GenLenProofRandomness(commits []wbls.G1Point) (bls.Fr, error) {
	var buf bytes.Buffer

	// Serialize every commitment into the transcript.
	if err := gob.NewEncoder(&buf).Encode(commits); err != nil {
		return bls.ZERO, err
	}

	// Hash the transcript down to one field element.
	var challenge bls.Fr
	if err := wbls.HashToSingleField(&challenge, buf.Bytes()); err != nil {
		return bls.ZERO, err
	}

	return challenge, nil
}

// VerifyBatchedLengthProof verifies the low-degree (length) proofs of many
// commitments with a single pairing check. For each i, it checks that
// commits[i] opens to a polynomial of degree at most degrees[i], using
// lowDegreeProofs[i] against the SRS. The individual checks are combined by
// taking a random linear combination of the commitments with powers of a
// Fiat-Shamir challenge r, so a single pairing equation covers the batch.
// It returns nil when every proof verifies, or an error describing the
// first failure.
func (v *KzgEncoderGroup) VerifyBatchedLengthProof(commits, lowDegreeProofs []wbls.G1Point, degrees []uint64) error {

	n := len(commits)

	// Nothing to verify; also guards the randomsFr[0] write below,
	// which would panic on an empty batch.
	if n == 0 {
		return nil
	}

	// The three slices are indexed in lockstep; a mismatch would cause an
	// out-of-range panic below.
	if len(lowDegreeProofs) != n || len(degrees) != n {
		return errors.New("commits, lowDegreeProofs and degrees must have the same length")
	}

	// Fiat-Shamir challenge bound to the commitments.
	r, err := GenLenProofRandomness(commits)
	if err != nil {
		return err
	}

	randomsFr := make([]bls.Fr, n)
	onesFr := make([]bls.Fr, n)

	wbls.CopyFr(&randomsFr[0], &r)

	var sumRandomsFr bls.Fr
	wbls.CopyFr(&sumRandomsFr, &wbls.ZERO)

	// randomsFr[j] = r^(j+1-1) = r^j scaled from r: successive powers of r.
	for j := 0; j < n-1; j++ {
		wbls.MulModFr(&randomsFr[j+1], &randomsFr[j], &r)
	}

	// sumRandomsFr = sum of all powers of r, used for the batched G2 term.
	for j := 0; j < n; j++ {
		wbls.AddModFr(&sumRandomsFr, &sumRandomsFr, &randomsFr[j])
	}

	// Coefficients of 1 for the terms that must NOT be weighted (proofs and
	// degree points are already paired index-wise with the weighted commits).
	for j := 0; j < n; j++ {
		wbls.CopyFr(&onesFr[j], &wbls.ONE)
	}

	// Random linear combination of the commitments.
	batchedCommits := wbls.LinCombG1(commits, randomsFr)

	// Claimed degree points from the SRS; can potentially be optimized by
	// grouping entries with the same degree.
	degreesPoint := make([]wbls.G2Point, n)
	for j := 0; j < n; j++ {
		claimedDegree := degrees[j]
		// Bounds check: v.Srs.G2[v.SRSOrder-1-claimedDegree] would index out
		// of range (or underflow) for degrees >= SRSOrder, e.g. a wrapped
		// uint64(0-1) from a zero-length blob.
		if claimedDegree >= v.SRSOrder {
			return errors.New("claimed degree exceeds SRS order")
		}
		wbls.CopyG2(&degreesPoint[j], &v.Srs.G2[v.SRSOrder-1-claimedDegree])
	}

	batchedDegree := wbls.LinCombG2(degreesPoint, onesFr)

	// Batched degree proof.
	batchedProof := wbls.LinCombG1(lowDegreeProofs, onesFr)

	// Batched G2 generator term, scaled by the sum of the challenge powers.
	var batchedG2 wbls.G2Point
	wbls.MulG2(&batchedG2, &bls.GenG2, &sumRandomsFr)

	if !wbls.PairingsVerify(batchedCommits, batchedDegree, batchedProof, &batchedG2) {
		return errors.New("batched low degree proof fails")
	}
	return nil
}

func (v *KzgVerifier) VerifyFrame(commit *wbls.G1Point, f *Frame, index uint64) error {

j, err := rs.GetLeadingCosetIndex(
Expand Down

0 comments on commit 4fc22e9

Please sign in to comment.