diff --git a/massifs/go.mod b/massifs/go.mod index cab9e2f..669cee0 100644 --- a/massifs/go.mod +++ b/massifs/go.mod @@ -2,6 +2,8 @@ module github.com/datatrails/go-datatrails-merklelog/massifs go 1.22 +replace github.com/datatrails/go-datatrails-merklelog/mmr => ../mmr + require ( github.com/datatrails/go-datatrails-merklelog/mmr v0.0.2 github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.1.0 @@ -9,7 +11,7 @@ require ( require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 - github.com/datatrails/go-datatrails-common v0.15.1 + github.com/datatrails/go-datatrails-common v0.18.0 github.com/google/uuid v1.6.0 github.com/stretchr/testify v1.9.0 github.com/veraison/go-cose v1.1.0 @@ -17,56 +19,46 @@ require ( require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0 // indirect - github.com/Azure/go-amqp v1.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 // indirect + github.com/Azure/go-amqp v1.0.5 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect 
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/KimMachineGun/automemlimit v0.3.0 // indirect - github.com/cilium/ebpf v0.12.3 // indirect - github.com/containerd/cgroups/v3 v3.0.2 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/fxamacker/cbor/v2 v2.5.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/fxamacker/cbor v1.5.1 + github.com/fxamacker/cbor/v2 v2.6.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.4 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect - github.com/openzipkin/zipkin-go v0.4.2 // indirect + github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/x448/float16 v0.8.4 // indirect - go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp 
v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/massifs/go.sum b/massifs/go.sum index 1eae2fd..e687e21 100644 --- a/massifs/go.sum +++ b/massifs/go.sum @@ -1,29 +1,30 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdEckRGX01XvwXDHUT9zYZ3k0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= 
-github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0 h1:MxbPJrYY81a8xnMml4qICSq1z2WusPw3jSfdIMupnYM= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0/go.mod h1:pXDkeh10bAqElvd+S5Ppncj+DCKvJGXNa8rRT2R7rIw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= -github.com/Azure/go-amqp v1.0.0 h1:QfCugi1M+4F2JDTRgVnRw7PYXLXZ9hmqk3+9+oJh3OA= -github.com/Azure/go-amqp v1.0.0/go.mod h1:+bg0x3ce5+Q3ahCEXnCsGG3ETpDQe3MEVnOuT2ywPwc= +github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU= +github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= 
+github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ 
-37,20 +38,10 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/KimMachineGun/automemlimit v0.3.0 h1:khgwM5ESVN85cE6Bq2ozMAAWDfrOEwQ51D/YlmThE04= -github.com/KimMachineGun/automemlimit v0.3.0/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0= -github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= -github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= -github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= -github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/datatrails/go-datatrails-common v0.15.1 h1:wu3Gs6v7TkMLltzavPY2aHPniJabEiuqSJSHW79bX+4= -github.com/datatrails/go-datatrails-common v0.15.1/go.mod h1:lVLYVw5o+Wj+z8sn8bJBzp9qBCdYQ0DUX91+R5Gn73Q= -github.com/datatrails/go-datatrails-merklelog/mmr v0.0.2 h1:Jxov4/onoFiCISLQNSPy/nyt3USAEvUZpEjlScHJYKI= -github.com/datatrails/go-datatrails-merklelog/mmr v0.0.2/go.mod h1:+Oz8O6bns0rF6gr03xJzKTBzUzyskZ8Gics8/qeNzYk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod 
h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/datatrails/go-datatrails-common v0.18.0 h1:OeNP4EdIjhLHnE/mdN2/kp6Fq+xOnE6Y2p3DKg4xXHw= +github.com/datatrails/go-datatrails-common v0.18.0/go.mod h1:fBDqKHRLUYcictdWdLrIhKNhieKVE2r0II8vyETCuhM= github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.1.0 h1:q9RXtAGydXKSJjARnFObNu743cbfIOfERTXiiVa2tF4= github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.1.0/go.mod h1:rWFjeK1NU7qnhl9+iKdjASpw/CkPwDAOPHsERYR7uEQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -58,43 +49,31 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= -github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod 
h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg= +github.com/fxamacker/cbor v1.5.1/go.mod h1:3aPGItF174ni7dDzd6JZ206H8cmr4GDNBGpPa971zsU= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= +github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= -github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -105,8 +84,6 @@ github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154 h1:+ANMOp3EbA4WEKS github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154/go.mod h1:ItUTr90SrkBAvLf5UsxqN+lMfF1rw21mEcFa28XqOzQ= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -116,23 +93,23 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 h1:uhcF5Jd7rP9DVEL10Siffyepr6SvlKbUsjH5JpNCRi8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0/go.mod h1:+oCZ5GXXr7KPI/DNOQORPTq5AWHfALJj9c72b0+YsEY= -github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= -github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= +github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/veraison/go-cose v1.1.0 h1:AalPS4VGiKavpAzIlBjrn7bhqXiXi4jbMYY/2+UC+4o= @@ -140,77 +117,75 @@ github.com/veraison/go-cose v1.1.0/go.mod 
h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa 
h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 h1:W12Pwm4urIbRdGhMEg2NM9O3TWKjNcxQhs46V0ypf/k= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket 
v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/massifs/heightindex.go b/massifs/heightindex.go new file mode 100644 index 0000000..6bc8f66 --- /dev/null +++ b/massifs/heightindex.go @@ -0,0 +1,36 @@ +package massifs + +// TreeRootIndex returns the root index for the tree with height +func TreeRootIndex(height uint8) uint64 { + return (1 << height) - 2 +} + +// RangeRootIndex return the Massif root node's mmr index in the overall MMR given +// the massif height and the first index of the MMR it contains +func RangeRootIndex(firstIndex uint64, height uint8) uint64 { + return firstIndex + (1 << height) - 2 +} + +// RangeLastLeafIndex returns the mmr index of the last leaf given the first +// index of a massif and its height. +func RangeLastLeafIndex(firstIndex uint64, height uint8) uint64 { + return firstIndex + TreeLastLeafIndex(height) +} + +// TreeLastLeafIndex returns the *MMR* index of the last leaf in the tree with +// the given height (1 << h) - h -1 works because the number of nodes required +// to include the last leaf is always equal to the MMR height produced by node +func TreeLastLeafIndex(height uint8) uint64 { + return (1 << height) - uint64(height) - 1 +} + +// TreeSize returns the maximum byte size of the tree based on the defined log +// entry size +func TreeSize(height uint8) uint64 { + return TreeCount(height) * LogEntryBytes +} + +// TreeCount returns the node count +func TreeCount(height uint8) uint64 { + return ((1 << height) - 1) +} diff --git a/massifs/localmassifreader.go b/massifs/localmassifreader.go index 894513b..2d86dbe 100644 --- a/massifs/localmassifreader.go +++ b/massifs/localmassifreader.go @@ -216,7 +216,8 @@ func (r *LocalReader) ReplaceVerifiedContext( // Note: ensure that the root is *never* written to disc or available in the // cached copy of 
the seal, so that it always has to be recomputed. state := vc.MMRState - state.Root = nil + state.LegacySealRoot = nil + state.Peaks = nil return r.cache.ReplaceSeal(sealFilename, vc.Start.MassifIndex, &SealedState{ Sign1Message: vc.Sign1Message, diff --git a/massifs/logformat_test.go b/massifs/logformat_test.go index 123063b..188a731 100644 --- a/massifs/logformat_test.go +++ b/massifs/logformat_test.go @@ -98,8 +98,8 @@ func TestMassifLogEntries(t *testing.T) { // massif height just determines how many *leaves* are in a full // blob.. and the leaf count is constant for all blobs. The back // fill nodes required for each blobs 'last' leaf varies. - firstIndex := mmr.TreeIndex(massifLeafCount * massifIndex) - lastIndex := mmr.TreeIndex(massifLeafCount*(massifIndex+1)) - 1 + firstIndex := mmr.MMRIndex(massifLeafCount * massifIndex) + lastIndex := mmr.MMRIndex(massifLeafCount*(massifIndex+1)) - 1 expectNodeCount := lastIndex - firstIndex + 1 // for each massif try a range of log sizes. the sizes can be invalid if the startOffset or endOffset are negative. diff --git a/massifs/massifcontext.go b/massifs/massifcontext.go index d465bee..4a0ee13 100644 --- a/massifs/massifcontext.go +++ b/massifs/massifcontext.go @@ -110,7 +110,7 @@ func (mc *MassifContext) CopyPeakStack() map[uint64]int { // with how GetRoot accesses the store. The default configuration works only for // how leaf addition accesses the stack. func (mc *MassifContext) CreatePeakStackMap() error { - mc.peakStackMap = PeakStackMap(mc.Start.MassifHeight, mc.Start.FirstIndex+1) + mc.peakStackMap = PeakStackMap(mc.Start.MassifHeight, mc.Start.FirstIndex) if mc.peakStackMap == nil { return fmt.Errorf("invalid massif height or first index in start record") } @@ -426,17 +426,19 @@ func (mc *MassifContext) AddHashedLeaf( // // This generates a consistency proof from the mmr index identified by the state // size to the last mmr index present in the context. 
That proof is then -// verified as consistent with the root provided in the base state. +// verified as consistent with the accumulator provided in the base state. // // Returns: -// - the latest root on success -// - an error otherwise (the returned root is nil) -func (mc *MassifContext) CheckConsistency(baseState MMRState) ([]byte, error) { +// - the latest accumulator on success +// - an error otherwise (the returned accumulator is nil) +func (mc *MassifContext) CheckConsistency( + baseState MMRState) ([][]byte, error) { - if baseState.Root == nil { + if baseState.Peaks == nil { return nil, ErrStateRootMissing } + // Note: this can never be 0, because we always create a new massif with at least one node mmrSizeCurrent := mc.RangeCount() if mmrSizeCurrent < baseState.MMRSize { @@ -451,15 +453,8 @@ func (mc *MassifContext) CheckConsistency(baseState MMRState) ([]byte, error) { return nil, nil } - cp, err := mmr.IndexConsistencyProof( - baseState.MMRSize, mmrSizeCurrent, mc, sha256.New()) - if err != nil { - return nil, fmt.Errorf( - "%w: failed to produce proof. 
tenant=%s, massif=%d", - ErrGeneratingConsistencyProof, mc.TenantIdentity, mc.Start.MassifIndex) - } - - ok, rootB, err := mmr.CheckConsistency(mc, sha256.New(), cp, baseState.Root) + ok, peaksB, err := mmr.CheckConsistency( + mc, sha256.New(), baseState.MMRSize, mmrSizeCurrent, baseState.Peaks) if err != nil { return nil, fmt.Errorf("%w: proof verification error: err=%s, tenant=%s, massif=%d", @@ -473,7 +468,7 @@ func (mc *MassifContext) CheckConsistency(baseState MMRState) ([]byte, error) { mc.TenantIdentity, mc.Start.MassifIndex) } - return rootB, nil + return peaksB, nil } // setLastIdTimestamp must be called after A @@ -640,38 +635,3 @@ func (mc MassifContext) RangeCount() uint64 { func (mc MassifContext) LastLeafMMRIndex() uint64 { return RangeLastLeafIndex(mc.Start.FirstIndex, mc.Start.MassifHeight) } - -// TreeRootIndex returns the root index for the tree with height -func TreeRootIndex(height uint8) uint64 { - return (1 << height) - 2 -} - -// RangeRootIndex return the Massif root node's index in the overall MMR given -// the massif height and the first index of the MMR it contains -func RangeRootIndex(firstIndex uint64, height uint8) uint64 { - return firstIndex + (1 << height) - 2 -} - -// RangeLastLeafIndex returns the mmr index of the last leaf given the first -// index of a massif and its height. 
-func RangeLastLeafIndex(firstIndex uint64, height uint8) uint64 { - return firstIndex + TreeLastLeafIndex(height) -} - -// TreeLastLeafIndex returns the *MMR* index of the last leaf in the tree with -// the given height (1 << h) - h -1 works because the number of nodes required -// to include the last leaf is always equal to the MMR height produced by node -func TreeLastLeafIndex(height uint8) uint64 { - return (1 << height) - uint64(height) - 1 -} - -// TreeSize returns the maximum byte size of the tree based on the defined log -// entry size -func TreeSize(height uint8) uint64 { - return TreeCount(height) * LogEntryBytes -} - -// MaxCount returns the node count -func TreeCount(height uint8) uint64 { - return ((1 << height) - 1) -} diff --git a/massifs/massifcontextverified.go b/massifs/massifcontextverified.go index 3d5ef6d..b672737 100644 --- a/massifs/massifcontextverified.go +++ b/massifs/massifcontextverified.go @@ -1,7 +1,6 @@ package massifs import ( - "bytes" "context" "crypto" "crypto/sha256" @@ -57,10 +56,9 @@ type VerifiedContext struct { // context data against the seal state for the massif. If a previously // trusted state was provided when verification was performed, this state is // also consistent with that. When configured to use "bagged" peaks for - // verification purposes, this will be the bagged root of the mmr up to the - // end of the data. Otherwise, it will be the accumulator state (which is a - // series of roots concatenated into a single byte array). - ConsistentRoots []byte + // verification purposes, this will be the single bagged root of the mmr up to the + // end of the data. Otherwise, it will be the accumulator peaks. 
+ ConsistentRoots [][]byte } // checkedVerifiedContextOptions checks the options provided satisfy the common requirements of the reader methods @@ -157,6 +155,10 @@ func (mc *MassifContext) verifyContext( ctx context.Context, options ReaderOptions, ) (*VerifiedContext, error) { + var ok bool + var err error + var peaksB [][]byte + // This checks that any un-committed data is consistent with the latest seal available for the massif msg, state, err := options.sealGetter.GetSignedRoot(ctx, mc.TenantIdentity, mc.Start.MassifIndex) @@ -169,16 +171,18 @@ func (mc *MassifContext) verifyContext( return nil, err } - state.Root, err = mmr.GetRoot(state.MMRSize, mc, sha256.New()) + // get the peaks from the local store, we are checking the store against the + // latest additions. as we verify the signature below, any changes to the + // store will be caught. + state.Peaks, err = mmr.PeakHashes(mc, state.MMRSize-1) if err != nil { - return nil, fmt.Errorf("%w: failed to get root from massif %d for tenant %s: %v", ErrSealNotFound, mc.Start.MassifIndex, mc.TenantIdentity, err) + return nil, err } // NOTICE: The verification uses the public key that is provided on the // message. If the caller wants to ensure the massif is signed by the // expected key then they must obtain a copy of the public key from a source // they trust and supply it as an option. - pubKeyProvider := cose.NewCWTPublicKeyProvider(msg) if options.trustedSealerPubKey != nil { @@ -192,7 +196,11 @@ func (mc *MassifContext) verifyContext( } } - err = VerifySignedRoot( + // Ensure the peaks we read from the store are the ones that were signed. + // Otherwise we can get caught out by the store tampered after the seal was + // created. Of course the seal itself could have been replaced, but at that + // point the only defense is an indpendent replica. 
+ err = VerifySignedCheckPoint( *options.codec, pubKeyProvider, msg, state, nil, ) if err != nil { @@ -201,10 +209,18 @@ func (mc *MassifContext) verifyContext( ErrSealVerifyFailed, mc.Start.MassifIndex, mc.TenantIdentity, err) } - var rootB []byte - rootB, err = mc.CheckConsistency(state) + // This verifies the peaks read from mmrSizeA are consistent with mmrSizeB. + ok, peaksB, err = mmr.CheckConsistency( + mc, sha256.New(), state.MMRSize, mc.RangeCount(), state.Peaks) if err != nil { - return nil, err + return nil, fmt.Errorf( + "%w: error verifying accumulator state from massif %d for tenant %s", + err, mc.Start.MassifIndex, mc.TenantIdentity) + } + if !ok { + // We don't expect false without error, but we + return nil, fmt.Errorf("%w: failed to verify accumulator state massif %d for tenant %s", + mmr.ErrConsistencyCheck, mc.Start.MassifIndex, mc.TenantIdentity) } // If the caller has provided a trusted base state, also verify against @@ -213,17 +229,19 @@ func (mc *MassifContext) verifyContext( // check the remote log is consistent with the log portion they have locally // before replicating the new data. if options.trustedBaseState != nil { - rootB2, err := mc.CheckConsistency(*options.trustedBaseState) + + ok, _, err = mmr.CheckConsistency( + mc, sha256.New(), + options.trustedBaseState.MMRSize, + mc.RangeCount(), + options.trustedBaseState.Peaks) if err != nil { return nil, err } - // rootB above will be nil if the new state is the same as the trusted - // state, in which case there is no value in getting the root in order - // to do the compare. 
- if rootB != nil && !bytes.Equal(rootB, rootB2) { + if !ok { return nil, fmt.Errorf( - "%w: the root produced for the trusted base state doesn't match the root produced for the seal state fetched from the log", - ErrInconsistentState) + "%w: the accumulator produced for the trusted base state doesn't match the root produced for the seal state fetched from the log", + mmr.ErrConsistencyCheck) } } @@ -231,6 +249,6 @@ func (mc *MassifContext) verifyContext( MassifContext: *mc, Sign1Message: *msg, MMRState: state, - ConsistentRoots: rootB, + ConsistentRoots: peaksB, }, nil } diff --git a/massifs/massifindex.go b/massifs/massifindex.go new file mode 100644 index 0000000..a4d86fe --- /dev/null +++ b/massifs/massifindex.go @@ -0,0 +1,49 @@ +package massifs + +import "github.com/datatrails/go-datatrails-merklelog/mmr" + +// MassifIndexFromLeafIndex gets the massif index of the massif that the given leaf is stored in, +// +// given the leaf index of the leaf. +// +// This is found with the given massif height, which is constant for all massifs. +func MassifIndexFromLeafIndex(massifHeight uint8, leafIndex uint64) uint64 { + + // first find how many leaf nodes each massif can hold. + // + // Note: massifHeight starts at index 1, whereas height index for HeighIndexLeafCount starts at 0. + massifMaxLeaves := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) + + // now find the massif. + // + // for context, see: https://github.com/datatrails/epic-8120-scalable-proof-mechanisms/blob/main/mmr/forestrie-mmrblobs.md#blob-size + // + // Note: massif indexes start at 0. + // Note: leaf indexes starts at 0. + // + // Therefore, given a massif height of 2, that has max leaves of 4; + // if a leaf index of 3 is given, then it is in massif 0, along with leaves, 0, 1 and 2. + return leafIndex / massifMaxLeaves + +} + +// MassifIndexFromMMRIndex gets the massif index of the massif that the given leaf is stored in +// +// given the mmr index of the leaf. 
+// +// NOTE: if the mmrIndex is not a leaf node, then error is returned. +func MassifIndexFromMMRIndex(massifHeight uint8, mmrIndex uint64) uint64 { + + leafIndex := mmr.LeafIndex(mmrIndex) + + return MassifIndexFromLeafIndex(massifHeight, leafIndex) + +} + +// MassifFromLeaf computes the massif index given a leaf index and the configured massif height (one based) for the log. +func MassifFromLeaf(massifHeight uint8, leafIndex uint64) uint64 { + + // Note: massifHeight starts at index 1, whereas height index for HeighIndexLeafCount starts at 0. + massifMaxLeaves := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) + return leafIndex / massifMaxLeaves +} diff --git a/massifs/massifpeakstack_test.go b/massifs/massifpeakstack_test.go index 35e4e52..f30d9b1 100644 --- a/massifs/massifpeakstack_test.go +++ b/massifs/massifpeakstack_test.go @@ -79,7 +79,7 @@ func TestPeakStack_popArithmetic(t *testing.T) { lastLeaf := massifIndex*massifLeafCount + massifLeafCount - 1 spurHeightLeaf := mmr.SpurHeightLeaf(lastLeaf) - iPeak := mmr.TreeIndex(lastLeaf) + spurHeightLeaf + iPeak := mmr.MMRIndex(lastLeaf) + spurHeightLeaf stackLen := mmr.LeafMinusSpurSum(massifIndex) @@ -595,7 +595,7 @@ func TestPeakStack_Height4Massif2to3Size63(t *testing.T) { assert.Equal(t, mc3.peakStackMap[iPeakNode30], iStack30) assert.Equal(t, mc3.peakStackMap[iPeakNode45], iStack45) - proof, err := mmr.IndexProof(mmrSizeB, &mc3, sha256.New(), iPeakNode30) + proof, err := mmr.InclusionProofBagged(mmrSizeB, &mc3, sha256.New(), iPeakNode30) require.NoError(t, err) peakHash, err := mc3.Get(iPeakNode30) @@ -603,6 +603,6 @@ func TestPeakStack_Height4Massif2to3Size63(t *testing.T) { root, err := mmr.GetRoot(mmrSizeB, &mc3, sha256.New()) require.NoError(t, err) - ok = mmr.VerifyInclusion(mmrSizeB, sha256.New(), peakHash, 30, proof, root) + ok = mmr.VerifyInclusionBagged(mmrSizeB, sha256.New(), peakHash, 30, proof, root) assert.True(t, ok) } diff --git a/massifs/massifstart.go b/massifs/massifstart.go index 
ddc591b..0fa4ac8 100644 --- a/massifs/massifstart.go +++ b/massifs/massifstart.go @@ -140,7 +140,7 @@ func MassifFirstLeaf(massifHeight uint8, massifIndex uint32) uint64 { // And now we can apply TreeIndex to the leaf index. This last is an // iterative call but it is sub linear. Essentially its O(tree height) (not // massif height ofc) - return mmr.TreeIndex(leafIndex) + return mmr.MMRIndex(leafIndex) } func (ms MassifStart) MarshalBinary() ([]byte, error) { diff --git a/massifs/masssifreader.go b/massifs/masssifreader.go index e9276c7..79b758f 100644 --- a/massifs/masssifreader.go +++ b/massifs/masssifreader.go @@ -6,7 +6,6 @@ import ( "github.com/datatrails/go-datatrails-common/azblob" "github.com/datatrails/go-datatrails-common/logger" - "github.com/datatrails/go-datatrails-merklelog/mmr" ) var ( @@ -161,41 +160,3 @@ func (mr *MassifReader) GetFirstMassif( return mc, nil } - -// MassifIndexFromLeafIndex gets the massif index of the massif that the given leaf is stored in, -// -// given the leaf index of the leaf. -// -// This is found with the given massif height, which is constant for all massifs. -func MassifIndexFromLeafIndex(massifHeight uint8, leafIndex uint64) uint64 { - - // first find how many leaf nodes each massif can hold. - // - // Note: massifHeight starts at index 1, whereas height index for HeighIndexLeafCount starts at 0. - massifMaxLeaves := mmr.HeightIndexLeafCount(uint64(massifHeight) - 1) - - // now find the massif. - // - // for context, see: https://github.com/datatrails/epic-8120-scalable-proof-mechanisms/blob/main/mmr/forestrie-mmrblobs.md#blob-size - // - // Note: massif indexes start at 0. - // Note: leaf indexes starts at 0. - // - // Therefore, given a massif height of 2, that has max leaves of 4; - // if a leaf index of 3 is given, then it is in massif 0, along with leaves, 0, 1 and 2. 
- return leafIndex / massifMaxLeaves - -} - -// MassifIndexFromMMRIndex gets the massif index of the massif that the given leaf is stored in -// -// given the mmr index of the leaf. -// -// NOTE: if the mmrIndex is not a leaf node, then error is returned. -func MassifIndexFromMMRIndex(massifHeight uint8, mmrIndex uint64) uint64 { - - leafIndex := mmr.LeafIndex(mmrIndex) - - return MassifIndexFromLeafIndex(massifHeight, leafIndex) - -} diff --git a/massifs/mmriver.go b/massifs/mmriver.go new file mode 100644 index 0000000..0cd8371 --- /dev/null +++ b/massifs/mmriver.go @@ -0,0 +1,243 @@ +package massifs + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + + "github.com/datatrails/go-datatrails-common/azblob" + commoncbor "github.com/datatrails/go-datatrails-common/cbor" + "github.com/fxamacker/cbor/v2" + + commoncose "github.com/datatrails/go-datatrails-common/cose" + "github.com/datatrails/go-datatrails-common/logger" + "github.com/datatrails/go-datatrails-merklelog/mmr" +) + +// MMRIVER COSE Receipts to accompany our COSE MMRIVER seals + +type MMRiverInclusionProof struct { + Index uint64 `cbor:"1,keyasint"` + InclusionPath [][]byte `cbor:"2,keyasint"` +} + +type MMRiverConsistencyProof struct { + TreeSize1 uint64 `cbor:"1,keyasint"` + TreeSize2 uint64 `cbor:"2,keyasint"` + ConsistencyPaths [][]byte `cbor:"3,keyasint"` + RightPeaks [][]byte `cbor:"4,keyasint"` +} + +type MMRiverVerifiableProofs struct { + InclusionProofs []MMRiverInclusionProof `cbor:"-1,keyasint,omitempty"` + ConsistencyProofs []MMRiverConsistencyProof `cbor:"-2,keyasint,omitempty"` +} + +// MMRiverInclusionProofHeader provides for encoding, and defered decoding, of +// COSE_Sign1 message headers for MMRIVER receipts +type MMRiverVerifiableProofsHeader struct { + VerifiableProofs MMRiverVerifiableProofs `cbor:"396,keyasint"` +} + +// VerifySignedInclusionReceipts verifies a signed COSE receipt encoded according to the MMRIVER VDS +// on success the produced root is returned. 
+// Signature verification failure is not an error, but the returned root will be nil and the result will be false. +// All other unexpected issues are returned as errors, with a false result and nil root. +// Note that MMRIVER receipts allow for multiple inclusion proofs to be attached to the receipt. +// This function returns true only if ALL receipts verify +// +// The candidates array provides the *candidate* values. Once verified, we can call them node values (or leaves), +// Note that any node value in the log may be proven by a receipt, not just leaves. +func VerifySignedInclusionReceipts( + ctx context.Context, + receipt *commoncose.CoseSign1Message, + candidates [][]byte, +) (bool, []byte, error) { + + var err error + + // ignore any existing payload + receipt.Payload = nil + + // We must return false if there are no candidates + if len(candidates) == 0 { + return false, nil, fmt.Errorf("no candidates provided") + } + + var header MMRiverVerifiableProofsHeader + err = cbor.Unmarshal(receipt.Headers.RawUnprotected, &header) + if err != nil { + return false, nil, fmt.Errorf("MMRIVER receipt proofs malformed") + } + verifiableProofs := header.VerifiableProofs + if len(verifiableProofs.InclusionProofs) == 0 { + return false, nil, fmt.Errorf("MMRIVER receipt inclusion proofs not present") + } + + // permit *fewer* candidates than proofs, but not more + if len(candidates) > len(verifiableProofs.InclusionProofs) { + return false, nil, fmt.Errorf("MMRIVER receipt more candidates than proofs") + } + + var proof MMRiverInclusionProof + + proof = verifiableProofs.InclusionProofs[0] + receipt.Payload = mmr.IncludedRoot( + sha256.New(), + proof.Index, candidates[0], + proof.InclusionPath) + + err = receipt.VerifyWithCWTPublicKey(nil) + if err != nil { + return false, nil, fmt.Errorf( + "MMRIVER receipt VERIFY FAILED for: mmrIndex %d, candidate %d, err %v", proof.Index, 0, err) + } + // verify the first proof then just compare the produced roots + + for i := 1; i < 
len(verifiableProofs.InclusionProofs); i++ { + + proof = verifiableProofs.InclusionProofs[i] + proven := mmr.IncludedRoot(sha256.New(), proof.Index, candidates[i], proof.InclusionPath) + if bytes.Compare(receipt.Payload, proven) != 0 { + return false, nil, fmt.Errorf( + "MMRIVER receipt VERIFY FAILED for: mmrIndex %d, candidate %d, err %v", proof.Index, i, err) + } + } + return true, receipt.Payload, nil +} + +// VerifySignedInclusionReceipt verifies a reciept comprised of a single inclusion proof +// If there are 0 or more than 1 candidates, the result will be false and an error will be returned +func VerifySignedInclusionReceipt( + ctx context.Context, + receipt *commoncose.CoseSign1Message, + candidate []byte, +) (bool, []byte, error) { + + ok, root, err := VerifySignedInclusionReceipts(ctx, receipt, [][]byte{candidate}) + if err != nil { + return false, nil, err + } + if !ok { + return false, nil, nil + } + return true, root, nil +} + +type verifiedContextGetter interface { + GetVerifiedContext( + ctx context.Context, tenantIdentity string, massifIndex uint64, + opts ...ReaderOption, + ) (*VerifiedContext, error) +} + +// NewReceipt returns a COSE receipt for the given tenantIdentity and mmrIndex +func NewReceipt( + ctx context.Context, + massifHeight uint8, + tenantIdentity string, mmrIndex uint64, + getter verifiedContextGetter, +) (*commoncose.CoseSign1Message, error) { + + log := logger.Sugar.FromContext(ctx) + defer log.Close() + massifIndex := uint32(MassifIndexFromMMRIndex(massifHeight, mmrIndex)) + + verified, err := getter.GetVerifiedContext(ctx, tenantIdentity, uint64(massifIndex)) + if err != nil { + return nil, fmt.Errorf( + "%w: failed to get verified context %d for %s", err, massifIndex, tenantIdentity) + } + + msg, state := verified.Sign1Message, verified.MMRState + + proof, err := mmr.InclusionProof(&verified.MassifContext, state.MMRSize-1, mmrIndex) + if err != nil { + return nil, fmt.Errorf( + "failed to generating inclusion proof: %d in 
MMR(%d), %v", + mmrIndex, verified.MMRState.MMRSize, err) + } + + peakIndex := mmr.PeakIndex(mmr.LeafCount(state.MMRSize), len(proof)) + + // NOTE: The old-accumulator compatibility property, from + // https://eprint.iacr.org/2015/718.pdf, along with the COSE protected & + // unprotected buckets, is why we can just pre sign the receipts. + // As long as the receipt consumer is convinced of the logs consistency (not split view), + // it does not matter which accumulator state the receipt is signed against. + + var peaksHeader MMRStateReceipts + err = cbor.Unmarshal(msg.Headers.RawUnprotected, &peaksHeader) + if err != nil { + return nil, fmt.Errorf( + "%w: failed decoding peaks header: for tenant %s, seal %d", err, tenantIdentity, massifIndex) + } + if peakIndex >= len(peaksHeader.PeakReceipts) { + return nil, fmt.Errorf( + "%w: peaks header containes to few peak receipts: for tenant %s, seal %d", err, tenantIdentity, massifIndex) + } + + // This is an array of marshaled COSE_Sign1's + receiptMsg := peaksHeader.PeakReceipts[peakIndex] + signed, err := commoncose.NewCoseSign1MessageFromCBOR( + receiptMsg, commoncose.WithDecOptions(CheckpointDecOptions())) + if err != nil { + return nil, fmt.Errorf( + "%w: failed to decode pre-signed receipt for: %d in MMR(%d)", + err, mmrIndex, state.MMRSize) + } + + // signed.Headers.RawProtected = nil + signed.Headers.RawUnprotected = nil + + verifiableProofs := MMRiverVerifiableProofs{ + InclusionProofs: []MMRiverInclusionProof{{ + Index: mmrIndex, + InclusionPath: proof}}, + } + + signed.Headers.Unprotected[VDSCoseReceiptProofsTag] = verifiableProofs + + return signed, nil +} + +type ReceiptBuilder struct { + log logger.Logger + massifReader MassifReader + cborCodec commoncbor.CBORCodec + massifHeight uint8 +} + +// newReceiptBuilder creates a new receiptBuilder configured with all the necessary readers and information required to build a receipt +// Note that errors are logged assuming the calling context is retrieving a 
receipt, +// and that all returned errors are StatusErrors that can be returned to the client or nil +func NewReceiptBuilder(log logger.Logger, reader azblob.Reader, massifHeight uint8) (ReceiptBuilder, error) { + + var err error + + b := ReceiptBuilder{ + log: log, + massifHeight: massifHeight, + } + + if b.cborCodec, err = NewRootSignerCodec(); err != nil { + return ReceiptBuilder{}, err + } + b.massifHeight = massifHeight + b.massifReader = NewMassifReader(log, reader) + sealReader := NewSignedRootReader(log, reader, b.cborCodec) + b.massifReader = NewMassifReader(log, reader, WithSealGetter(&sealReader)) + + return b, nil +} + +func (b *ReceiptBuilder) BuildReceipt( + ctx context.Context, tenantIdentity string, mmrIndex uint64, +) (*commoncose.CoseSign1Message, error) { + + log := b.log.FromContext(ctx) + defer log.Close() + + return NewReceipt(ctx, b.massifHeight, tenantIdentity, mmrIndex, &b.massifReader) +} diff --git a/massifs/peakstack.go b/massifs/peakstack.go index 85ac959..1169c71 100644 --- a/massifs/peakstack.go +++ b/massifs/peakstack.go @@ -6,7 +6,7 @@ import "github.com/datatrails/go-datatrails-merklelog/mmr" // PeakStackMap builds a map from mmr indices to peak stack entries // massifHeight is the 1 based height (not the height index) -func PeakStackMap(massifHeight uint8, mmrSize uint64) map[uint64]int { +func PeakStackMap(massifHeight uint8, mmrIndex uint64) map[uint64]int { if massifHeight == 0 { return nil @@ -15,12 +15,12 @@ func PeakStackMap(massifHeight uint8, mmrSize uint64) map[uint64]int { // XXX:TODO there is likely a more efficient way to do this using // PeaksBitmap or a variation of it, but this isn't a terribly hot path. 
stackMap := map[uint64]int{} - iPeaks := mmr.Peaks(mmrSize) + iPeaks := mmr.Peaks(mmrIndex) for i, ip := range iPeaks { - if mmr.PosHeight(ip) < uint64(massifHeight-1) { + if mmr.IndexHeight(ip) < uint64(massifHeight-1) { continue } - stackMap[ip-1] = i + stackMap[ip] = i } return stackMap diff --git a/massifs/peakstack_test.go b/massifs/peakstack_test.go index e139f07..19f2cf0 100644 --- a/massifs/peakstack_test.go +++ b/massifs/peakstack_test.go @@ -8,7 +8,7 @@ import ( func TestPeakStackMap(t *testing.T) { type args struct { massifHeight uint8 - mmrSize uint64 + mmrIndex uint64 } tests := []struct { name string @@ -17,25 +17,25 @@ func TestPeakStackMap(t *testing.T) { }{ // Note that the mmrSize used here, is also the FirstLeaf + 1 of the // massif containing the peak stack. - {"massifpeakstack_test:0", args{2, 1}, map[uint64]int{}}, - {"massifpeakstack_test:1", args{2, 4}, map[uint64]int{ + {"massifpeakstack_test:0", args{2, 0}, map[uint64]int{}}, + {"massifpeakstack_test:1", args{2, 3}, map[uint64]int{ 2: 0, }}, - {"massifpeakstack_test:2", args{2, 7}, map[uint64]int{ + {"massifpeakstack_test:2", args{2, 6}, map[uint64]int{ 6: 0, }}, - {"massifpeakstack_test:3", args{2, 10}, map[uint64]int{ + {"massifpeakstack_test:3", args{2, 9}, map[uint64]int{ 6: 0, 9: 1, }}, - {"massifpeakstack_test:4", args{2, 15}, map[uint64]int{ + {"massifpeakstack_test:4", args{2, 14}, map[uint64]int{ 14: 0, }}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := PeakStackMap(tt.args.massifHeight, tt.args.mmrSize); !reflect.DeepEqual(got, tt.want) { + if got := PeakStackMap(tt.args.massifHeight, tt.args.mmrIndex); !reflect.DeepEqual(got, tt.want) { t.Errorf("PeakStackMap() = %v, want %v", got, tt.want) } }) diff --git a/massifs/rootsigner.go b/massifs/rootsigner.go index b635a94..6feba72 100644 --- a/massifs/rootsigner.go +++ b/massifs/rootsigner.go @@ -3,21 +3,63 @@ package massifs import ( "crypto/ecdsa" "crypto/rand" + "errors" + "fmt" - dtcbor 
"github.com/datatrails/go-datatrails-common/cbor" - dtcose "github.com/datatrails/go-datatrails-common/cose" + commoncbor "github.com/datatrails/go-datatrails-common/cbor" + commoncose "github.com/datatrails/go-datatrails-common/cose" + "github.com/fxamacker/cbor/v2" "github.com/veraison/go-cose" ) +var ( + ErrNodeSize = errors.New("node value sizes must match the hash size") +) + +type MMRStateVersion int + +const ( + MMRStateVersion0 MMRStateVersion = iota // Implicit initial release version + MMRStateVersion1 // Version 1 + // Note: new versions must be monotonicaly assigned. + +) + +const ( + MMRStateVersionCurrent = MMRStateVersion1 + VDSCoseReceiptsTag = 395 + VDSCoseReceiptProofsTag = 396 + VDSMMRiver = 2 + VDSInclusionProof = -1 + InclusionProofIndex = 1 + InclusionProofProof = 2 + + // The numbers < -65535 are reserved for private use. + COSEPrivateStart = int64(-65535) + // Numbers in the private use space are organisation / implementation specific. + // Allocation in this range MUST be co-ordinated datatrails wide. + // Remembering that the range is *negative* we allocate the tag by + // subtracting the IANA registered tag for marking COSE Receipts proof data. + SealPeakReceiptsLabel = COSEPrivateStart - VDSCoseReceiptProofsTag +) + // MMRState defines the details we include in our signed commitment to the head log state. type MMRState struct { + + // Version is present in all seals from version 1. The initial release was implicity version 0. + Version int `cbor:"7,keyasint,omitempty"` + // The size of the mmr defines the path to the root (and the full structure // of the tree). Note that all subsequent mmr states whose size is *greater* // than this, can also (efficiently) reproduce this particular root, and // hence can be used to verify 'old' receipts. This property is due to the // strict append only structure of the tree. 
- MMRSize uint64 `cbor:"1,keyasint"` - Root []byte `cbor:"2,keyasint"` + MMRSize uint64 `cbor:"1,keyasint"` + LegacySealRoot []byte `cbor:"2,keyasint,omitempty"` // Valid in Version 0 only + // The peak hashes for the mmr identified by MMRSize, this is also the packed accumulator for the tree state. + // All inclusion proofs for any node under MMRSize will lead directly to one + // of these peaks, or can be extended to do so. + Peaks [][]byte `cbor:"8,keyasint,omitempty"` // Version 1+ // Timestamp is the unix time (milliseconds) read at the time the root was // signed. Including it allows for the same root to be re-signed. Timestamp int64 `cbor:"3,keyasint"` @@ -43,16 +85,23 @@ type MMRState struct { CommitmentEpoch uint32 `cbor:"6,keyasint"` } +type MMRStateReceipts struct { + // A Pre-signed COSE Receipts MMRIVER COSE_Sign1 message for each peak in the MMR identified by MMRSize. + // To create a receipt, simply attach the inclusion proof to the unprotected header for the appropriate PeakIndex. + // PeakReceipts []cbor.RawMessage `cbor:"-65931,keyasint"` + PeakReceipts [][]byte `cbor:"-65931,keyasint"` +} + // RootSigner is used to produce a signature over an mmr log state. This // signature commits to a log state, and should only be created and published // after checking the consistency between the last signed state and the new one. // See merklelog/mmrblobs/logconfirmer.go:LogConfirmer for expected use. 
type RootSigner struct { issuer string - cborCodec dtcbor.CBORCodec + cborCodec commoncbor.CBORCodec } -func NewRootSigner(issuer string, cborCodec dtcbor.CBORCodec) RootSigner { +func NewRootSigner(issuer string, cborCodec commoncbor.CBORCodec) RootSigner { rs := RootSigner{ issuer: issuer, cborCodec: cborCodec, @@ -63,17 +112,41 @@ func NewRootSigner(issuer string, cborCodec dtcbor.CBORCodec) RootSigner { // Sign1 singes the provides state WARNING: You MUST check the state is // consistent with the most recently signed state before publishing this with a // datatrails signature. -func (rs RootSigner) Sign1(coseSigner cose.Signer, keyIdentifier string, publicKey *ecdsa.PublicKey, subject string, state MMRState, external []byte) ([]byte, error) { - payload, err := rs.cborCodec.MarshalCBOR(state) +func (rs RootSigner) Sign1( + coseSigner cose.Signer, + keyIdentifier string, + publicKey *ecdsa.PublicKey, + subject string, + state MMRState, external []byte) ([]byte, error) { + + receipts, err := rs.signEmptyPeakReceipts(coseSigner, publicKey, keyIdentifier, rs.issuer, subject, state.Peaks) if err != nil { return nil, err } + if len(receipts) != len(state.Peaks) { + return nil, fmt.Errorf("receipt vs peak count mismatch: %d vs %d", len(receipts), len(state.Peaks)) + } coseHeaders := cose.Headers{ Protected: cose.ProtectedHeader{ - dtcose.HeaderLabelCWTClaims: dtcose.NewCNFClaim( + commoncose.HeaderLabelCWTClaims: commoncose.NewCNFClaim( rs.issuer, subject, keyIdentifier, coseSigner.Algorithm(), *publicKey), }, + // one receipt is present for each peak identified by tree-size-2 in + // the protected header each receipt is individualy signed + // COSE_Sign1 message over that specific peak. All receipts of + // inclusion for individual leaves are created by attaching proofs + // to the unprotected header of the peak receipt. 
+ // SealPeakReceiptsLabel: receipts, + // RawUnprotected: rawunprotected, + Unprotected: cose.UnprotectedHeader{ + SealPeakReceiptsLabel: receipts, + }, + } + + payload, err := rs.cborCodec.MarshalCBOR(state) + if err != nil { + return nil, err } msg := cose.Sign1Message{ @@ -85,29 +158,177 @@ func (rs RootSigner) Sign1(coseSigner cose.Signer, keyIdentifier string, publicK return nil, err } - // We purposefully detach the root so that verifiers are forced to obtain it + // We purposefully detach the peaks so that verifiers are forced to obtain it // from the log. - state.Root = nil + state.LegacySealRoot = nil + state.Peaks = nil + payload, err = rs.cborCodec.MarshalCBOR(state) if err != nil { return nil, err } + msg.Payload = payload - return msg.MarshalCBOR() + encodable, err := commoncose.NewCoseSign1Message(&msg) + if err != nil { + return nil, err + } + return encodable.MarshalCBOR() +} + +// signEmptyPeakReceipts signs and encodes a COSE Receipt (MMRIVER) for each +// peak in the accumulator. +// +// The most natural place to produce the pre-signed receipts is in the the log +// confirmer, because we are allways pre-signing *peaks* of the MMR. And the +// consistency between peaks (accumulators) is the concern of the sealer by way +// of LogConfirmer. And the most natural place to store them is in the massif +// seal. Which is what we accomodate here. +// +// It is a specific property of MMR based logs that proofs of inclusion always +// lead to an accumulator peak. This leads to the ability to pre-sign receipts +// *once* for all possible inclusion proofs in the current mmr state by simply +// singing the peak and leaving the proof empty. Because the proofs are never +// signed, (the are attached in the unprotected header), Those can be added on +// demand in a completely trustless way. +// +// Importantly, this allows for self service *privacy preserving*, scitt +// compatible, receipts based on replicated copies of the log. 
The signing key +// is not required to attach the proof. +// +// Notice that, due to the Low Update Frequency property, defined in +// https://eprint.iacr.org/2015/718.pdf, *many* MMR sizes will contain the same +// peak. Over time, the signed peak for any element changes less and less +// frequently (log base 2). This means, in addition to being able to pre-sign, +// the work required of a receipt holder to check the log remains consistent +// with their old receipt gets less and less. And, in the case of a receipt +// against an unequivocal log state, completely redundant. The receipt holders +// can also significantly compress the receipt data they retain. +// +// It is true, due to low update frequency, that many may be copies of earlier +// receipts, but the locality here means consumers only need to hit one blob and +// in doing so reveal less about their area of interest. +func (c *RootSigner) signEmptyPeakReceipts( + coseSigner cose.Signer, + publicKey *ecdsa.PublicKey, + keyIdentifier string, + issuer string, + subject string, + peaks [][]byte, +) ([][]byte, error) { + + receipts := make([][]byte, len(peaks)) + + for i, peak := range peaks { + receipt, err := c.signEmptyPeakReceipt(coseSigner, publicKey, keyIdentifier, issuer, subject, peak) + if err != nil { + return nil, err + } + + receipts[i] = receipt + } + return receipts, nil +} + +// signEmptyPeakReceipt signes a Receipt for an accumulator peak. +// +// Because many inclusion proofs lead to the same peak, the proof material for +// the unprotected header is empty. This can be added by the log consumer in a +// privacy preserving way based on replicated massif content. 
+// +// Arguments: +// +// ctx: The context for the operation +// coseSigner: The signer of the completed shared receipt +// issuer: The identifier for the issuer of the receipt +// subject: The identifier for the subject of the receipt +func (rs RootSigner) signEmptyPeakReceipt( + coseSigner cose.Signer, + publicKey *ecdsa.PublicKey, + keyIdentifier string, + issuer string, + subject string, + // The bytes of a peak, which an mmr node which is a member of an accumulator for one or more tree states. + peak []byte, +) ([]byte, error) { + + if len(peak) != 32 { + return nil, fmt.Errorf("%w: peak must be 32 bytes, got %d", ErrNodeSize, len(peak)) + } + + headers := cose.Headers{ + Protected: cose.ProtectedHeader{ + VDSCoseReceiptsTag: VDSMMRiver, + cose.HeaderLabelAlgorithm: coseSigner.Algorithm(), + cose.HeaderLabelKeyID: []byte(keyIdentifier), + commoncose.HeaderLabelCWTClaims: commoncose.NewCNFClaim( + issuer, + subject, + keyIdentifier, + coseSigner.Algorithm(), + *publicKey), + }, + // The receipt producer, which MAY be the relying party in possesion of + // a log massif, can fill in the inclusion proof directly and + // independently, without revealing the item of interest to the log + // service. 
+ Unprotected: cose.UnprotectedHeader{}, + } + + msg := cose.Sign1Message{ + Headers: headers, + Payload: peak, + } + + err := msg.Sign(rand.Reader, nil, coseSigner) + if err != nil { + return nil, err + } + + // now, detach the payload + msg.Payload = nil + + // Use the appropraite encoding options + encodable, err := commoncose.NewCoseSign1Message(&msg) + if err != nil { + return nil, err + } + return encodable.MarshalCBOR() } -func NewRootSignerCodec() (dtcbor.CBORCodec, error) { - codec, err := dtcbor.NewCBORCodec( - dtcbor.NewDeterministicEncOpts(), - dtcbor.NewDeterministicDecOpts(), // unsigned int decodes to uint64 - ) +func NewRootSignerCodec() (commoncbor.CBORCodec, error) { + codec, err := commoncbor.NewCBORCodec(encOptions, decOptions) if err != nil { - return dtcbor.CBORCodec{}, err + return commoncbor.CBORCodec{}, err } return codec, nil } -func newDecOptions() []dtcose.SignOption { - return []dtcose.SignOption{dtcose.WithDecOptions(dtcbor.NewDeterministicDecOpts())} +var ( + encOptions = commoncbor.NewDeterministicEncOpts() + decOptions = cbor.DecOptions{ + DupMapKey: cbor.DupMapKeyEnforcedAPF, // (default) duplicated key not allowed + IndefLength: cbor.IndefLengthForbidden, // (default) no streaming + // override the default decoding behaviour for unsigned integers to retain the sign + IntDec: cbor.IntDecConvertNone, // decode CBOR uint/int to Go int64 + TagsMd: cbor.TagsForbidden, // (default) no tags + } +) + +// CheckpointDecOptions returns the decoding options compatible with the RootSigner +// With these options the sign is always retained +// The options align with the cbor defaults, except for the handling of unsigned integers. 
+func CheckpointDecOptions() cbor.DecOptions { + return decOptions +} + +// CheckpointEncOptions returns the decoding options compatible with the RootSigner +// These options align with the cbor defaults +func CheckpointEncOptions() cbor.EncOptions { + return encOptions +} + +func newCheckpointDecOptions() []commoncose.SignOption { + return []commoncose.SignOption{commoncose.WithDecOptions(decOptions)} } diff --git a/massifs/rootsigner_test.go b/massifs/rootsigner_test.go index 69ac965..64bf874 100644 --- a/massifs/rootsigner_test.go +++ b/massifs/rootsigner_test.go @@ -2,15 +2,350 @@ package massifs import ( "crypto/elliptic" + "crypto/rand" "testing" "github.com/datatrails/go-datatrails-common/azkeys" - dtcose "github.com/datatrails/go-datatrails-common/cose" + commoncose "github.com/datatrails/go-datatrails-common/cose" + _ "github.com/fxamacker/cbor/v2" + "github.com/veraison/go-cose" + _ "github.com/veraison/go-cose" + "github.com/datatrails/go-datatrails-common/logger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +// TestCoseSign1_UnprotectedEncDec just checks our asumptions about how to encode and decode +// nested cose messages in the unprotected headers of a cose sign1 message. +// There are some gotcha's in the encoding rules when nesting cose messages and this test is used +// to isolate the aspects we care about for the MMRIVER pre-signed receipts. 
+func TestCoseSign1_UnprotectedEncDec(t *testing.T) { + logger.New("TEST") + + key := TestGenerateECKey(t, elliptic.P256()) + cborCodec, err := NewRootSignerCodec() + require.NoError(t, err) + coseSigner := azkeys.NewTestCoseSigner(t, key) + rs := TestNewRootSigner(t, "test-issuer") + + mustMarshalCBOR := func(value any) []byte { + b, err := cborCodec.MarshalCBOR(value) + require.NoError(t, err) + return b + } + + mustSignPeak := func(peak []byte) []byte { + b, err := rs.signEmptyPeakReceipt(coseSigner, &key.PublicKey, "test-key", "test-issuer", "test-subject", peak) + require.NoError(t, err) + return b + } + + mustSignPeaks := func(peaks [][]byte) [][]byte { + receipts, err := rs.signEmptyPeakReceipts(coseSigner, &key.PublicKey, "test-key", "test-issuer", "test-subject", peaks) + require.NoError(t, err) + return receipts + } + + mustSignMessage := func(payload []byte, headers cose.Headers) []byte { + + headers.Protected[commoncose.HeaderLabelCWTClaims] = commoncose.NewCNFClaim( + "test-issuer", "test-subject", "test-key", coseSigner.Algorithm(), + key.PublicKey, + ) + + msg := cose.Sign1Message{ + Headers: headers, + Payload: payload, + } + err := msg.Sign(rand.Reader, nil, coseSigner) + require.NoError(t, err) + + encodable, err := commoncose.NewCoseSign1Message(&msg) + require.NoError(t, err) + encoded, err := encodable.MarshalCBOR() + require.NoError(t, err) + return encoded + } + + verifyDecoded := func(decoded *commoncose.CoseSign1Message) error { + _, ok := decoded.Headers.Protected[commoncose.HeaderLabelCWTClaims] + if ok { + return decoded.VerifyWithCWTPublicKey(nil) + } + return decoded.VerifyWithPublicKey(&key.PublicKey, nil) + } + + testDecodVerify := func(encoded []byte, t *testing.T) { + decoded, err := commoncose.NewCoseSign1MessageFromCBOR(encoded) + assert.NoError(t, err) + + err = verifyDecoded(decoded) + assert.NoError(t, err) + } + + testDecodeSingleNestedVerify := func(encoded []byte, t *testing.T) { + + var err error + var decoded 
*commoncose.CoseSign1Message + decoded, err = commoncose.NewCoseSign1MessageFromCBOR(encoded) + assert.NoError(t, err) + + err = verifyDecoded(decoded) + assert.NoError(t, err) + + singleNested, ok := decoded.Headers.Unprotected[int64(-65535-1)] + assert.True(t, ok) + if !ok { + return + } + b, ok := singleNested.([]byte) + assert.True(t, ok) + if !ok { + return + } + decoded, err = commoncose.NewCoseSign1MessageFromCBOR(b) + assert.NoError(t, err) + err = verifyDecoded(decoded) + assert.NoError(t, err) + return + } + + testDecodeArrayOfNestedVerify := func(encoded []byte, t *testing.T) { + + var err error + var decoded *commoncose.CoseSign1Message + decoded, err = commoncose.NewCoseSign1MessageFromCBOR(encoded) + assert.NoError(t, err) + err = verifyDecoded(decoded) + assert.NoError(t, err) + + arrayOfNested, ok := decoded.Headers.Unprotected[int64(-65535-2)] + assert.True(t, ok) + if !ok { + return + } + outer, ok := arrayOfNested.([]interface{}) + assert.True(t, ok) + for _, inner := range outer { + b, ok := inner.([]byte) + assert.True(t, ok) + if !ok { + return + } + decoded, err := commoncose.NewCoseSign1MessageFromCBOR(b) + assert.NoError(t, err) + err = verifyDecoded(decoded) + assert.NoError(t, err) + } + } + + // TestDecode is a test case specific decoder test function + type TestDecode func(encoded []byte, t *testing.T) + + type fields struct { + Protected cose.ProtectedHeader + Unprotected cose.UnprotectedHeader + Payload []byte + } + tests := []struct { + name string + fields fields + testDecode TestDecode + }{ + { + name: "cbor payload, unprotected header with private range array of signed peaks", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: mustSignPeaks([][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }, { + 0, 1, 2, 3, 4, 5, 6, 
7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }, + }), + }, + Payload: mustMarshalCBOR(MMRState{ + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }}, + Timestamp: 1234, + }), + }, + testDecode: testDecodVerify, + }, + + { + name: "cbor payload, unprotected header with private range signed peak", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: mustSignPeak([]byte{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }), + }, + Payload: mustMarshalCBOR(MMRState{ + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }}, + Timestamp: 1234, + }), + }, + testDecode: testDecodVerify, + }, + + { + name: "cbor payload, unprotected header with private range integer value", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: 123, + }, + Payload: mustMarshalCBOR(MMRState{ + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31}}, + Timestamp: 1234, + }), + }, + testDecode: testDecodVerify, + }, + + { + name: "unprotected header with private range nested signed message", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 1: mustSignMessage([]byte("hello continent"), cose.Headers{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": 
"log attestation key 1", + }, + }), + }, + Payload: []byte("hello world"), + }, + testDecode: testDecodeSingleNestedVerify, + }, + { + name: "unprotected header with private range nested signed message", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 2: [][]byte{ + mustSignMessage([]byte("hello uk"), cose.Headers{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + }), + mustSignMessage([]byte("hello france"), cose.Headers{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + }), + }, + }, + Payload: []byte("hello world"), + }, + testDecode: testDecodeArrayOfNestedVerify, + }, + + { + name: "empty unprotected headers", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{}, + Payload: []byte("hello world"), + }, + testDecode: testDecodVerify, + }, + { + name: "unprotected header with private range integer value", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: 123, + }, + Payload: []byte("hello world"), + }, + testDecode: testDecodVerify, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + var err error + + // cborCodec, err := NewRootSignerCodec() + // require.NoError(t, err) + + headers := cose.Headers{ + Protected: tt.fields.Protected, + Unprotected: tt.fields.Unprotected, + } + + msg := cose.Sign1Message{ + Headers: headers, + Payload: tt.fields.Payload, + } + err = msg.Sign(rand.Reader, nil, coseSigner) + require.NoError(t, err) + + encodable, err := commoncose.NewCoseSign1Message(&msg) + assert.NoError(t, err) + encoded, err := encodable.MarshalCBOR() + 
assert.NoError(t, err) + + if tt.testDecode != nil { + tt.testDecode(encoded, t) + } + }) + } +} + func TestRootSigner_Sign1(t *testing.T) { logger.New("TEST") @@ -42,8 +377,12 @@ func TestRootSigner_Sign1(t *testing.T) { args: args{ subject: "merklelog-attestor", state: MMRState{ - MMRSize: 1, - Root: []byte{1}, + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31}}, Timestamp: 1234, }, }, @@ -68,9 +407,9 @@ func TestRootSigner_Sign1(t *testing.T) { signed, state, err := DecodeSignedRoot(rs.cborCodec, coseMsg) assert.NoError(t, err) - err = VerifySignedRoot( + err = VerifySignedCheckPoint( rs.cborCodec, - dtcose.NewCWTPublicKeyProvider(signed), + commoncose.NewCWTPublicKeyProvider(signed), signed, state, nil, ) // verification must fail if we haven't put the root in @@ -79,10 +418,10 @@ func TestRootSigner_Sign1(t *testing.T) { // This is step 2. Usually we would work out the massif, read that // blob then compute the root from it by passing MMRState.MMRSize to // GetRoot - state.Root = tt.args.state.Root - err = VerifySignedRoot( + state.Peaks = tt.args.state.Peaks + err = VerifySignedCheckPoint( rs.cborCodec, - dtcose.NewCWTPublicKeyProvider(signed), + commoncose.NewCWTPublicKeyProvider(signed), signed, state, nil, ) diff --git a/massifs/rootsigverify.go b/massifs/rootsigverify.go index 235408a..c0ecd41 100644 --- a/massifs/rootsigverify.go +++ b/massifs/rootsigverify.go @@ -13,11 +13,11 @@ type publicKeyProvider interface { } // DecodeSignedRoot decodes the MMRState values from the signed message -// See VerifySignedRoot for a description of how to verify a signed root +// See VerifySignedCheckPoint for a description of how to verify a signed root func DecodeSignedRoot( codec cbor.CBORCodec, msg []byte, ) (*dtcose.CoseSign1Message, MMRState, error) { - signed, err := dtcose.NewCoseSign1MessageFromCBOR(msg, newDecOptions()...) 
+ signed, err := dtcose.NewCoseSign1MessageFromCBOR(msg, newCheckpointDecOptions()...) if err != nil { return nil, MMRState{}, err } @@ -30,19 +30,19 @@ func DecodeSignedRoot( return signed, unverifiedState, nil } -// VerifySignedRoot applies the provided state to the signed message and +// VerifySignedCheckPoint applies the provided state to the signed message and // verifies the result // -// When signing and publishing roots, we remove the root from the signed message -// prior to publishing. So that it can only be verified by recovering the root +// When signing and publishing roots, we remove the peaks from the signed message +// prior to publishing. So that it can only be verified by recovering the peaks // from the mmr at the size in the signed message. // // Verification of a signed root is a 3 step process: // 1. Use DecodeSignedRoot to obtain the MMRState from the signed message. This -// state will not verify as the root has been removed after signing. -// 2. Use MMRState.MMRSize to obtain the root of the log corresponding to that size -// 3. Update the MMRState with the derived root and call this function to complete the verification -func VerifySignedRoot( +// state will not verify as the peaks have been removed after signing. +// 2. Use MMRState.MMRSize to obtain the peaks of the log corresponding to that size +// 3. Update the MMRState with the derived peaks and call this function to complete the verification +func VerifySignedCheckPoint( codec cbor.CBORCodec, keyProvider publicKeyProvider, signed *dtcose.CoseSign1Message, unverifiedState MMRState, external []byte) error { var err error diff --git a/massifs/signedrootreader.go b/massifs/signedrootreader.go index c07790b..d4bd861 100644 --- a/massifs/signedrootreader.go +++ b/massifs/signedrootreader.go @@ -156,7 +156,7 @@ func (s *SignedRootReader) GetSignedRoot( // Get the signed tree head (SignedRoot) for the mmr massif. 
// // NOTICE: TO VERIFY YOU MUST obtain the mmr root from the log using the -// MMRState.MMRSize in the returned MMRState. See {@link VerifySignedRoot} +// MMRState.MMRSize in the returned MMRState. See {@link VerifySignedCheckPoint} // // This may not be the latest mmr head, but it will be the latest for the // argument massifIndex. If the identified massif is complete, the returned SignedRoot diff --git a/massifs/testcommitter.go b/massifs/testcommitter.go index eb7271b..236e717 100644 --- a/massifs/testcommitter.go +++ b/massifs/testcommitter.go @@ -88,14 +88,14 @@ func (c *TestMinimalCommitter) ContextCommitted(ctx context.Context, tenantIdent if mmrSize == 0 { return errors.New("no leaves to seal") } - root, err := mmr.GetRoot(mmrSize, &mc, sha256.New()) + peaks, err := mmr.PeakHashes(&mc, mmrSize-1) if err != nil { return err } state := MMRState{ MMRSize: mmrSize, - Root: root, + Peaks: peaks, Timestamp: time.Now().UnixMilli(), CommitmentEpoch: c.cfg.CommitmentEpoch, IDTimestamp: mc.GetLastIdTimestamp(), diff --git a/mmr/add.go b/mmr/add.go index b547578..9ab9e58 100644 --- a/mmr/add.go +++ b/mmr/add.go @@ -48,13 +48,11 @@ func AddHashedLeaf(store NodeAppender, hasher hash.Hash, hashedLeaf []byte) (uin for IndexHeight(i) > height { iLeft := i - (2 << height) - // XXX: TODO: I believe iRight is always just i - 1 + // iRight is always just i - 1 // because i - (2 << height ) + SiblingOffset(height) // => i - (2 << height ) + (2 << height) - 1 // => i - 1 - // And, intuitively, the 'next' i is always last i + 1, and that is - // always going to be RHS when we are adding - iRight := iLeft + SiblingOffset(height) + iRight := i - 1 hasher.Reset() diff --git a/mmr/bits.go b/mmr/bits.go index 87fa953..833fca4 100644 --- a/mmr/bits.go +++ b/mmr/bits.go @@ -13,7 +13,7 @@ func Log2Uint64(num uint64) uint64 { } func Log2Uint32(num uint32) uint32 { - return uint32(bits.Len32(num)) + return uint32(bits.Len32(num) - 1) } func AllOnes(num uint64) bool { diff --git 
a/mmr/bits_test.go b/mmr/bits_test.go index c8e2cd5..0c3ad2a 100644 --- a/mmr/bits_test.go +++ b/mmr/bits_test.go @@ -32,3 +32,32 @@ func TestLog2Uint64(t *testing.T) { }) } } + +func TestLog2Uint32(t *testing.T) { + type args struct { + num uint32 + } + tests := []struct { + name string + args args + want uint32 + }{ + {"1 -> 0", args{1}, 0}, + {"2 -> 1", args{2}, 1}, + {"3 -> 1", args{3}, 1}, + {"4 -> 2", args{4}, 2}, + {"8 -> 3", args{8}, 3}, + {"16 -> 4", args{16}, 4}, + {"17 -> 4", args{17}, 4}, + {"18 -> 4", args{18}, 4}, + {"19 -> 4", args{19}, 4}, + {"32 -> 5", args{32}, 5}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := Log2Uint32(tt.args.num); got != tt.want { + t.Errorf("Log2Uint64() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/mmr/consistentroots.go b/mmr/consistentroots.go new file mode 100644 index 0000000..7480370 --- /dev/null +++ b/mmr/consistentroots.go @@ -0,0 +1,52 @@ +package mmr + +import ( + "bytes" + "errors" + "hash" +) + +var ( + ErrAccumulatorProofLen = errors.New("a proof for each accumulator is required") +) + +// ConsistentRoots is supplied with the accumulator from which consistency is +// being shown, and an inclusion proof for each accumulator entry in a future MMR +// state. +// +// The algorithm recovers the necessary prefix (peaks) of the future +// accumulator against which the proofs were obtained. +// It is typical that many nodes in the original accumulator share the same peak in the new accumulator. +// The returned list will be a descending height ordered list of elements from the +// accumulator for the consistent future state. It may be exactly the future +// accumulator or it may be a prefix of it. +// +// The order of the roots returned matches the order of the nodes in the accumulator. +// +// Args: +// - ifrom the last node index of the the complete MMR from which consistency was proven. 
+// - accumulatorfrom the node values correponding to the peaks of the accumulator at MMR(sizeA) +// - proofs the inclusion proofs for each node in accumulatorfrom in MMR(sizeB) +func ConsistentRoots(hasher hash.Hash, ifrom uint64, accumulatorfrom [][]byte, proofs [][][]byte) ([][]byte, error) { + frompeaks := Peaks(ifrom) + + if len(frompeaks) != len(proofs) { + return nil, ErrAccumulatorProofLen + } + + roots := [][]byte{} + + for i := 0; i < len(accumulatorfrom); i++ { + // remembering that peaks are 1 based (for now) + root := IncludedRoot(hasher, frompeaks[i], accumulatorfrom[i], proofs[i]) + // The nature of MMR's is that many nodes are committed by the + // same accumulator peak, and that peak changes with + // low frequency. + if len(roots) > 0 && bytes.Equal(roots[len(roots)-1], root) { + continue + } + roots = append(roots, root) + } + + return roots, nil +} diff --git a/mmr/draft_kat39_test.go b/mmr/draft_kat39_test.go new file mode 100644 index 0000000..d6ac7d8 --- /dev/null +++ b/mmr/draft_kat39_test.go @@ -0,0 +1,224 @@ +package mmr + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// tests and KAT data corresponding to the MMRIVER draft + +var ( + KAT39CompleteMMRSizes = []uint64{1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 31, 32, 34, 35, 38, 39} + KAT39CompleteMMRIndices = []uint64{0, 2, 3, 6, 7, 9, 10, 14, 15, 17, 18, 21, 22, 24, 25, 30, 31, 33, 34, 37, 38} + KAT39LeafMMRIndices = []uint64{0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 31, 32, 34, 35, 38} + KAT39PeakIndices = map[uint64][]uint64{ + 0: {0}, + 2: {2}, + 3: {2, 3}, + 6: {6}, + 7: {6, 7}, + 9: {6, 9}, + 10: {6, 9, 10}, + 14: {14}, + 15: {14, 15}, + 17: {14, 17}, + 18: {14, 17, 18}, + 21: {14, 21}, + 22: {14, 21, 22}, + 24: {14, 21, 24}, + 25: {14, 21, 24, 25}, + 30: {30}, + 31: {30, 31}, + 33: {30, 33}, + 34: {30, 33, 34}, + 37: {30, 37}, + 38: {30, 
37, 38}, + } + // Note: its just easier all round to maintain these as hex strings and convert to bytes on demand. + KAT39PeakHashes = map[uint64][]string{ + 0: {"af5570f5a1810b7af78caf4bc70a660f0df51e42baf91d4de5b2328de0e83dfc"}, + 2: {"ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8"}, + 3: {"ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8", "d5688a52d55a02ec4aea5ec1eadfffe1c9e0ee6a4ddbe2377f98326d42dfc975"}, + 6: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88"}, + 7: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "a3eb8db89fc5123ccfd49585059f292bc40a1c0d550b860f24f84efb4760fbf2"}, + 9: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d"}, + 10: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d", "8d85f8467240628a94819b26bee26e3a9b2804334c63482deacec8d64ab4e1e7"}, + 14: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112"}, + 15: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "e66c57014a6156061ae669809ec5d735e484e8fcfd540e110c9b04f84c0b4504"}, + 17: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21"}, + 18: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21", "5bc67471c189d78c76461dcab6141a733bdab3799d1d69e0c419119c92e82b3d"}, + 21: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710"}, + 22: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "7a42e3892368f826928202014a6ca95a3d8d846df25088da80018663edf96b1c"}, + 24: 
{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae"}, + 25: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae", "561f627b4213258dc8863498bb9b07c904c3c65a78c1a36bca329154d1ded213"}, + 30: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7"}, + 31: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "1664a6e0ea12d234b4911d011800bb0f8c1101a0f9a49a91ee6e2493e34d8e7b"}, + 33: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f"}, + 34: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f", "4d75f61869104baa4ccff5be73311be9bdd6cc31779301dfc699479403c8a786"}, + 37: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa"}, + 38: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa", "e9a5f5201eb3c3c856e0a224527af5ac7eb1767fb1aff9bd53ba41a60cde9785"}, + } + + KAT39Leaves = []string{ + "af5570f5a1810b7af78caf4bc70a660f0df51e42baf91d4de5b2328de0e83dfc", + "cd2662154e6d76b2b2b92e70c0cac3ccf534f9b74eb5b89819ec509083d00a50", + "d5688a52d55a02ec4aea5ec1eadfffe1c9e0ee6a4ddbe2377f98326d42dfc975", + "8005f02d43fa06e7d0585fb64c961d57e318b27a145c857bcd3a6bdb413ff7fc", + "a3eb8db89fc5123ccfd49585059f292bc40a1c0d550b860f24f84efb4760fbf2", + "4c0e071832d527694adea57b50dd7b2164c2a47c02940dcf26fa07c44d6d222a", + "8d85f8467240628a94819b26bee26e3a9b2804334c63482deacec8d64ab4e1e7", + 
"0b5000b73a53f0916c93c68f4b9b6ba8af5a10978634ae4f2237e1f3fbe324fa", + "e66c57014a6156061ae669809ec5d735e484e8fcfd540e110c9b04f84c0b4504", + "998e907bfbb34f71c66b6dc6c40fe98ca6d2d5a29755bc5a04824c36082a61d1", + "5bc67471c189d78c76461dcab6141a733bdab3799d1d69e0c419119c92e82b3d", + "1b8d0103e3a8d9ce8bda3bff71225be4b5bb18830466ae94f517321b7ecc6f94", + "7a42e3892368f826928202014a6ca95a3d8d846df25088da80018663edf96b1c", + "aed2b8245fdc8acc45eda51abc7d07e612c25f05cadd1579f3474f0bf1f6bdc6", + "561f627b4213258dc8863498bb9b07c904c3c65a78c1a36bca329154d1ded213", + "1209fe3bc3497e47376dfbd9df0600a17c63384c85f859671956d8289e5a0be8", + "1664a6e0ea12d234b4911d011800bb0f8c1101a0f9a49a91ee6e2493e34d8e7b", + "707d56f1f282aee234577e650bea2e7b18bb6131a499582be18876aba99d4b60", + "4d75f61869104baa4ccff5be73311be9bdd6cc31779301dfc699479403c8a786", + "0764c726a72f8e1d245f332a1d022fffdada0c4cb2a016886e4b33b66cb9a53f", + "e9a5f5201eb3c3c856e0a224527af5ac7eb1767fb1aff9bd53ba41a60cde9785", + } + + KAT39Nodes = []string{ + "af5570f5a1810b7af78caf4bc70a660f0df51e42baf91d4de5b2328de0e83dfc", + "cd2662154e6d76b2b2b92e70c0cac3ccf534f9b74eb5b89819ec509083d00a50", + "ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8", + "d5688a52d55a02ec4aea5ec1eadfffe1c9e0ee6a4ddbe2377f98326d42dfc975", + "8005f02d43fa06e7d0585fb64c961d57e318b27a145c857bcd3a6bdb413ff7fc", + "9a18d3bc0a7d505ef45f985992270914cc02b44c91ccabba448c546a4b70f0f0", + "827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", + "a3eb8db89fc5123ccfd49585059f292bc40a1c0d550b860f24f84efb4760fbf2", + "4c0e071832d527694adea57b50dd7b2164c2a47c02940dcf26fa07c44d6d222a", + "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d", + "8d85f8467240628a94819b26bee26e3a9b2804334c63482deacec8d64ab4e1e7", + "0b5000b73a53f0916c93c68f4b9b6ba8af5a10978634ae4f2237e1f3fbe324fa", + "6f3360ad3e99ab4ba39f2cbaf13da56ead8c9e697b03b901532ced50f7030fea", + "508326f17c5f2769338cb00105faba3bf7862ca1e5c9f63ba2287e1f3cf2807a", + 
"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", + "e66c57014a6156061ae669809ec5d735e484e8fcfd540e110c9b04f84c0b4504", + "998e907bfbb34f71c66b6dc6c40fe98ca6d2d5a29755bc5a04824c36082a61d1", + "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21", + "5bc67471c189d78c76461dcab6141a733bdab3799d1d69e0c419119c92e82b3d", + "1b8d0103e3a8d9ce8bda3bff71225be4b5bb18830466ae94f517321b7ecc6f94", + "0a4d7e66c92de549b765d9e2191027ff2a4ea8a7bd3eb04b0ed8ee063bad1f70", + "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", + "7a42e3892368f826928202014a6ca95a3d8d846df25088da80018663edf96b1c", + "aed2b8245fdc8acc45eda51abc7d07e612c25f05cadd1579f3474f0bf1f6bdc6", + "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae", + "561f627b4213258dc8863498bb9b07c904c3c65a78c1a36bca329154d1ded213", + "1209fe3bc3497e47376dfbd9df0600a17c63384c85f859671956d8289e5a0be8", + "6b4a3bd095c63d1dffae1ac03eb8264fdce7d51d2ac26ad0ebf9847f5b9be230", + "4459f4d6c764dbaa6ebad24b0a3df644d84c3527c961c64aab2e39c58e027eb1", + "77651b3eec6774e62545ae04900c39a32841e2b4bac80e2ba93755115252aae1", + "d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", + "1664a6e0ea12d234b4911d011800bb0f8c1101a0f9a49a91ee6e2493e34d8e7b", + "707d56f1f282aee234577e650bea2e7b18bb6131a499582be18876aba99d4b60", + "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f", + "4d75f61869104baa4ccff5be73311be9bdd6cc31779301dfc699479403c8a786", + "0764c726a72f8e1d245f332a1d022fffdada0c4cb2a016886e4b33b66cb9a53f", + "c861552e9e17c41447d375c37928f9fa5d387d1e8470678107781c20a97ebc8f", + "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa", + "e9a5f5201eb3c3c856e0a224527af5ac7eb1767fb1aff9bd53ba41a60cde9785", + } +) + +func hexHashList(hashes [][]byte) []string { + var hexes []string + for _, b := range hashes { + hexes = append(hexes, hex.EncodeToString(b)) + } + return hexes +} + +func mustHex2Hash(t *testing.T, hexEncodedHash string) []byte { + b, err 
:= hex.DecodeString(hexEncodedHash) + require.NoError(t, err) + return b +} + +type testDBLinear struct { + nodes [][]byte +} + +func (db *testDBLinear) Get(i uint64) ([]byte, error) { + if int(i) < len(db.nodes) { + return db.nodes[i], nil + } + return nil, fmt.Errorf("index %d out of range", i) +} + +// Append adds a new node to the db and returns the index of the next addition +func (db *testDBLinear) Append(b []byte) (uint64, error) { + db.nodes = append(db.nodes, b) + return uint64(len(db.nodes)), nil +} + +// TestDraftAddHashedLeaf tests that AddHashedLeaf creates the expected KAT39 MMR +func TestDraftAddHashedLeaf(t *testing.T) { + db := &testDBLinear{} + for e, leaf := range KAT39Leaves { + leafHash := mustHex2Hash(t, leaf) + iNext, err := AddHashedLeaf(db, sha256.New(), leafHash) + assert.NoError(t, err) + assert.Equal(t, MMRIndex(uint64(e+1)), iNext) + } + assert.Equal(t, len(KAT39Nodes), len(db.nodes)) + for i := 0; i < len(KAT39Nodes); i++ { + assert.Equal(t, mustHex2Hash(t, KAT39Nodes[i]), db.nodes[i]) + } +} + +// TestDraftAddLeafAccumulators tests that the AddHashedLeaf produces the expected accumulator states +func TestDraftAddLeafAccumulators(t *testing.T) { + db := &testDBLinear{} + for _, leaf := range KAT39Leaves { + leafHash := mustHex2Hash(t, leaf) + _, err := AddHashedLeaf(db, sha256.New(), leafHash) + assert.NoError(t, err) + } + + // Check the peaks are all in the expected places. 
+ for i, wantPeaks := range KAT39PeakHashes { + peaks, err := PeakHashes(db, i) + assert.NoError(t, err) + assert.Equal(t, wantPeaks, hexHashList(peaks)) + } +} + +// TestDraftKAT39PeakHashes tests that the peak indices match the KAT39 values +func TestDraftKAT39Peaks(t *testing.T) { + for mmrIndex, wantPeaks := range KAT39PeakIndices { + t.Run(fmt.Sprintf("%d", mmrIndex), func(t *testing.T) { + if got := Peaks(mmrIndex); !reflect.DeepEqual(got, wantPeaks) { + t.Errorf("Peaks() = %v, want %v", got, wantPeaks) + } + }) + } +} + +// TestDraftKAT39PeakHashes tests that the peak indices obtain the expected KAT39 hashes +func TestDraftKAT39PeakHashes(t *testing.T) { + + db := NewCanonicalTestDB(t) + + for mmrIndex, wantPeaksHex := range KAT39PeakHashes { + t.Run(fmt.Sprintf("%d", mmrIndex), func(t *testing.T) { + peakHashes, err := PeakHashes(db, mmrIndex) + require.NoError(t, err) + peakHashesHex := hexHashList(peakHashes) + if !reflect.DeepEqual(peakHashesHex, wantPeaksHex) { + t.Errorf("PeakHashes() = %v, want %v", peakHashesHex, wantPeaksHex) + } + }) + } +} diff --git a/mmr/firstmmrsize.go b/mmr/firstmmrsize.go new file mode 100644 index 0000000..ef7369c --- /dev/null +++ b/mmr/firstmmrsize.go @@ -0,0 +1,31 @@ +package mmr + +// FirstMMRSize returns the first complete MMRSize that contains the provided +// mmrIndex. mmrIndices are used to identify nodes. mmrSizes are the result of +// *adding* nodes to mmr's, and, because of adding the back fill nodes for the +// leaves, the range of valid sizes is not continuous. Typically, it is +// possible to "do the right thing" with just LeafCount, but its use is error +// prone because of this fact. 
+// +// The outputs of this function for the following mmrIndices are +// +// [1, 3, 3, 4, 7, 7, 7, 8, 10, 10, 11] +// +// 2 6 +// / \ +// 1 2 5 9 +// / \ / \ / \ +// 0 0 1 3 4 7 8 10 +func FirstMMRSize(mmrIndex uint64) uint64 { + + i := mmrIndex + h0 := IndexHeight(i) + h1 := IndexHeight(i + 1) + for h0 < h1 { + i++ + h0 = h1 + h1 = IndexHeight(i + 1) + } + + return i + 1 +} diff --git a/mmr/go.mod b/mmr/go.mod index 3dea1cd..0a5fae5 100644 --- a/mmr/go.mod +++ b/mmr/go.mod @@ -8,6 +8,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/mmr/go.sum b/mmr/go.sum index c18bab0..aa1e07e 100644 --- a/mmr/go.sum +++ b/mmr/go.sum @@ -11,8 +11,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/mmr/hashpospair.go b/mmr/hashpospair.go new file mode 100644 index 0000000..1b88e6e --- /dev/null +++ b/mmr/hashpospair.go @@ -0,0 +1,13 @@ +package 
mmr + +import "hash" + +// HashPosPair64 returns H(pos || a || b) +// ** the hasher is reset ** +func HashPosPair64(hasher hash.Hash, pos uint64, a []byte, b []byte) []byte { + hasher.Reset() + HashWriteUint64(hasher, pos) + hasher.Write(a) + hasher.Write(b) + return hasher.Sum(nil) +} diff --git a/mmr/includedroot.go b/mmr/includedroot.go new file mode 100644 index 0000000..7861d43 --- /dev/null +++ b/mmr/includedroot.go @@ -0,0 +1,49 @@ +package mmr + +import "hash" + +// IncludedRoot calculates the accumulator peak for the provided +// proof and node value. Note that both interior and leaf nodes are handled +// identically +// +// Arguments: +// - i is the index the nodeHash is to be shown at +// - nodehash the value whose inclusion is to be shown +// - proof is the path of sibling values committing i. They recreate the unique +// accumulator peak that committed i to the MMR state from which the proof was +// produced. +func IncludedRoot(hasher hash.Hash, i uint64, nodeHash []byte, proof [][]byte) []byte { + + root := nodeHash + + g := IndexHeight(i) + + for _, sibling := range proof { + + // If the index after i is higher, it is the left parent, + // and i is the right sibling + + if IndexHeight(i+1) > g { + + // The parent of a right sibling is stored immediately after + i = i + 1 + + // Set `root` to `H(i+1 || sibling || root)` + root = HashPosPair64(hasher, i+1, sibling, root) + } else { + + // The parent of a left sibling is stored immediately after + // its right sibling. + + i = i + (2 << g) + + // Set `root` to `H(i+1 || root || sibling)` + root = HashPosPair64(hasher, i+1, root, sibling) + } + + // Set g to the height of the next item in the path. 
+ g = g + 1 + } + + return root +} diff --git a/mmr/indexheight.go b/mmr/indexheight.go index 8289e3e..b94ec8a 100644 --- a/mmr/indexheight.go +++ b/mmr/indexheight.go @@ -13,9 +13,9 @@ import ( // JumpLeftPerfect is used to iteratively discover the left most node at the same // height as the node identified by pos. This is how we discover the height in -// the tree of an arbitrary pusition so as to avoid ever having to materialise -// the whole tree. It 'jumps left' by the size of the perfect tree which would -// contain pos. +// the tree of an arbitrary position so as to avoid ever having to materialize +// the whole tree. It 'jumps left' by the size of the largest perfect tree which would +// precede pos. // // So given, // @@ -29,10 +29,10 @@ import ( // / \ / \ / \ / \ / \ // 0 1 2 4 5 8 9 11 12 16 17 // -// JumpLeftPerfect(13) returns 6 because the size of the perfect tree containing 13 is -// 7. The next jump, JumpLeftPerfect(6) returns 3, because the perfect tree containing -// 6 is size 3. and the 'all ones' node is found. And the count of 1's - 1 is the -// height. +// JumpLeftPerfect(13) returns 6 because the size of the largest perfect tree +// preceding 13 is 7. The next jump, JumpLeftPerfect(6) returns 3, because the +// perfect tree preceding 6 is size 3. and the 'all ones' node is found. And +// the count of 1's - 1 is the index height. // // ** Note ** that pos is the *one based* position not the zero based index. 
func JumpLeftPerfect(pos uint64) uint64 { diff --git a/mmr/indexheight_test.go b/mmr/indexheight_test.go index 3f00028..5818d47 100644 --- a/mmr/indexheight_test.go +++ b/mmr/indexheight_test.go @@ -111,6 +111,20 @@ func TestIndexHeight(t *testing.T) { // 1 2 5 9 12 17 20 // / \ / \ / \ / \ / \ // 0 0 1 3 4 7 8 10 11 15 16 18 | 19 + { + "1", args{1}, 0, + }, + + { + "0", args{0}, 0, + }, + + { + "three is 0", args{3}, 0, + }, + { + "four is 0", args{4}, 0, + }, { "nine is 1", args{9}, 1, diff --git a/mmr/leafcount.go b/mmr/leafcount.go index eb4dff9..2292424 100644 --- a/mmr/leafcount.go +++ b/mmr/leafcount.go @@ -1,91 +1,15 @@ package mmr -import ( - "math" - "math/bits" -) - // LeafCount returns the number of leaves in the largest mmr whose size is <= // the supplied size. See also [merklelog/mmr/PeakBitmap] // // This can safely be use to obtain the leaf index *only* when size is known to // be a valid mmr size. Typically just before or just after calling AddHashedLeaf -// If in any doubt, instead do: -// -// leafIndex = LeafCount(FirstMMRSize(mmrIndex)) -1 +// If in any doubt, instead use LeafIndex() + 1 func LeafCount(size uint64) uint64 { return PeaksBitmap(size) } -// FirstMMRSize returns the first complete MMRSize that contains the provided -// mmrIndex. mmrIndices are used to identify nodes. mmrSizes are the result of -// *adding* nodes to mmr's, and, because of adding the back fill nodes for the -// leaves, the range of valid sizes is not continuous. Typically, it is -// possible to "do the right thing" with just LeafCount, but its use is error -// prone because of this fact. 
-// -// The outputs of this function for the following mmrIndices are -// -// [1, 3, 3, 4, 7, 7, 7, 8, 10, 10, 11] -// -// 2 6 -// / \ -// 1 2 5 9 -// / \ / \ / \ -// 0 0 1 3 4 7 8 10 -func FirstMMRSize(mmrIndex uint64) uint64 { - - i := mmrIndex - h0 := IndexHeight(i) - h1 := IndexHeight(i + 1) - for h0 < h1 { - i++ - h0 = h1 - h1 = IndexHeight(i + 1) - } - - return i + 1 -} - func LeafIndex(mmrIndex uint64) uint64 { return LeafCount(FirstMMRSize(mmrIndex)) - 1 } - -// PeakMap returns a bit mask where a 1 corresponds to a peak and the position -// of the bit is the height of that peak. The resulting value is also the count -// of leaves. This is due to the binary nature of the tree. -// -// For example, with an mmr with size 19, there are 11 leaves -// -// 14 -// / \ -// 6 13 -// / \ / \ -// 2 5 9 12 17 -// / \ / \ / \ / \ / \ -// -// 0 1 3 4 7 8 10 11 15 16 18 -// -// PeakMap(19) returns 0b1011 which shows, reading from the right (low bit), -// there are peaks, that the lowest peak is at height 0, the second lowest at -// height 1, then the next and last peak is at height 3. -// -// If the provided mmr size is invalid, the returned map will be for the largest -// valid mmr size < the provided invalid size. -func PeaksBitmap(mmrSize uint64) uint64 { - if mmrSize == 0 { - return 0 - } - pos := mmrSize - peakSize := uint64(math.MaxUint64) >> bits.LeadingZeros64(mmrSize) - peakMap := uint64(0) - for peakSize > 0 { - peakMap <<= 1 - if pos >= peakSize { - pos -= peakSize - peakMap |= 1 - } - peakSize >>= 1 - } - return peakMap -} diff --git a/mmr/mmrindex.go b/mmr/mmrindex.go new file mode 100644 index 0000000..7e9e532 --- /dev/null +++ b/mmr/mmrindex.go @@ -0,0 +1,23 @@ +package mmr + +import "math/bits" + +// MMRIndex returns the node index for the leaf e +// +// Args: +// - leafIndex: the leaf index, where the leaves are numbered consequtively, ignoring interior nodes. 
+// +// Returns: +// +// The mmr index for the element leafIndex +func MMRIndex(leafIndex uint64) uint64 { + + sum := uint64(0) + for leafIndex > 0 { + h := bits.Len64(leafIndex) + sum += (1 << h) - 1 + half := 1 << (h - 1) + leafIndex -= uint64(half) + } + return sum +} diff --git a/mmr/mmrindex_test.go b/mmr/mmrindex_test.go new file mode 100644 index 0000000..f9a201d --- /dev/null +++ b/mmr/mmrindex_test.go @@ -0,0 +1,41 @@ +package mmr + +import ( + "testing" +) + +func TestMMRIndex(t *testing.T) { + tests := []struct { + leafIndex uint64 + expected uint64 + }{ + {0, 0}, + {1, 1}, + {2, 3}, + {3, 4}, + {4, 7}, + {5, 8}, + {6, 10}, + {7, 11}, + {8, 15}, + {9, 16}, + {10, 18}, + {11, 19}, + {12, 22}, + {13, 23}, + {14, 25}, + {15, 26}, + {16, 31}, + {17, 32}, + {18, 34}, + {19, 35}, + {20, 38}, + } + + for _, test := range tests { + result := MMRIndex(test.leafIndex) + if result != test.expected { + t.Errorf("MMRIndex(%d) = %d; expected %d", test.leafIndex, result, test.expected) + } + } +} diff --git a/mmr/peaks.go b/mmr/peaks.go index 5af1e2e..b920b93 100644 --- a/mmr/peaks.go +++ b/mmr/peaks.go @@ -1,91 +1,223 @@ package mmr -// Peaks returns the array of mountain peaks in the MMR. This is completely -// deterministic given a valid mmr size. If the mmr size is invalid, this -// function returns nil. +import ( + "math/bits" +) + +// Peaks returns the array of mountain peak indices in the MMR. +// +// This is completely deterministic given a complete mmr index. +// If the mmr index is not complete, or is otherwise invalid, is invalid, this function returns nil. // -// It is guaranteed that the peaks are listed in ascending order of position -// value. The highest peak has the lowest position and is listed first. This is -// a consequence of the fact that the 'little' 'down range' peaks can only appear +// The peaks are listed in ascending order of mmr index value. +// The highest peak has the lowest index and is listed first. 
This is a +// consequence of the fact that the 'little' 'down range' peaks can only appear // to the 'right' of the first perfect peak, and so on recursively. // -// Note that as a matter of implementation convenience and efficency the peaks -// are returned as *one based positions* -// -// So given the example below, which has an mmrSize of 17, the peaks are [15, 18] -// -// 3 15 -// / \ -// / \ -// / \ -// 2 7 14 -// / \ / \ -// 1 3 6 10 13 18 -// / \ / \ / \ / \ / \ -// 0 1 2 4 5 8 9 11 12 16 17 -func Peaks(mmrSize uint64) []uint64 { - if mmrSize == 0 { - return nil - } +// Given the example below, which has an mmrSize of 10, the peaks are [6, 9]: +// +// 2 6 +// / \ +// 1 2 5 9 +// / \ / \ / \ +// 0 0 1 3 4 7 8 +func Peaks(mmrIndex uint64) []uint64 { + + // The peaks algorithm works using the binary properties of the mmr *positions* + + mmrSize := mmrIndex + 1 // catch invalid range, where siblings exist but no parent exists if PosHeight(mmrSize+1) > PosHeight(mmrSize) { return nil } + peak := uint64(0) + var peaks []uint64 // The top peak is always the left most and, when counting from 1, will have all binary '1's - top := uint64(1) - for (top - 1) <= mmrSize { - top <<= 1 + for mmrSize != 0 { + // This next step computes the ^2 floor of the bits in mmrSize, which + // picks out the highest peak (and also left most) remaining peak in + // mmrSize (See TopPeak) + peakSize := TopPeak(mmrSize-1) + 1 // + 1 to recover position form + + // Because we *subtract* the computed peak size from mmrSize, we need to + // recover the actual peak position. The arithmetic all works out so we + // just accumulate the peakSizes as we go, and the result is always the + // peak value against the original mmrSize we were given. + peak = peak + peakSize + peaks = append(peaks, peak-1) + mmrSize -= peakSize } - top = (top >> 1) - 1 - if top == 0 { + return peaks +} + +// PosPeaks is a depricated version of peaks which returns an array of mmr positions rather than indices. 
+func PosPeaks(mmrSize uint64) []uint64 { + + peaks := Peaks(mmrSize - 1) + if peaks == nil { return nil } - - peaks := []uint64{top} - peak := top -OuterLoop: - for { - peak = JumpRightSibling(peak) - for peak > mmrSize { - if p, ok := LeftChild(peak); ok { - peak = p - continue - } - break OuterLoop - } - peaks = append(peaks, peak) + for i, p := range peaks { + peaks[i] = p + 1 } return peaks } -func HeightPeakRight(mmrSize uint64, height uint64, i uint64) (uint64, uint64, bool) { +func PeakHashes(store indexStoreGetter, mmrIndex uint64) ([][]byte, error) { + // Note: we can implement this directly any time we want, but lets re-use the testing for Peaks + var path [][]byte + for _, i := range Peaks(mmrIndex) { + stored, err := store.Get(i) + if err != nil { + return nil, err + } - // jump to right sibling - i += SiblingOffset(height) + value := make([]byte, 32) + copy(value, stored) - // then the left child - for i > mmrSize-1 { - if height == 0 { - return 0, 0, false - } - height -= 1 - i -= (2 << height) // removes the parent offset + // Note: we create a copy here to ensure the value is not modified under the callers feet + path = append(path, value) } - return height, i, true + return path, nil } -// HighestPos returns the height and the peak index for the highest and -// most left node in the MMR of the given size. -func HighestPos(mmrSize uint64) (uint64, uint64) { - height := uint64(0) - iPrev := uint64(0) - i := LeftPosForHeight(height) - for i < mmrSize { - height += 1 - iPrev = i - i = LeftPosForHeight(height) +// PeakIndex returns the index of the peak accumulator for the peak with the provided proof length. +// +// Given: +// +// leafCount - the count of elements in the current accumulator, eg LeafCount(mmrIndex). 
+// d - the length of the proof of any element in the mmr identified by leafCount +// +// Return +// +// The index of the accumulator peak produced by a valid inclusion proof of length d +// +// Note that leafCount identifies the mmr state, not the element. +// +// For interior nodes, you must account for the height by adding IndexHeight(mmrIndex) to the proof length d. +// +// Example: +// +// peaks = PosPeaks(18) = [14, 17] +// peakBits = LeafCount(18) = 101 +// 1 = d = proof len for 6 +// 2 = IndexHeight(6) +// peaks[PeakIndex(peakBits, 1 + 2)] == 14 +// +// For this MMR: +// +// 3 14 +// / \ +// / \ +// / \ +// / \ +// 2 6 13 +// / \ / \ +// 1 2 5 9 12 17 +// / \ / \ / \ / \ / \ +// 0 0 1 3 4 7 8 10 11 15 16 +func PeakIndex(leafCount uint64, d int) int { + + // The bitmask corresponding to the peaks in the accumulator is the leaf + // index e + 1, which is leafCount. + // The inclusion proof depth for any element is always the index + // of a set bit in this mask. + // And the bit corresponds to the peak which is the root for the element whose inclusion is proven. + + peaksMask := uint64(1<<(d+1) - 1) + + // The count of non zero bits + n := bits.OnesCount64(leafCount & peaksMask) + + // We are adjusting to account for the gaps removed from the accumulator in + our 'packed' representation, but the algebra just works out so we index + by the number of set bits. 
+ + // A[d - (d - n) - 1] = A[d -d + n -1] = A[n-1] + + // Now account for the fact that the accumulator lists peaks highest to lowest + // So we need to invert the index + + // The accumulator length a is just the number of bits set in the leaf count + + // (a - 1) - (n -1) = a - 1 - n + 1 = a - n + + return bits.OnesCount64(leafCount) - n +} + +// TopPeak returns the smallest, leftmost, peak containing *or equal to* i +// +// This is essentially a ^2 *floor* function for the accumulation of bits: +// +// TopPeak(0) = TopPeak(1) = 0 +// TopPeak(1) = TopPeak(2) = TopPeak(3) = TopPeak(4) = TopPeak(5) = 2 +// TopPeak(6) = 6 +// +// 2 6 +// / \ +// 1 2 5 9 +// / \ / \ / \ +// 0 0 1 3 4 7 8 10 +func TopPeak(i uint64) uint64 { + + // This works by working out the next peak *position* up then subtracting 1, which is a + // flooring function for the bits over the current peak + return 1<<(BitLength64(i+2)-1) - 2 +} + +// TopHeight returns the index height of the largest perfect peak contained in, or exactly, pos +// This is essentially a ^2 *floor* function for the accumulation of bits: +// +// TopHeight(0) = TopHeight(1) = 0 +// TopHeight(1) = TopHeight(2) = TopHeight(3) = TopHeight(4) = TopHeight(5) = 1 +// TopHeight(6) = 2 +// +// 2 6 +// / \ +// 1 2 5 9 +// / \ / \ / \ +// 0 0 1 3 4 7 8 10 +func TopHeight(i uint64) uint64 { + return BitLength64(i+2) - 2 +} + +// PeaksBitmap returns a bit mask where a 1 corresponds to a peak and the position +// of the bit is the height of that peak. The resulting value is also the count +// of leaves. This is due to the binary nature of the tree. 
+// +// For example, with an mmr with size 19, there are 11 leaves +// +// 14 +// / \ +// 6 13 +// / \ / \ +// 2 5 9 12 17 +// / \ / \ / \ / \ / \ +// 0 1 3 4 7 8 10 11 15 16 18 +// +// PeakMap(19) returns 0b1011 which shows, reading from the right (low bit), +// there are peaks, that the lowest peak is at height 0, the second lowest at +// height 1, then the next and last peak is at height 3. +// +// If the provided mmr size is invalid, the returned map will be for the largest +// valid mmr size < the provided invalid size. +func PeaksBitmap(mmrSize uint64) uint64 { + if mmrSize == 0 { + return 0 + } + pos := mmrSize + // peakSize := uint64(math.MaxUint64) >> bits.LeadingZeros64(mmrSize) + peakSize := (uint64(1) << bits.Len64(mmrSize)) - 1 + peakMap := uint64(0) + for peakSize > 0 { + peakMap <<= 1 + if pos >= peakSize { + pos -= peakSize + peakMap |= 1 + } + peakSize >>= 1 } - return height - 1, iPrev + return peakMap } diff --git a/mmr/peaks_test.go b/mmr/peaks_test.go index a438796..ef3067a 100644 --- a/mmr/peaks_test.go +++ b/mmr/peaks_test.go @@ -2,12 +2,13 @@ package mmr import ( "fmt" - "math" "reflect" "testing" + + "github.com/stretchr/testify/assert" ) -func TestPeaks(t *testing.T) { +func TestPosPeaks(t *testing.T) { type args struct { mmrSize uint64 } @@ -27,7 +28,36 @@ func TestPeaks(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := Peaks(tt.args.mmrSize); !reflect.DeepEqual(got, tt.want) { + if got := PosPeaks(tt.args.mmrSize); !reflect.DeepEqual(got, tt.want) { + t.Errorf("PosPeaks() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPeaks(t *testing.T) { + type args struct { + mmrIndex uint64 + } + tests := []struct { + name string + args args + want []uint64 + }{ + + {"complete mmr(index 123) gives two peaks", args{FirstMMRSize(123)}, []uint64{126, 127}}, + {"index 123 gives nil", args{123}, []uint64(nil)}, + {"complete index 11 gives three peaks", args{10}, []uint64{6, 9, 10}}, + {"complete index 
26 gives 4 peaks", args{25}, []uint64{14, 21, 24, 25}}, + {"complete index 9 gives two peaks", args{9}, []uint64{6, 9}}, + {"complete index 12, which is invalid because it should have been perfectly filled, gives nil", args{13}, nil}, + {"complete index 14, which is perfectly filled, gives a single peak", args{14}, []uint64{14}}, + {"complete index 17 gives two peaks", args{17}, []uint64{14, 17}}, + {"complete index 21 gives two peaks", args{21}, []uint64{14, 21}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := Peaks(tt.args.mmrIndex); !reflect.DeepEqual(got, tt.want) { t.Errorf("Peaks() = %v, want %v", got, tt.want) } }) @@ -95,33 +125,180 @@ func TestAncestors(t *testing.T) { fmt.Printf("height: %d\n", massifHeight) } -func TestHighestPos(t *testing.T) { +func TestTopHeight(t *testing.T) { type args struct { - mmrSize uint64 + mmrIndex uint64 } tests := []struct { - name string - args args - want uint64 - want1 uint64 + name string + args args + want uint64 }{ - {"size 0 corner case", args{0}, math.MaxUint64, 0}, - {"size 1 corner case", args{1}, 0, 0}, - {"size 2", args{2}, 0, 0}, - {"size 3", args{3}, 1, 2}, - {"size 4, two peaks, single solo at i=3", args{4}, 1, 2}, - {"size 5, three peaks, two solo at i=3, i=4", args{5}, 1, 2}, - {"size 6, two perfect peaks,i=2, i=5 (note add does not ever leave the MMR in this state)", args{6}, 1, 2}, - {"size 7, one perfect peaks at i=6", args{7}, 2, 6}, + // 2 6 + // / \ + // 1 2 5 9 + // / \ / \ / \ + // 0 0 1 3 4 7 8 10 + + {"complete index 0 corner case", args{0}, 0}, + {"complete index 2", args{2}, 1}, + {"complete index 3, two peaks, single solo at i=3", args{3}, 1}, + {" index 4, three peaks, two solo at i=3, i=4", args{4}, 1}, + {" index 5, two perfect peaks,i=2, i=5", args{5}, 1}, + {"complete index 7, one perfect peaks at i=6", args{6}, 2}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, got1 := HighestPos(tt.args.mmrSize) + got := 
TopHeight(tt.args.mmrIndex) if got != tt.want { t.Errorf("HighestPos() got = %v, want %v", got, tt.want) } - if got1 != tt.want1 { - t.Errorf("HigestPos() got1 = %v, want %v", got1, tt.want1) + }) + } +} + +func topPeakLongHand(pos uint64) uint64 { + top := uint64(1) + for (top - 1) <= pos { + top <<= 1 + } + return (top >> 1) - 1 +} + +func TestTopPeak(t *testing.T) { + for i := uint64(0); i < 39; i++ { + t.Run(fmt.Sprintf("TopPeak(%d)", i), func(t *testing.T) { + want := topPeakLongHand(i+1) - 1 + x := 1<<(BitLength64(i+1)-1) - 2 + fmt.Printf("%d %4b %4b %d\n", x, x, i, want) + if got := TopPeak(i); got != want { + t.Errorf("TopPeak(%d) = %v, want %v", i, got, want) + } + }) + } +} +func TestPeaks2(t *testing.T) { + for pos := uint64(1); pos <= 39; pos++ { + t.Run(fmt.Sprintf("Peaks2(%d)", pos), func(t *testing.T) { + fmt.Printf("Peaks2(mmrSize: %d):", pos) + peaks := PeaksOld(pos) + peaks2 := PosPeaks(pos) + assert.Equal(t, peaks, peaks2) + fmt.Printf(" %v", peaks) + fmt.Printf("\n") + }) + } +} +func TestPeakIndex(t *testing.T) { + type args struct { + } + tests := []struct { + mmrIndex uint64 + proofLength int + expected int + }{ + {0, 0, 0}, // degenerate case + + {2, 1, 0}, // 2 is perfect + + // note the form here is len(accumulator) - 1 - the bit index from the right (least significant) with the zero's removed + // except for the perfect peaks which are always 0 + {3, 1, 2 - 1 - 1}, + {3, 0, 2 - 1 - 0}, + + {6, 2, 0}, // 10. 6 is perfect + + {7, 2, 2 - 1 - 1}, + {7, 0, 2 - 1 - 0}, + + {9, 2, 2 - 1 - 1}, // 110. + {9, 1, 2 - 1 - 0}, + + {10, 2, 3 - 1 - 2}, // 111 + {10, 1, 3 - 1 - 1}, // 111 + {10, 0, 3 - 1 - 0}, // 111 + + {14, 3, 0}, // 1000. 
14 is perfect + + {15, 3, 2 - 1 - 1}, // 1001 + {15, 0, 2 - 1 - 0}, // 1001 + + {17, 3, 2 - 1 - 1}, // 1010 + {17, 1, 2 - 1 - 0}, // 1010 + + {18, 3, 3 - 1 - 2}, // 1011 + {18, 1, 3 - 1 - 1}, // 1011 + {18, 0, 3 - 1 - 0}, // 1011 + + {21, 3, 2 - 1 - 1}, // 1100 + {21, 2, 2 - 1 - 0}, // 1100 + + {22, 3, 3 - 1 - 2}, // 1101 + {22, 2, 3 - 1 - 1}, // 1101 + {22, 0, 3 - 1 - 0}, // 1101 + + {24, 3, 3 - 1 - 2}, // 1110 + {24, 2, 3 - 1 - 1}, // 1110 + {24, 1, 3 - 1 - 0}, // 1110 + + {25, 3, 4 - 1 - 3}, // 1111 + {25, 2, 4 - 1 - 2}, // 1111 + {25, 1, 4 - 1 - 1}, // 1111 + {25, 0, 4 - 1 - 0}, // 1111 + + {30, 4, 0}, // 10000 perfect + + {31, 4, 2 - 1 - 1}, // 10001 + {31, 0, 2 - 1 - 0}, + + {33, 4, 2 - 1 - 1}, // 10010 + {33, 1, 2 - 1 - 0}, + + {34, 4, 3 - 1 - 2}, // 10011 + {34, 1, 3 - 1 - 1}, // 10011 + {34, 0, 3 - 1 - 0}, // 10011 + + {37, 4, 2 - 1 - 1}, // 10100 + {37, 2, 2 - 1 - 0}, // 10100 + + {38, 4, 3 - 1 - 2}, // 10101 + {38, 2, 3 - 1 - 1}, // 10101 + {38, 0, 3 - 1 - 0}, // 10101 + + } + for _, tt := range tests { + t.Run(fmt.Sprintf("MMR(%d), proof length %d, expected peak %d", tt.mmrIndex, tt.proofLength, tt.expected), func(t *testing.T) { + + peakBits := LeafCount(tt.mmrIndex + 1) + if got := PeakIndex(peakBits, tt.proofLength); got != tt.expected { + t.Errorf("PeakIndex() = %v, want %v", got, tt.expected) + } + }) + } +} + +func TestPeaksBitmap(t *testing.T) { + tests := []struct { + mmrSize uint64 + want uint64 + }{ + {mmrSize: 10, want: 6}, + {mmrSize: 1, want: 1}, + {mmrSize: 3, want: 2}, + {mmrSize: 4, want: 3}, + {mmrSize: 7, want: 4}, + {mmrSize: 8, want: 5}, + {mmrSize: 11, want: 7}, + {mmrSize: 15, want: 8}, + {mmrSize: 16, want: 9}, + {mmrSize: 18, want: 10}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("PeaksBitmap(%d)", tt.mmrSize), func(t *testing.T) { + got := PeaksBitmap(tt.mmrSize) + fmt.Printf("%02d %05b %05b %05b %02d\n", tt.mmrSize, tt.mmrSize, tt.mmrSize-1, got, got) + if got != tt.want { + t.Errorf("PeaksBitmap(%d) = %v, want %v", 
tt.mmrSize, got, tt.want) } }) } diff --git a/mmr/peaksold.go b/mmr/peaksold.go new file mode 100644 index 0000000..1d15f19 --- /dev/null +++ b/mmr/peaksold.go @@ -0,0 +1,60 @@ +package mmr + +// Note: the expectation is that once we are satisfied with the new methods we +// will delete this file. A reason to keep it around is that testing may benefit +// from having multiple implementations of key algorithms + +// PeaksOld is deprecated and retained only for reference and testing. +// +// returns the array of mountain peaks in the MMR. This is completely +// deterministic given a valid mmr size. If the mmr size is invalid, this +// function returns nil. +// +// It is guaranteed that the peaks are listed in ascending order of position +// value. The highest peak has the lowest position and is listed first. This is +// a consequence of the fact that the 'little' 'down range' peaks can only appear +// to the 'right' of the first perfect peak, and so on recursively. +// +// Note that as a matter of implementation convenience and efficiency the peaks +// are returned as *one based positions* +// +// So given the example below, which has an mmrSize of 17, the peaks are [15, 18] +// +// 3 15 +// / \ +// / \ +// / \ +// 2 7 14 +// / \ / \ +// 1 3 6 10 13 18 +// / \ / \ / \ / \ / \ +// 0 1 2 4 5 8 9 11 12 16 17 +func PeaksOld(mmrSize uint64) []uint64 { + if mmrSize == 0 { + return nil + } + + // catch invalid range, where siblings exist but no parent exists + if PosHeight(mmrSize+1) > PosHeight(mmrSize) { + return nil + } + + // The top peak is always the left most and, when counting from 1, will have all binary '1's + top := TopPeak(mmrSize-1) + 1 + + peaks := []uint64{top} + peak := top +OuterLoop: + for { + peak = JumpRightSibling(peak) + for peak > mmrSize { + if p, ok := LeftChild(peak); ok { + peak = p + continue + } + break OuterLoop + } + peaks = append(peaks, peak) + } + return peaks +} diff --git a/mmr/printers.go b/mmr/printers.go new file mode 100644 index 
0000000..172e3c6 --- /dev/null +++ b/mmr/printers.go @@ -0,0 +1,27 @@ +package mmr + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// debug utilities + +func proofPathStringer(path [][]byte, sep string) string { + var spath []string + + for _, it := range path { + spath = append(spath, hex.EncodeToString(it)) + } + return strings.Join(spath, sep) +} +func proofPathsStringer(paths [][][]byte, sep string) string { + + spaths := make([]string, 0, len(paths)) + + for _, path := range paths { + spaths = append(spaths, fmt.Sprintf("[%s]", proofPathStringer(path, sep))) + } + return strings.Join(spaths, sep) +} diff --git a/mmr/proof.go b/mmr/proof.go index 104ee4f..e950375 100644 --- a/mmr/proof.go +++ b/mmr/proof.go @@ -1,95 +1,80 @@ package mmr import ( - "hash" - "slices" + "errors" ) -// GetRoot returns the root hash for the Merkle Mountain Range. -// The root is defined as the 'bagging' of all peaks, starting with the highest. -// So its simply a call to BagPeaksRHS for _all_ peaks in the MMR of the provided size. -func GetRoot(mmrSize uint64, store indexStoreGetter, hasher hash.Hash) ([]byte, error) { - peaks := Peaks(mmrSize) - // The root is ALL the peaks. Note that bagging essentially accumulates them in a binary tree. - return BagPeaksRHS(store, hasher, 0, peaks) -} +var ( + ErrProofLenTooLarge = errors.New("proof length value is too large") + ErrPeakListTooShort = errors.New("the list of peak values is too short") +) -// IndexProof provides a proof of inclusion for the leaf at index i against the full MMR +// GetProofPeakRoot returns the peak hash for sub tree committing any node. // -// It relies on the methods IndexProofLocal, BagPeaksRHS and PeaksLHS for -// collecting the necessary MMR elements and then combines the results into a -// final verifiable commitment for the whole MMR. +// This is a convenience for use when the caller does not have the heightIndex naturaly from other operations. 
// -// The proof layout is conceptualy this: +// A proof for node 2 would be [5], and the peak list for mmrSize 11 would be // -// [local-peak-proof-i, right-sibling-of-i, left-of-i-peaks-reversed] +// [6, 9, 10] // -// So for leaf 15, given +// To obtain the appropriate root to verify a proof of inclusion for node 2 call this function with: // -// 3 14 -// / \ -// / \ -// / \ -// / \ -// 2 6 13 21 -// / \ / \ -// 1 2 5 9 12 17 20 24 -// / \ / \ / \ / \ / \ -// 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 +// peakHashes: [H(6), H(9), H(10)] +// proofLen: 1 +// mmrSize: 11 +// heightIndex: 1 // -// We get +// The returned peak root will be H(6) // -// | BagPeaksRHS | -// . . -// [H(16), H(20), H(H(H(25)|H(24)) | H(21), H(14)] -// ^ . ^ ^ ^ -// .___________. .___________. -// | | -// | reversed(PeaksLHS) -// IndexProofLocal +// For node 7, mmrIndex would be 7, all other parameters would remain the same and the returned value would be H(9) // -// Note that right-sibling is omitted if there is none, and similarly, the left -// peaks. The individual functions producing those elements contain more detail -// over the construction of their particular proof component. -func IndexProof(mmrSize uint64, store indexStoreGetter, hasher hash.Hash, i uint64) ([][]byte, error) { +// 2 6 +// / \ +// 1 2 5 9 +// / \ / \ / \ +// 0 0 1 3 4 7 8 10 +func GetProofPeakRoot(mmrSize uint64, mmrIndex uint64, peakHashes [][]byte, proofLen int) ([]byte, error) { - var err error - var proof [][]byte - var iLocalPeak uint64 // the peak of the local merkle tree containing i - var leftPath [][]byte - var rightSibling []byte + // for leaf nodes, the peak height index is the proof length - 1, for + // generality, to account for interior nodes, we use IndexHeight here. + // In contexts where consistency proofs are being generated to check log + // extension, typically the returned height from InclusionProofPath is + // available. 
- if proof, iLocalPeak, err = IndexProofLocal(mmrSize, store, i); err != nil { - return nil, err - } + heightIndex := IndexHeight(mmrIndex) - peaks := Peaks(mmrSize) - - if rightSibling, err = BagPeaksRHS(store, hasher, iLocalPeak+1, peaks); err != nil { - return nil, err - } - if rightSibling != nil { - proof = append(proof, rightSibling) + peakIndex := GetProofPeakIndex(mmrSize, proofLen, uint8(heightIndex)) + if peakIndex >= len(peakHashes) { + return nil, ErrPeakListTooShort } + return peakHashes[peakIndex], nil +} - if leftPath, err = PeaksLHS(store, iLocalPeak+1, peaks); err != nil { - return nil, err +// GetLeafProofRoot gets the appropriate peak root from peakHashes for a leaf proof, See GetProofPeakRoot +func GetLeafProofRoot(peakHashes [][]byte, proof [][]byte, mmrSize uint64) ([]byte, error) { + peakIndex := GetProofPeakIndex(mmrSize, len(proof), 0) + if peakIndex >= len(peakHashes) { + return nil, ErrPeakListTooShort } - // reverse(leftPath) - slices.Reverse(leftPath) - proof = append(proof, leftPath...) + return peakHashes[len(peakHashes)-peakIndex-1], nil +} - return proof, nil +// GetLeafProofRoot gets the compressed accumulator peak index for a leaf proof, See GetProofPeakRoot +func GetProofPeakIndex(mmrSize uint64, d int, heightIndex uint8) int { + // get the index into the accumulator + // peakMap is also the leaf count, which is often known to the caller + peakMap := PeaksBitmap(mmrSize) + return PeakIndex(peakMap, int(heightIndex)+d) } -// IndexProofLocal collects the merkle root proof for the local MMR peak containing index i +// IndexPath collects the merkle proof mmr index i // -// So for the follwing index tree, and i=15 with mmrSize = 26 we would obtain the path +// For the following index tree, and i=15 with mmrSize = 26 we would obtain the path // // [H(16), H(20)] // -// Because the local peak is 21, and given the value for 15, we only need 16 and -// then 20 to prove the local root. 
+// Because the accumulator peak committing 15 is 21, and given the value for 15, we only need 16 and +// then 20 to verify the proof. // // 3 14 // / \ @@ -101,183 +86,104 @@ func IndexProof(mmrSize uint64, store indexStoreGetter, hasher hash.Hash, i uint // 1 2 5 9 12 17 20 24 // / \ / \ / \ / \ / \ // 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 -func IndexProofLocal(mmrSize uint64, store indexStoreGetter, i uint64) ([][]byte, uint64, error) { +func InclusionProof(store indexStoreGetter, mmrLastIndex uint64, i uint64) ([][]byte, error) { + + var iSibling uint64 var proof [][]byte - height := IndexHeight(i) // allows for proofs of interior nodes - - var err error - var value []byte - - for i < mmrSize { - iHeight := IndexHeight(i) - iNextHeight := IndexHeight(i + 1) - if iNextHeight > iHeight { - iSibling := i - SiblingOffset(height) - if iSibling >= mmrSize { - break - } - - if value, err = store.Get(iSibling); err != nil { - return nil, 0, err - } - proof = append(proof, value) - // go to parent node - i += 1 - } else { - iSibling := i + SiblingOffset(height) - if iSibling >= mmrSize { - break - } - - if value, err = store.Get(iSibling); err != nil { - return nil, 0, err - } - proof = append(proof, value) - // goto parent node - i += 2 << height - } - height += 1 + + if i > mmrLastIndex { + return nil, errors.New("index out of range") } - return proof, i, nil -} -// LeftPosForHeight returns the position that is 'most left' for the given height. -// Eg for height 0, it returns 0, for height 1 it returns 2, for 2 it returns 6. -// Note that these are always values where the corresponding 1 based position -// has 'all ones' set. -func LeftPosForHeight(height uint64) uint64 { - return (1 << (height + 1)) - 2 -} + g := IndexHeight(i) // allows for proofs of interior nodes -// BagPeaksRHS computes a root for the RHS peaks. -// This function will only return an err if there is an issue fetching a value -// from the provided store. 
-// -// The burden is on the _caller_ to provide valid peaks for the given position pos -// -// If there are no peaks to the right of pos, this function returns nil, nil. This -// means the sibling hash for pos is to the left and the return value should be -// ignored. -// -// Working exclusively in positions rather than indices, If the peak pos is 25, -// then the RHS (and the sibling hash) is just H(26), if pos is 26 then there is -// not right sibling, and this method would return nil. -// -// The peaks are listed in ascending order (ie from the end of -// the range back towards pos), So when pos is 15, the RHS sibling hash will -// be: -// -// H(H(H(right)|H(left)) | H(22)) -// -// Which is: -// -// H(H(H(26)|H(25)) | H(22)) -// -// 3 15 -// / \ -// / \ -// / \ -// 2 7 14 22 -// / \ / \ / \ -// 1 3 6 10 13 18 21 25 -// / \ / \ / \ / \ / \ / \ / \ -// 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 -func BagPeaksRHS(store indexStoreGetter, hasher hash.Hash, pos uint64, peaks []uint64) ([]byte, error) { - - peakHashes, err := PeakBagRHS(store, hasher, pos, peaks) - if err != nil { - return nil, err - } + for { // iSibling is guaranteed to break the loop - root := hashPeaksRHS(hasher, peakHashes) - return root, nil -} + // The sibling of i is at i +/- 2^(g+1) + siblingOffset := uint64((2 << g)) + + // If the index after i is heigher, it is the left parent, and i is the right sibling. 
+ if IndexHeight(i+1) > g { + // The witness to the right sibling is offset behind i + iSibling = i - siblingOffset + 1 + + // The parent of a right sibling is stored imediately after the sibling + i += 1 + } else { -// PeakBagRHS collects the peaks for BagPeaksRHS in the right order for hashing -func PeakBagRHS( - store indexStoreGetter, hasher hash.Hash, pos uint64, peaks []uint64) ([][]byte, error) { - var err error - var value []byte - var peakHashes [][]byte + // The witness to the left sibling is offset ahead of i + iSibling = i + siblingOffset - 1 - for _, peakPos := range peaks { + // The parent of a left sibling is stored imediately after its right sibling + i += siblingOffset + } - // skip all left peaks and the pos peak - if peakPos <= pos { - continue + // When the computed sibling exceedes the range of MMR(C+1), + // we have completed the path. + if iSibling > mmrLastIndex { + return proof, nil } - // As the leaves are indexed from zero, we just do pos - 1 to access the leaf. - if value, err = store.Get(peakPos - 1); err != nil { + + value, err := store.Get(iSibling) + if err != nil { return nil, err } - peakHashes = append(peakHashes, value) + proof = append(proof, value) + + // Set g to the height of the next item in the path. + g += 1 } - return peakHashes, nil } -// hashPeaksRHS creates a binary merkle tree from the peaks to obtain a single -// tree root. +// returns the mmr indices identifying the witness nodes for mmr index i // -// WARNING: MUTATES the input slice by popping items from it -func hashPeaksRHS(hasher hash.Hash, peakHashes [][]byte) []byte { +// This method allows tooling to individually audit the proof path node values for a given index. +func InclusionProofPath(mmrLastIndex uint64, i uint64) ([]uint64, error) { - var right []byte - var left []byte + var iSibling uint64 - // The hashes are highest to lowest, we are popping so we consume from the end backwards. 
- for len(peakHashes) > 1 { + var proof []uint64 + g := IndexHeight(i) // allows for proofs of interior nodes - right, peakHashes = peakHashes[len(peakHashes)-1], peakHashes[:len(peakHashes)-1] // go lang's array pop - left, peakHashes = peakHashes[len(peakHashes)-1], peakHashes[:len(peakHashes)-1] // go lang's array pop + for { // iSibling is guaranteed to break the loop - hasher.Reset() - hasher.Write(right) - hasher.Write(left) + // The sibling of i is at i +/- 2^(g+1) + siblingOffset := uint64((2 << g)) - peakHashes = append(peakHashes, hasher.Sum(nil)) - } - if len(peakHashes) > 0 { - return peakHashes[0] - } - return nil -} + // If the index after i is heigher, it is the left parent, and i is the right sibling. + if IndexHeight(i+1) > g { + // The witness to the right sibling is offset behind i + iSibling = i - siblingOffset + 1 -// HashPeaksRHS merkleizes the peaks to obtain a single tree root -// This variant copies the peakHashes list in order to be side effect free. -func HashPeaksRHS(hasher hash.Hash, peakHashes [][]byte) []byte { - return hashPeaksRHS(hasher, append([][]byte(nil), peakHashes...)) -} + // The parent of a right sibling is stored imediately after the sibling + i += 1 + } else { -// PeaksLHS collects the peaks to the left of position pos into a flat sequence -// -// So for the following tree and pos=25 we would get -// -// [15, 22] -// -// 3 15 -// / \ -// / \ -// / \ -// 2 7 14 22 -// / \ / \ / \ -// 1 3 6 10 13 18 21 25 -// / \ / \ / \ / \ / \ / \ / \ -// 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 -func PeaksLHS(store indexStoreGetter, pos uint64, peaks []uint64) ([][]byte, error) { - - var err error - var value []byte - var peakHashes [][]byte - - for _, peakPos := range peaks { - if peakPos >= pos { - break + // The witness to the left sibling is offset ahead of i + iSibling = i + siblingOffset - 1 + + // The parent of a left sibling is stored imediately after its right sibling + i += siblingOffset } - if value, err = store.Get(peakPos - 
1); err != nil { - return nil, err + + // When the computed sibling exceedes the range of MMR(C+1), + // we have completed the path. + if iSibling > mmrLastIndex { + return proof, nil } - peakHashes = append(peakHashes, value) + + proof = append(proof, iSibling) + + // Set g to the height of the next item in the path. + g += 1 } - return peakHashes, nil +} + +// LeftPosForHeight returns the position that is 'most left' for the given height. +// Eg for height 0, it returns 0, for height 1 it returns 2, for 2 it returns 6. +// Note that these are always values where the corresponding 1 based position +// has 'all ones' set. +func LeftPosForHeight(height uint64) uint64 { + return (1 << (height + 1)) - 2 } diff --git a/mmr/proof_test.go b/mmr/proof_test.go index 40d1174..cdad545 100644 --- a/mmr/proof_test.go +++ b/mmr/proof_test.go @@ -91,7 +91,7 @@ func TestGetRoot(t *testing.T) { } } -func TestIndexProofLocal(t *testing.T) { +func TestInclusionProofLocal(t *testing.T) { db := NewCanonicalTestDB(t) // H return the node hash for index i from the canonical test tree. 
@@ -189,16 +189,129 @@ func TestIndexProofLocal(t *testing.T) { if mmrSize == 0 { mmrSize = tt.args.store.Next() } - got, got1, err := IndexProofLocal(mmrSize, tt.args.store, tt.args.i) + got, got1, err := InclusionProofLocal(mmrSize, tt.args.store, tt.args.i) if (err != nil) != tt.wantErr { - t.Errorf("IndexProofLocal() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("InclusionProofLocal() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { - t.Errorf("IndexProofLocal() = %v, want %v", got, tt.want) + t.Errorf("InclusionProofLocal() = %v, want %v", got, tt.want) } if got1 != tt.want1 { - t.Errorf("IndexProofLocal() = %v, want %v", got1, tt.want1) + t.Errorf("InclusionProofLocal() = %v, want %v", got1, tt.want1) + } + }) + } +} + +func TestInclusionProofLocalOld(t *testing.T) { + db := NewCanonicalTestDB(t) + + // H return the node hash for index i from the canonical test tree. + // + // The canonical test tree has the hashes for all the positions, including + // the interior nodes. Created by mandraulically hasing nodes so that tree + // concstruction can legitemately be tested against it. 
+ H := func(i uint64) []byte { + return db.mustGet(i) + } + + // the proof nodes for leaf 0 + h1 := H(1) + h5 := H(5) + h13 := H(13) + // the additional proof nodes for leaf 1 + h0 := H(0) + + type args struct { + store testStoreProver + i uint64 + mmrSize uint64 // set zero to take from db + } + tests := []struct { + name string + args args + want [][]byte + want1 uint64 + wantErr bool + }{ + // the 0 based tree + // 3 14 + // / \ + // / \ + // / \ + // / \ + // 2 6 13 + // / \ / \ + // 1 2 5 9 12 17 + // / \ / \ / \ / \ / \ + // 0 0 1 3 4 7 8 10 11 15 16 18 + + {"2 (interior node)", args{ + db, 2, 26, + }, [][]byte{H(5), H(13)}, 14, false}, + + {"2 (interior node) smaller mmr", args{ + db, 2, 11, + }, [][]byte{H(5)}, 6, false}, + + {"0", args{ + db, 0, 26, + }, [][]byte{h1, h5, h13}, 14, false}, + + {"1", args{ + db, 1, 26, + }, [][]byte{h0, h5, h13}, 14, false}, + + {"3", args{ + db, 3, 26, + }, [][]byte{H(4), H(2), H(13)}, 14, false}, + + {"4", args{ + db, 4, 26, + }, [][]byte{H(3), H(2), H(13)}, 14, false}, + + {"7", args{ + db, 7, 26, + }, [][]byte{H(8), H(12), H(6)}, 14, false}, + + {"8", args{ + db, 8, 26, + }, [][]byte{H(7), H(12), H(6)}, 14, false}, + + {"10", args{ + db, 10, 26, + }, [][]byte{H(11), H(9), H(6)}, 14, false}, + + {"11", args{ + db, 11, 26, + }, [][]byte{H(10), H(9), H(6)}, 14, false}, + + // Notice: this is the isolated peak, hence the short length and the alternate root + {"15", args{ + db, 15, 18, + }, [][]byte{H(16)}, 17, false}, + + {"16", args{ + db, 16, 18, + }, [][]byte{H(15)}, 17, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mmrSize := tt.args.mmrSize + if mmrSize == 0 { + mmrSize = tt.args.store.Next() + } + got, got1, err := InclusionProofLocalOld(mmrSize, tt.args.store, tt.args.i) + if (err != nil) != tt.wantErr { + t.Errorf("InclusionProofLocal() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("InclusionProofLocal() = %v, want %v", 
got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("InclusionProofLocal() = %v, want %v", got1, tt.want1) } }) } @@ -312,102 +425,3 @@ func TestLeftPosForHeight(t *testing.T) { }) } } - -func TestHeightPeakRight(t *testing.T) { - - type args struct { - mmrSize uint64 - height uint64 - i uint64 - } - tests := []struct { - name string - args args - want uint64 - want1 uint64 - want2 bool - }{ - // 3 14 - // / \ - // / \ - // / \ - // / \ - // 2 6 13 - // / \ / \ - // 1 2 5 9 12 17 20 - // / \ / \ / \ / \ / \ - // 0 0 1 3 4 7 8 10 11 15 16 18 | 19 - {"happy path 1", args{19, 1, 2}, 1, 5, true}, - // behaviour on the 'sad' path is undefined - // {"sad path 2", args{19, 1, 5}, 1, 9, false}, // no right sibling - {"happy path 3", args{19, 1, 9}, 1, 12, true}, - // {"sad path 4", args{19, 1, 12}, 1, 17, false}, no right sibling - {"happy path 14", args{19, 3, 14}, 1, 17, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1, got2 := HeightPeakRight(tt.args.mmrSize, tt.args.height, tt.args.i) - if got2 != tt.want2 { - t.Errorf("HeightPeakRight() got2 = %v, want %v", got2, tt.want2) - } - if got != tt.want { - t.Errorf("HeightPeakRight() got = %v, want %v", got, tt.want) - } - if got1 != tt.want1 { - t.Errorf("HeightPeakRight() got1 = %v, want %v", got1, tt.want1) - } - }) - } -} - -func TestPeaksLHS(t *testing.T) { - - db := NewCanonicalTestDB(t) - - // H gets a hash from store by *index* - H := func(i uint64) []byte { - return db.mustGet(i) - } - - type args struct { - store indexStoreGetter - i uint64 - peaks []uint64 - } - tests := []struct { - name string - args args - want [][]byte - wantErr bool - }{ - // We get 'peaks' as one based positions, but we get the 'right' peak as - // a zero based index. 
Lets visualise the tree as positions: - // - // 3 15 - // / \ - // / \ - // / \ - // 2 7 14 22 - // / \ / \ / \ - // 1 3 6 10 13 18 21 25 - // / \ / \ / \ / \ / \ / \ / \ - // 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 - - { - "two left, one right", - args{db, 24 /*index not pos*/, []uint64{15, 22, 25, 26}}, [][]byte{H(15 - 1), H(22 - 1)}, false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := PeaksLHS(tt.args.store, tt.args.i, tt.args.peaks) - if (err != nil) != tt.wantErr { - t.Errorf("PeaksLHS() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("PeaksLHS() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/mmr/proofbagged.go b/mmr/proofbagged.go new file mode 100644 index 0000000..5e8e692 --- /dev/null +++ b/mmr/proofbagged.go @@ -0,0 +1,278 @@ +package mmr + +import ( + "hash" + "slices" +) + +// Verification is by default against the MRR accumulator peaks (see verify.go). The "Bagged" +// variants work with proofs against a "Bagged" singular mono root for an MMR. + +// GetRoot returns the root hash for the Merkle Mountain Range. +// The root is defined as the 'bagging' of all peaks, starting with the highest. +// So its simply a call to BagPeaksRHS for _all_ peaks in the MMR of the provided size. +func GetRoot(mmrSize uint64, store indexStoreGetter, hasher hash.Hash) ([]byte, error) { + peaks := PosPeaks(mmrSize) + // The root is ALL the peaks. Note that bagging essentially accumulates them in a binary tree. + return BagPeaksRHS(store, hasher, 0, peaks) +} + +// InclusionProofBagged provides a proof of inclusion for the leaf at index i against the full MMR +// +// It relies on the methods InclusionProofLocal, BagPeaksRHS and PeaksLHS for +// collecting the necessary MMR elements and then combines the results into a +// final verifiable commitment for the whole MMR. 
+// +// The proof layout is conceptualy this: +// +// [local-peak-proof-i, right-sibling-of-i, left-of-i-peaks-reversed] +// +// So for leaf 15, given +// +// 3 14 +// / \ +// / \ +// / \ +// / \ +// 2 6 13 21 +// / \ / \ +// 1 2 5 9 12 17 20 24 +// / \ / \ / \ / \ / \ +// 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 +// +// We get +// +// | BagPeaksRHS | +// . . +// [H(16), H(20), H(H(H(25)|H(24)) | H(21), H(14)] +// ^ . ^ ^ ^ +// .___________. .___________. +// | | +// | reversed(PeaksLHS) +// InclusionProofPath +// +// Note that right-sibling is omitted if there is none, and similarly, the left +// peaks. The individual functions producing those elements contain more detail +// over the construction of their particular proof component. +func InclusionProofBagged(mmrSize uint64, store indexStoreGetter, hasher hash.Hash, i uint64) ([][]byte, error) { + + var err error + var proof [][]byte + var iLocalPeak uint64 // the peak of the local merkle tree containing i + var leftPath [][]byte + var rightSibling []byte + + if proof, iLocalPeak, err = InclusionProofLocal(mmrSize, store, i); err != nil { + return nil, err + } + + peaks := PosPeaks(mmrSize) + + if rightSibling, err = BagPeaksRHS(store, hasher, iLocalPeak+1, peaks); err != nil { + return nil, err + } + if rightSibling != nil { + proof = append(proof, rightSibling) + } + + if leftPath, err = PeaksLHS(store, iLocalPeak+1, peaks); err != nil { + return nil, err + } + // reverse(leftPath) + slices.Reverse(leftPath) + proof = append(proof, leftPath...) + + return proof, nil +} + +// BagPeaksRHS computes a root for the RHS peaks. +// This function will only return an err if there is an issue fetching a value +// from the provided store. +// +// The burden is on the _caller_ to provide valid peaks for the given position pos +// +// If there are no peaks to the right of pos, this function returns nil, nil. This +// means the sibling hash for pos is to the left and the return value should be +// ignored. 
+// +// Working exclusively in positions rather than indices, If the peak pos is 25, +// then the RHS (and the sibling hash) is just H(26), if pos is 26 then there is +// not right sibling, and this method would return nil. +// +// The peaks are listed in ascending order (ie from the end of +// the range back towards pos), So when pos is 15, the RHS sibling hash will +// be: +// +// H(H(H(right)|H(left)) | H(22)) +// +// Which is: +// +// H(H(H(26)|H(25)) | H(22)) +// +// 3 15 +// / \ +// / \ +// / \ +// 2 7 14 22 +// / \ / \ / \ +// 1 3 6 10 13 18 21 25 +// / \ / \ / \ / \ / \ / \ / \ +// 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 +func BagPeaksRHS(store indexStoreGetter, hasher hash.Hash, pos uint64, peaks []uint64) ([]byte, error) { + + peakHashes, err := PeakBagRHS(store, hasher, pos, peaks) + if err != nil { + return nil, err + } + + root := hashPeaksRHS(hasher, peakHashes) + return root, nil +} + +// PeakBagRHS collects the peaks for BagPeaksRHS in the right order for hashing +func PeakBagRHS( + store indexStoreGetter, hasher hash.Hash, pos uint64, peaks []uint64) ([][]byte, error) { + var err error + var value []byte + var peakHashes [][]byte + + for _, peakPos := range peaks { + + // skip all left peaks and the pos peak + if peakPos <= pos { + continue + } + // As the leaves are indexed from zero, we just do pos - 1 to access the leaf. + if value, err = store.Get(peakPos - 1); err != nil { + return nil, err + } + peakHashes = append(peakHashes, value) + } + return peakHashes, nil +} + +// InclusionProofLocal collects the merkle root proof for the local MMR peak containing index i +// +// So for the follwing index tree, and i=15 with mmrSize = 26 we would obtain the path +// +// [H(16), H(20)] +// +// Because the local peak is 21, and given the value for 15, we only need 16 and +// then 20 to prove the local root. 
+// +// 3 14 +// / \ +// / \ +// / \ +// / \ +// 2 6 13 21 +// / \ / \ +// 1 2 5 9 12 17 20 24 +// / \ / \ / \ / \ / \ +// 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 +func InclusionProofLocal(mmrSize uint64, store indexStoreGetter, i uint64) ([][]byte, uint64, error) { + + var proof [][]byte + height := IndexHeight(i) // allows for proofs of interior nodes + + var err error + var value []byte + + for i < mmrSize { + iHeight := IndexHeight(i) + iNextHeight := IndexHeight(i + 1) + if iNextHeight > iHeight { + iSibling := i - SiblingOffset(height) + if iSibling >= mmrSize { + break + } + + if value, err = store.Get(iSibling); err != nil { + return nil, 0, err + } + proof = append(proof, value) + // go to parent node + i += 1 + } else { + iSibling := i + SiblingOffset(height) + if iSibling >= mmrSize { + break + } + + if value, err = store.Get(iSibling); err != nil { + return nil, 0, err + } + proof = append(proof, value) + // goto parent node + i += 2 << height + } + height += 1 + } + return proof, i, nil +} + +// hashPeaksRHS creates a binary merkle tree from the peaks to obtain a single +// tree root. +// +// WARNING: MUTATES the input slice by popping items from it +func hashPeaksRHS(hasher hash.Hash, peakHashes [][]byte) []byte { + + var right []byte + var left []byte + + // The hashes are highest to lowest, we are popping so we consume from the end backwards. + for len(peakHashes) > 1 { + + right, peakHashes = peakHashes[len(peakHashes)-1], peakHashes[:len(peakHashes)-1] // go lang's array pop + left, peakHashes = peakHashes[len(peakHashes)-1], peakHashes[:len(peakHashes)-1] // go lang's array pop + + hasher.Reset() + hasher.Write(right) + hasher.Write(left) + + peakHashes = append(peakHashes, hasher.Sum(nil)) + } + if len(peakHashes) > 0 { + return peakHashes[0] + } + return nil +} + +// HashPeaksRHS merkleizes the peaks to obtain a single tree root +// This variant copies the peakHashes list in order to be side effect free. 
+func HashPeaksRHS(hasher hash.Hash, peakHashes [][]byte) []byte { + return hashPeaksRHS(hasher, append([][]byte(nil), peakHashes...)) +} + +// PeaksLHS collects the peaks to the left of position pos into a flat sequence +// +// So for the following tree and pos=25 we would get +// +// [15, 22] +// +// 3 15 +// / \ +// / \ +// / \ +// 2 7 14 22 +// / \ / \ / \ +// 1 3 6 10 13 18 21 25 +// / \ / \ / \ / \ / \ / \ / \ +// 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 +func PeaksLHS(store indexStoreGetter, pos uint64, peaks []uint64) ([][]byte, error) { + + var err error + var value []byte + var peakHashes [][]byte + + for _, peakPos := range peaks { + if peakPos >= pos { + break + } + if value, err = store.Get(peakPos - 1); err != nil { + return nil, err + } + peakHashes = append(peakHashes, value) + } + return peakHashes, nil +} diff --git a/mmr/proofofconsistency.go b/mmr/proofofconsistency.go index cb00d53..8407ff7 100644 --- a/mmr/proofofconsistency.go +++ b/mmr/proofofconsistency.go @@ -1,7 +1,5 @@ package mmr -import "hash" - // ConsistencyProof describes a proof that the merkle log defined by size a is // perfectly contained in the log described by size b. This structure aligns us // with the consistency proof format described in this ietf draft: @@ -12,44 +10,45 @@ import "hash" // A reference introducing the concept of consistency proofs in merkle trees: // https://pangea.cloud/docs/audit/merkle-trees#outline-consistency-proof type ConsistencyProof struct { - MMRSizeA uint64 `cbor:"1,keyasint"` - MMRSizeB uint64 `cbor:"2,keyasint"` - Path [][]byte `cbor:"3,keyasint"` + MMRSizeA uint64 `cbor:"1,keyasint"` + MMRSizeB uint64 `cbor:"2,keyasint"` + // legacy proof format + PathBagged [][]byte `cbor:"3,keyasint"` + Path [][][]byte `cbor:"4,keyasint"` } // IndexConsistencyProof creates a proof that mmr B appends to mmr A. // Our method works by generating inclusion proofs for each of the peaks of A. 
-// This method results in a proof path that has some redundancy in it, but -// permits re-use of the inclusion proof verification method. // // As each peak is an interior node, and as each interior node commits to the // number of nodes under it (the count of nodes at that point) there is only one // possible location the node can exist in the tree. If node x is in both mmr A // and mmr B then it is included in exactly the same position. // -// Verification will first show that the root of A can be re-produced from MMR -// B, and then proceed to checking the inclusion proofs for the A peaks in mmr -// B. +// Verification is then performed in terms of the mmr accumulator states MMR(A) +// and MMR(B) for each "old" peak in MMR(A) we show there is a path to a "new" +// or "same" peak in MMR(B) func IndexConsistencyProof( - mmrSizeA, mmrSizeB uint64, store indexStoreGetter, hasher hash.Hash, + store indexStoreGetter, mmrIndexA, mmrIndexB uint64, ) (ConsistencyProof, error) { - proof := ConsistencyProof{ - MMRSizeA: mmrSizeA, - MMRSizeB: mmrSizeB, + MMRSizeA: mmrIndexA + 1, + MMRSizeB: mmrIndexB + 1, } // Find the peaks corresponding to the previous mmr - peaksA := Peaks(mmrSizeA) + peaksA := Peaks(mmrIndexA) // Now generate peak proofs against the new mmr size, using the peak indices // as the input indices to prove for _, iPeakA := range peaksA { - peakProof, err := IndexProof(mmrSizeB, store, hasher, iPeakA-1) + + peakProof, err := InclusionProof(store, mmrIndexB, iPeakA) if err != nil { return ConsistencyProof{}, err } - proof.Path = append(proof.Path, peakProof...) 
+ proof.Path = append(proof.Path, peakProof) } return proof, nil + } diff --git a/mmr/proofofconsistency_test.go b/mmr/proofofconsistency_test.go index 16641b3..2b9a75c 100644 --- a/mmr/proofofconsistency_test.go +++ b/mmr/proofofconsistency_test.go @@ -2,9 +2,13 @@ package mmr import ( "crypto/sha256" + "fmt" "hash" "reflect" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { @@ -15,7 +19,7 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf(": %v", err) } - peakProof, err := IndexProof(11, store, hasher, 0) + peakProof, err := InclusionProofBagged(11, store, hasher, 0) if err != nil { t.Errorf(": %v", err) } @@ -26,12 +30,12 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf(": %v", err) } - ok := VerifyInclusion(11, hasher, peakHash, 0, peakProof, rootA) + ok := VerifyInclusionBagged(11, hasher, peakHash, 0, peakProof, rootA) if !ok { t.Errorf("it is not ok") } - peakProof, err = IndexProof(11, store, hasher, 1) + peakProof, err = InclusionProofBagged(11, store, hasher, 1) if err != nil { t.Errorf(": %v", err) } @@ -42,12 +46,12 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf(": %v", err) } - ok = VerifyInclusion(11, hasher, peakHash, 1, peakProof, rootA) + ok = VerifyInclusionBagged(11, hasher, peakHash, 1, peakProof, rootA) if !ok { t.Errorf("it is not ok") } - peakProof, err = IndexProof(11, store, hasher, 2) + peakProof, err = InclusionProofBagged(11, store, hasher, 2) if err != nil { t.Errorf(": %v", err) } @@ -58,12 +62,12 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf(": %v", err) } - ok = VerifyInclusion(11, hasher, peakHash, 2, peakProof, rootA) + ok = VerifyInclusionBagged(11, hasher, peakHash, 2, peakProof, rootA) if !ok { t.Errorf("it is not ok") } - peakProof, err = IndexProof(11, store, hasher, 6) + peakProof, err = 
InclusionProofBagged(11, store, hasher, 6) if err != nil { t.Errorf(": %v", err) } @@ -74,7 +78,7 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf(": %v", err) } - ok = VerifyInclusion(11, hasher, peakHash, 6, peakProof, rootA) + ok = VerifyInclusionBagged(11, hasher, peakHash, 6, peakProof, rootA) if !ok { t.Errorf("it is not ok") } @@ -87,6 +91,142 @@ func TestIndexConsistencyProof(t *testing.T) { testMinimal(t, hasher, store) + type args struct { + mmrSizeA uint64 + mmrSizeB uint64 + } + tests := []struct { + name string + args args + wantProof ConsistencyProof + wantPeaksA [][]byte + wantPeaksB [][]byte + wantProofErr bool + wantVerify bool + }{ + { + name: "11 to 18", + args: args{ + mmrSizeA: 11, + mmrSizeB: 18, + }, + wantProof: ConsistencyProof{ + MMRSizeA: 11, + MMRSizeB: 18, + Path: [][][]byte{ + { + // 6 in 18 + store.mustGet(13), + }, + // 9 in 18 + { + store.mustGet(12), + store.mustGet(6), + }, + // 10 in 18 + { + store.mustGet(11), + store.mustGet(9), + store.mustGet(6), + }, + }, + }, + wantPeaksA: [][]byte{ + store.mustGet(6), + store.mustGet(9), + store.mustGet(10), + }, + wantPeaksB: [][]byte{ + store.mustGet(14), + store.mustGet(17), + }, + wantProofErr: false, + wantVerify: true, + }, + { + name: "7 to 15", + args: args{ + mmrSizeA: 7, + mmrSizeB: 15, + }, + wantProof: ConsistencyProof{}, + wantProofErr: false, + wantVerify: true, + }, + { + name: "7 to 63", + args: args{ + mmrSizeA: 7, + mmrSizeB: 63, + }, + wantProof: ConsistencyProof{}, + wantProofErr: false, + wantVerify: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := IndexConsistencyProof(store, tt.args.mmrSizeA-1, tt.args.mmrSizeB-1) + if (err != nil) != tt.wantProofErr { + t.Errorf("IndexConsistencyProof() error = %v, wantErr %v", err, tt.wantProofErr) + return + } + + if tt.wantProof.Path != nil { + fmt.Printf("Path: expect: %s\n", proofPathsStringer(tt.wantProof.Path, ", ")) + fmt.Printf("Path: got : %s\n", 
proofPathsStringer(got.Path, ", ")) + } + + if tt.wantProof.MMRSizeA != 0 && tt.wantProof.MMRSizeA != got.MMRSizeA { + t.Errorf( + "IndexConsistencyProof(), want MMRSizeA %d, got %d", + tt.wantProof.MMRSizeA, got.MMRSizeA) + } + if tt.wantProof.MMRSizeB != 0 && tt.wantProof.MMRSizeB != got.MMRSizeB { + t.Errorf( + "IndexConsistencyProof(), want MMRSizeB %d, got %d", + tt.wantProof.MMRSizeB, got.MMRSizeB) + } + if tt.wantProof.Path != nil && !reflect.DeepEqual(got.Path, tt.wantProof.Path) { + t.Errorf("IndexConsistencyProof(), want %v, got %v", tt.wantProof.Path, got.Path) + } + + peakHashesA, err := PeakHashes(store, got.MMRSizeA-1) + if tt.wantPeaksA != nil { + require.NoError(t, err) + fmt.Printf("peakHashesA expect: %s\n", proofPathStringer(peakHashesA, ", ")) + fmt.Printf("peakHashesA got : %s\n", proofPathStringer(peakHashesA, ", ")) + assert.Equal(t, peakHashesA, tt.wantPeaksA) + } + peakHashesB, err := PeakHashes(store, got.MMRSizeB-1) + if tt.wantPeaksB != nil { + require.NoError(t, err) + fmt.Printf("peakHashesB expect: %s\n", proofPathStringer(peakHashesB, ", ")) + fmt.Printf("peakHashesB got : %s\n", proofPathStringer(peakHashesB, ", ")) + assert.Equal(t, peakHashesB, tt.wantPeaksB) + } + + // If the passing test doesn't produce a valid proof then we are done. 
+ if tt.wantProofErr == true { + return + } + + verified, _ /*peaksB*/, err := VerifyConsistency(hasher, got, peakHashesA, peakHashesB) + require.NoError(t, err) + if tt.wantVerify != verified { + t.Errorf("VerifyConsistency() = %v, expected %v", tt.wantVerify, verified) + } + }) + } +} + +func TestIndexConsistencyProofBagged(t *testing.T) { + + hasher := sha256.New() + store := NewGeneratedTestDB(t, 63) + + testMinimal(t, hasher, store) + type args struct { mmrSizeA uint64 mmrSizeB uint64 @@ -131,7 +271,7 @@ func TestIndexConsistencyProof(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := IndexConsistencyProof(tt.args.mmrSizeA, tt.args.mmrSizeB, store, hasher) + got, err := IndexConsistencyProofBagged(tt.args.mmrSizeA, tt.args.mmrSizeB, store, hasher) if (err != nil) != tt.wantProofErr { t.Errorf("IndexConsistencyProof() error = %v, wantErr %v", err, tt.wantProofErr) return @@ -155,7 +295,7 @@ func TestIndexConsistencyProof(t *testing.T) { return } - iPeaks := Peaks(got.MMRSizeA) + iPeaks := PosPeaks(got.MMRSizeA) peakHashesA, err := PeakBagRHS(store, hasher, 0, iPeaks) if err != nil { t.Errorf("PeakBagRHS: %v", err) @@ -173,7 +313,7 @@ func TestIndexConsistencyProof(t *testing.T) { t.Errorf("GetRoot: %v", err) } - verified := VerifyConsistency(hasher, peakHashesA, got, rootA, rootB) + verified := VerifyConsistencyBagged(hasher, peakHashesA, got, rootA, rootB) if tt.wantVerify != verified { t.Errorf("VerifyConsistency() = %v, expected %v", tt.wantVerify, verified) diff --git a/mmr/proofofconsistencybagged.go b/mmr/proofofconsistencybagged.go new file mode 100644 index 0000000..2d9fba3 --- /dev/null +++ b/mmr/proofofconsistencybagged.go @@ -0,0 +1,40 @@ +package mmr + +import "hash" + +// IndexConsistencyProofBagged creates a proof that mmr B appends to mmr A. +// This method works by generating inclusion proofs for each of the peaks of A. 
+// This method results in a proof path that has some redundancy in it, but +// permits re-use of the inclusion proof verification method. +// +// As each peak is an interior node, and as each interior node commits to the +// number of nodes under it (the count of nodes at that point) there is only one +// possible location the node can exist in the tree. If node x is in both mmr A +// and mmr B then it is included in exactly the same position. +// +// Verification will first show that the root of A can be re-produced from MMR +// B, and then proceed to checking the inclusion proofs for the A peaks in mmr +// B. +func IndexConsistencyProofBagged( + mmrSizeA, mmrSizeB uint64, store indexStoreGetter, hasher hash.Hash, +) (ConsistencyProof, error) { + + proof := ConsistencyProof{ + MMRSizeA: mmrSizeA, + MMRSizeB: mmrSizeB, + } + + // Find the peaks corresponding to the previous mmr + peaksA := PosPeaks(mmrSizeA) + + // Now generate peak proofs against the new mmr size, using the peak indices + // as the input indices to prove + for _, iPeakA := range peaksA { + peakProof, err := InclusionProofBagged(mmrSizeB, store, hasher, iPeakA-1) + if err != nil { + return ConsistencyProof{}, err + } + proof.PathBagged = append(proof.PathBagged, peakProof...) 
+ } return proof, nil +} diff --git a/mmr/proofold.go b/mmr/proofold.go new file mode 100644 index 0000000..fbcf4d0 --- /dev/null +++ b/mmr/proofold.go @@ -0,0 +1,68 @@ +package mmr + +// Note: the expectation is that once we are satisfied with the new methods we +// will delete this file. A reason to keep it around is that testing may benefit +// from having multiple implementations of key algorithms + +// InclusionProofLocalOld is deprecated and retained only for testing +// See InclusionProofLocal instead +// +// collects the merkle root proof for the local MMR peak containing index i +// +// So for the following index tree, and i=15 with mmrSize = 26 we would obtain the path +// +// [H(16), H(20)] +// +// Because the local peak is 21, and given the value for 15, we only need 16 and +// then 20 to prove the local root. +// +// 3 14 +// / \ +// / \ +// / \ +// / \ +// 2 6 13 21 +// / \ / \ +// 1 2 5 9 12 17 20 24 +// / \ / \ / \ / \ / \ +// 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 +func InclusionProofLocalOld(mmrSize uint64, store indexStoreGetter, i uint64) ([][]byte, uint64, error) { + + var proof [][]byte + height := IndexHeight(i) // allows for proofs of interior nodes + + var err error + var value []byte + + for i < mmrSize { + iHeight := IndexHeight(i) + iNextHeight := IndexHeight(i + 1) + if iNextHeight > iHeight { + iSibling := i - SiblingOffset(height) + if iSibling >= mmrSize { + break + } + + if value, err = store.Get(iSibling); err != nil { + return nil, 0, err + } + proof = append(proof, value) + // go to parent node + i += 1 + } else { + iSibling := i + SiblingOffset(height) + if iSibling >= mmrSize { + break + } + + if value, err = store.Get(iSibling); err != nil { + return nil, 0, err + } + proof = append(proof, value) + // goto parent node + i += 2 << height + } + height += 1 + } + return proof, i, nil +} diff --git a/mmr/proofrefresh.go b/mmr/proofrefresh.go index 9392b3d..8a5d509 100644 --- a/mmr/proofrefresh.go +++ b/mmr/proofrefresh.go @@ 
-20,10 +20,10 @@ type ConsistencyProofLocal struct { PeakIndexB uint64 } -// IndexProofLocalExtend produces a proof which can verify for two mmr sizes +// InclusionProofLocalExtend produces a proof which can verify for two mmr sizes // It shows that the proof for mmrSizeB is an *extention* of the proof for // mmrSizeA. -func IndexProofLocalExtend(mmrSizeA, mmrSizeB uint64, store indexStoreGetter, i uint64) (ConsistencyProofLocal, error) { +func InclusionProofLocalExtend(mmrSizeA, mmrSizeB uint64, store indexStoreGetter, i uint64) (ConsistencyProofLocal, error) { height := uint64(0) diff --git a/mmr/spurs.go b/mmr/spurs.go index 8faf9d0..cb98396 100644 --- a/mmr/spurs.go +++ b/mmr/spurs.go @@ -133,9 +133,9 @@ func SpurHeightLeaf(leafIndex uint64) uint64 { // TreeIndex returns the mmr index of the i'th leaf It can also be used to // calculate the sum of all the 'alpine nodes' in the mmr blobs preceding the // blob if the blob index is substituted for iLeaf -func TreeIndex(leafIndex uint64) uint64 { +func TreeIndexOld(leafIndex uint64) uint64 { - // XXX: TODO it feels like there is a way to initialise using SpurSumHeight + // XXX: TODO it feels like there is a way to initialize using SpurSumHeight // then accumulate using some variation of the inner term of SpurSumHeight. // But the approach is already O(Log 2 n) ish. 
diff --git a/mmr/spurs_test.go b/mmr/spurs_test.go index ef9ba63..c5bf30a 100644 --- a/mmr/spurs_test.go +++ b/mmr/spurs_test.go @@ -2,6 +2,7 @@ package mmr import ( "fmt" + "math/bits" "testing" "github.com/stretchr/testify/assert" @@ -29,14 +30,14 @@ func TestSpurSum(t *testing.T) { } } -func TestTreeIndex(t *testing.T) { +func TestTreeIndexOld(t *testing.T) { treeIndices := []uint64{0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26} for iLeaf, want := range treeIndices { t.Run(fmt.Sprintf("%d -> %d", iLeaf, want), func(t *testing.T) { var got uint64 - if got = TreeIndex(uint64(iLeaf)); got != want { - t.Errorf("TreeIndex() = %v, want %v", got, treeIndices[iLeaf]) + if got = TreeIndexOld(uint64(iLeaf)); got != want { + t.Errorf("TreeIndexOld() = %v, want %v", got, treeIndices[iLeaf]) } fmt.Printf("%d -> %d\n", iLeaf, want) }) @@ -120,6 +121,8 @@ func TestLeafMinusSpurSum(t *testing.T) { t.Run(fmt.Sprintf("%d -> %d", iLeaf, want), func(t *testing.T) { sum := LeafMinusSpurSum(uint64(iLeaf)) assert.Equal(t, sum, want) + sum2 := uint64(bits.OnesCount64(uint64(iLeaf))) + assert.Equal(t, sum, sum2) // Test that the stack like property is maintained top := uint64(0) @@ -129,7 +132,7 @@ func TestLeafMinusSpurSum(t *testing.T) { top -= delta // pop top += 1 // push // fmt.Printf("%02d: %d", i, a) - // ancestors := mmr.LeftAncestors(mmr.TreeIndex(i)) + // ancestors := mmr.LeftAncestors(mmr.MMRIndex(i)) // fmt.Printf("%02d: %d %d %d: ", i, top+delta, delta, top) // for _, a := range ancestors { // fmt.Printf("%d ", a) diff --git a/mmr/testdb_test.go b/mmr/testdb_test.go index 0f182cf..88f4746 100644 --- a/mmr/testdb_test.go +++ b/mmr/testdb_test.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" "encoding/binary" + "encoding/hex" "fmt" "hash" "testing" @@ -38,7 +39,7 @@ func NewGeneratedTestDB(t *testing.T, mmrSize uint64) *testDb { } leafCount := LeafCount(mmrSize) for i := uint64(0); i < leafCount; i++ { - _, err := AddHashedLeaf(db, sha256.New(), 
hashNum(TreeIndex(i))) + _, err := AddHashedLeaf(db, sha256.New(), hashNum(MMRIndex(i))) require.NoError(t, err) } return db @@ -72,8 +73,9 @@ func TestGeneratedTestDB(t *testing.T) { ok := uint64(0) for i := uint64(0); i < mmrSize; i++ { + v := canon.mustGet(i) + fmt.Printf("|%s|%04d|%04d|\n", hex.EncodeToString(v), i, LeafCount(i)) if bytes.Compare(canon.mustGet(i), db.mustGet(i)) != 0 { - fmt.Printf("%d %d\n", i, LeafCount(i)) continue } ok++ @@ -128,10 +130,8 @@ func NewCanonicalTestDB(t *testing.T) *testDb { // 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 26 31 32 34 35 38 // . 0 . 1 2 . 3 .4 . 5 6 . 7 8 . 9 10 11 12 13 14 15 16 17 18 19 20 - // XXX: TODO update this for position commitment in interior nodes db := testDb{ t: t, store: make(map[uint64][]byte), - // next: uint64(19), next: uint64(39), } @@ -237,7 +237,6 @@ func (db *testDb) hashPair(pos, i, j uint64) []byte { if value, err = db.Get(i); err != nil { db.t.Fatalf("index %v not found", i) } - // XXX: TODO: position commitment for inner leaves h.Write(value) if value, err = db.Get(j); err != nil { db.t.Fatalf("index %v not found", i) diff --git a/mmr/verify.go b/mmr/verify.go index 49ae3fb..0a19e26 100644 --- a/mmr/verify.go +++ b/mmr/verify.go @@ -2,155 +2,80 @@ package mmr import ( "bytes" + "errors" + "fmt" "hash" ) -// VerifyInclusion returns true if the provided proof demonstrates inclusion of -// nodeHash at position iLeaf+1 -// -// proof and root should be obtained via IndexProof and GetRoot respectively. -// -// Remembering that the proof layout is this: -// -// [local-peak-proof-i, right-sibling-of-i, left-of-i-peaks-reversed] -// -// And given the following MMR -// -// 3 15 -// / \ -// / \ -// / \ -// 2 7 14 22 -// / \ / \ / \ -// 1 3 6 10 13 18 21 25 -// / \ / \ / \ / \ / \ / \ / \ -// 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 -// -// Note that only the local-peak-proof-i elements will include the commitment to -// the number of descendent tree nodes. 
This means we must include H(pos) for -// each step in local-peak-proof-i, but then exclude it in all the others. -// -// So if we have a proof for leaf position 17 (iLeaf=16) the proof will be -// composed of the local peak proof for 17, which is -// -// [ValueAt(16), ValueAt(21), Bagged-Peaks-RHS, Reveresed-LHS-Peaks] -// -// To correctly account for the position in the proof, we need to pre-pend the -// position for each element in the local peak proof: -// -// H(22 | V(21) | H(18|leaf|V(16))) -// -// Remembering that, confusingly, we always include the value for the 'right' -// node first despite the fact that reading order makes this seem 'on the left' +var ( + ErrVerifyInclusionFailed = errors.New("verify inclusion failed") +) + func VerifyInclusion( - mmrSize uint64, hasher hash.Hash, nodeHash []byte, iNode uint64, proof [][]byte, root []byte, -) bool { - ok, proofLen := VerifyFirstInclusionPath(mmrSize, hasher, nodeHash, iNode, proof, root) - return ok && proofLen == len(proof) + store indexStoreGetter, hasher hash.Hash, mmrSize uint64, leafHash []byte, iNode uint64, proof [][]byte, +) (bool, error) { + + peaks, err := PeakHashes(store, mmrSize-1) + if err != nil { + return false, err + } + + // Get the index of the peak commiting the proven element + ipeak := PeakIndex(LeafCount(mmrSize), len(proof)) + + if ipeak >= len(peaks) { + return false, fmt.Errorf( + "%w: accumulator index for proof out of range for the provided mmr size", ErrVerifyInclusionFailed) + } + + root := IncludedRoot(hasher, iNode, leafHash, proof) + if !bytes.Equal(root, peaks[ipeak]) { + return false, fmt.Errorf( + "%w: proven root not present in the accumulator", ErrVerifyInclusionFailed) + } + return true, nil } -// VerifyFirstInclusionPath process the proof until it re-produces the root -// -// This method exists for the situation where multiple, possibly related, proofs -// are catenated together in the same path. As they are in log consistency proofs. 
-// See [datatrails/go-datatrails-merklelog/merklelog/mmr/VerifyInclusion] for further details. +// VerifyInclusionPath returns true if the leafHash combined with path, reproduces the provided root // -// Returns +// To facilitate the concatenated proof paths used for consistency proofs, it +// returns the count of path elements used to reach the root. // -// true and the length of the verified path in proof on success. -// false if it reaches the end of proof. -func VerifyFirstInclusionPath( +// root: The local "peak" root in which leafHash is recorded. This root is a +// member of the current mmr accumulator, or is itself a node which can be verified +// for inclusion in a future accumulator. +func VerifyInclusionPath( mmrSize uint64, hasher hash.Hash, leafHash []byte, iNode uint64, proof [][]byte, root []byte, ) (bool, int) { - peaks := Peaks(mmrSize) - peakMap := map[uint64]bool{} - // Deal with the degenerate case where iNode is a perfect peak. The proof will be nil. - if len(proof) == 0 && bytes.Compare(leafHash, root) == 0 { + if len(proof) == 0 && bytes.Equal(leafHash, root) { return true, 0 } - height := IndexHeight(iNode) // allows for proofs of interior nodes pos := iNode + 1 + heightIndex := PosHeight(pos) // allows for proofs of interior nodes elementHash := leafHash - // The peaks are listed smallest to highest, and the proof starts with the - // local peak proof, so the first peak larger than iLeaf+1 can safely be - // used to spot the completion of the local peak proof. - var localPeak uint64 - for _, peakPos := range peaks { - // Note the position of the local peak, so we can spot when the local proof is complete - if localPeak == 0 && peakPos >= pos { - localPeak = peakPos - } - peakMap[peakPos] = true - } - for iProof, p := range proof { hasher.Reset() - // This first clause deals with accumulating the peak hashes. The first - // time it hits will be the peak for the local tree containing iLeaf. 
- // There are 3 cases: - // a) The mmr size is 1, and so iLeaf = pos -1 - // b) The mmr has a size that leaves a singleton at the lowest end of the MMR range. - // c) The normal local peak case - // - // Both a) and b) would be dealt with on the first pass, c) is triggered - // after we have traversed and accumulated the leaf proof for the local - // tree - if _, ok := peakMap[pos]; ok { - - if pos == peaks[len(peaks)-1] { - - // case a) or c) - hasher.Write(elementHash) - hasher.Write(p) - } else { - // case b) or c) - hasher.Write(p) - hasher.Write(elementHash) - pos = peaks[len(peaks)-1] - } - elementHash = hasher.Sum(nil) - if bytes.Equal(elementHash, root) { - // If we have the root then we have successfully completed the - // current proof. Return the index for the start of the next - return true, iProof + 1 - } - - continue - } - - // verify the merkle path - posHeight := PosHeight(pos) - posHeightNext := PosHeight(pos + 1) - - if posHeightNext > posHeight { + // If the next node is higher, are at the right child, and the left otherwise + if PosHeight(pos+1) > heightIndex { // we are at the right child - // Advance pos first, so we can use the parent pos to decide wether - // we are still processing the local peak proof. pos += 1 - if pos <= localPeak { - HashWriteUint64(hasher, pos) // pos is now the parent pos, which was also the commit value - } + HashWriteUint64(hasher, pos) // pos is now the parent pos, which was also the commit value hasher.Write(p) hasher.Write(elementHash) } else { // we are at the left child - // Advance pos first, so we can use the parent pos to decide wether - // we are still processing the local peak proof. 
- pos += 2 << height - if pos <= localPeak { - HashWriteUint64(hasher, pos) // pos is now the parent pos, which was also the commit value - } + pos += 2 << heightIndex + HashWriteUint64(hasher, pos) // pos is now the parent pos, which was also the commit value hasher.Write(elementHash) hasher.Write(p) - } elementHash = hasher.Sum(nil) @@ -161,7 +86,7 @@ func VerifyFirstInclusionPath( return true, iProof + 1 } - height += 1 + heightIndex += 1 } return false, len(proof) } diff --git a/mmr/verify_test.go b/mmr/verify_test.go index 8dc4618..bad953b 100644 --- a/mmr/verify_test.go +++ b/mmr/verify_test.go @@ -1,8 +1,8 @@ package mmr import ( + "bytes" "crypto/sha256" - "errors" "fmt" "testing" @@ -10,324 +10,40 @@ import ( "github.com/stretchr/testify/require" ) -func getNodes(db *testDb, iNodes ...uint64) [][]byte { - var hashes [][]byte - for i := 0; i < len(iNodes); i++ { - hashes = append(hashes, db.mustGet(iNodes[i])) - } - return hashes -} - -// TestVerifyLeavesIn38 check that we can obtain and verify proofs for all 38 leaves +// TestVerifyLeavesIn38Bagged check that we can obtain and verify proofs for all 38 leaves func TestVerifyLeavesIn38(t *testing.T) { hasher := sha256.New() db := NewCanonicalTestDB(t) - mmrSize := db.Next() - numLeafs := LeafCount(mmrSize) - - root, err := GetRoot(mmrSize, db, hasher) - if err != nil { - t.Errorf("GetRoot() err: %v", err) - } - - verifiedOk := uint64(0) - for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { - // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { - iNode := TreeIndex(iLeaf) - - proof, err := IndexProof(mmrSize, db, hasher, iNode) - require.NoError(t, err) - - nodeHash, err := db.Get(iNode) - require.NoError(t, err) - - if !VerifyInclusion(mmrSize, hasher, nodeHash, iNode, proof, root) { - fmt.Printf("%d %d VerifyInclusion() failed\n", iNode, iLeaf) - } else { - verifiedOk++ - } - } - assert.Equal(t, verifiedOk, numLeafs) - // fmt.Printf("VerifyInclusion() ok size=%d, leaves=%d, ok=%d\n", mmrSize, numLeafs, 
verifiedOk) -} - -// TestVerify38 check that we can obtain and verify proofs for all 38 *nodes* -func TestVerify38(t *testing.T) { - hasher := sha256.New() - db := NewCanonicalTestDB(t) - mmrSize := db.Next() - - root, err := GetRoot(mmrSize, db, hasher) - if err != nil { - t.Errorf("GetRoot() err: %v", err) - } - - verifiedOk := uint64(0) - for iNode := uint64(0); iNode < mmrSize; iNode++ { - // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { - // iNode := TreeIndex(iLeaf) - - proof, err := IndexProof(mmrSize, db, hasher, iNode) - require.NoError(t, err) - - nodeHash, err := db.Get(iNode) - require.NoError(t, err) - - if !VerifyInclusion(mmrSize, hasher, nodeHash, iNode, proof, root) { - fmt.Printf("%d %d VerifyInclusion() failed\n", iNode, iNode) - } else { - verifiedOk++ - } - } - assert.Equal(t, verifiedOk, mmrSize) - // fmt.Printf("VerifyInclusion() ok size=%d, leaves=%d, ok=%d\n", mmrSize, numLeafs, verifiedOk) -} - -// TestVerifyPerfectRoots checks we can produce and verify proofs for the -// perfect peaks, which should be just the peaks them selves -func TestVerifyPerfectRoots(t *testing.T) { - hasher := sha256.New() - - verifiedOk := 0 - - sizes := []uint64{3, 7, 15, 31, 63} - for _, mmrSize := range sizes { - db := NewGeneratedTestDB(t, mmrSize) - - root, err := GetRoot(mmrSize, db, hasher) - if err != nil { - t.Errorf("GetRoot() err: %v", err) - } - - iNode := mmrSize - 1 - proof, err := IndexProof(mmrSize, db, hasher, iNode) - require.NoError(t, err) - - nodeHash, err := db.Get(iNode) - require.NoError(t, err) - - if !VerifyInclusion(mmrSize, hasher, nodeHash, iNode, proof, root) { - fmt.Printf("%d %d VerifyInclusion() failed\n", iNode, iNode) - } else { - verifiedOk++ - } - } - assert.Equal(t, verifiedOk, len(sizes)) - // fmt.Printf("VerifyInclusion() ok size=%d, leaves=%d, ok=%d\n", mmrSize, numLeafs, verifiedOk) -} - -func TestVerifyIndex30InSize63(t *testing.T) { - - hasher := sha256.New() - // 63 is the first mmr with a hieght of 5 (and so is a 
perfect peak) - db := NewGeneratedTestDB(t, 63) - root, err := GetRoot(63, db, hasher) - require.NoError(t, err) - peakProof, err := IndexProof(63, db, hasher, 30) - require.NoError(t, err) - peakHash := db.mustGet(30) - ok := VerifyInclusion(63, hasher, peakHash, 30, peakProof, root) - assert.True(t, ok) -} - -// TestReVerify38ForAllSizes -// Test that as the mmr grows, the previously verified nodes continue to be -// provable and verifiable. Note that the proofs will be different as the tree -// root changes with the size. However, note also that any historic proof can be -// shown to be a 'sub-proof' of the new accumulator state and hence verifiable -// or exchangeable at any time. -// bug-9026 -func TestReVerify38ForAllSizes(t *testing.T) { - hasher := sha256.New() - // db := NewCanonicalTestDB(t) - db := NewGeneratedTestDB(t, 63) - maxMMRSize := db.Next() - numLeafs := LeafCount(maxMMRSize) + mmrMaxSize := db.Next() + numLeafs := LeafCount(mmrMaxSize) for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { - iNode := TreeIndex(iLeaf) + mmrIndex := MMRIndex(iLeaf) - // Check that all valid mmr sizes which contain the node can generate verifiable proofs for it. - // - // iLeaf is the leaf we are interested in ensuring verification for. 
- // jLeaf is used to derive all the successive mmrSizes that continue to contain iLeaf - for jLeaf := iLeaf; jLeaf < numLeafs; jLeaf++ { - // the spur length + the node index gives us the minimum mmrsize that contains the leaf - jNode := TreeIndex(jLeaf) - spurLen := SpurHeightLeaf(jLeaf) + for s := FirstMMRSize(MMRIndex(iLeaf)); s <= mmrMaxSize; s = FirstMMRSize(s + 1) { - jMMRSize := jNode + spurLen + 1 + // Verify each leaf in all complete mmr sizes up to the size of the canonical mmr + // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { - root, err := GetRoot(jMMRSize, db, hasher) + proof, err := InclusionProof(db, s-1, mmrIndex) require.NoError(t, err) - // Get the proof for *** iLeaf's node *** - proof, err := IndexProof(jMMRSize, db, hasher, iNode) + + nodeHash, err := db.Get(mmrIndex) require.NoError(t, err) - if proof == nil { - // This is the iLeaf == 0 && mmrSize == 1 case which is - // peculiar. We can't really say the mmr with a single entry is - // 'provable', it just is. In reality, a customer may create a - // single event. They will get an empty receipt if they ask. - // After the next confirmation tick, forestrie will sign a - // tenant tree root. And in this case that root hash will just - // be the single node. In this specific case, data trails - // attestation is just the signed root. This peculiar case goes - // away as soon as the second event is recorded. - assert.Equal(t, db.mustGet(iNode), root) - assert.Equal(t, iNode, uint64(0)) - assert.Equal(t, jMMRSize, uint64(1)) - } else { - nodeHash, err := db.Get(iNode) - require.NoError(t, err) - // verify iNode using the j mmr size. 
- ok := VerifyInclusion(jMMRSize, hasher, nodeHash, iNode, proof, root) - assert.Equal(t, ok, true) + accumulator, err := PeakHashes(db, s-1) + require.NoError(t, err) + iacc := PeakIndex(LeafCount(s), len(proof)) + require.Less(t, iacc, len(accumulator)) + peak := accumulator[iacc] + root := IncludedRoot(hasher, mmrIndex, nodeHash, proof) + if !bytes.Equal(root, peak) { + fmt.Printf("%d %d TestVerifyLeavesIn38 failed\n", mmrIndex, iLeaf) } + assert.Equal(t, root, peak) } } -} - -func TestVerify(t *testing.T) { - - hasher := sha256.New() - db := NewCanonicalTestDB(t) - // mmrSize := uint64(39) - - H := func(i uint64) []byte { - return db.mustGet(i) - } - - getProof := func(mmrSize uint64, i uint64) [][]byte { - proof, err := IndexProof(mmrSize, db, hasher, i) - require.NoError(t, err) - if mmrSize == 1 && proof != nil { - t.Errorf("IndexProof() err: %v", errors.New("mmr size 1 should return nil proof")) - return nil - } - return proof - } - - verify := func(mmrSize uint64, nodeHash []byte, iNode uint64, proof [][]byte) bool { - root, err := GetRoot(mmrSize, db, hasher) - require.NoError(t, err) - if mmrSize == 1 { - // special case - return proof == nil - } - return VerifyInclusion(mmrSize, hasher, nodeHash, iNode, proof, root) - } - - type proofNodes struct { - iLocalPeak uint64 - local []uint64 - peaksRHS []uint64 - peaksLHS []uint64 - } - - type args struct { - mmrSize uint64 - leafHash []byte - iLeaf uint64 - proof [][]byte - } - tests := []struct { - name string - args args - want bool - expectProofNodes *proofNodes - }{ - { // this fails - "prove leaf index 22 for sz 26", - args{26, H(22), 22, getProof(26, 22)}, - true, - &proofNodes{ - iLocalPeak: 24, - local: []uint64{23}, - peaksRHS: []uint64{25}, - peaksLHS: []uint64{14, 21}, - }, - }, - - { // this is ok - "prove leaf index 19 for sz 26", - args{26, H(19), 19, getProof(26, 19)}, true, - &proofNodes{ - iLocalPeak: 21, - local: []uint64{18, 17}, - peaksRHS: []uint64{24, 25}, - peaksLHS: []uint64{14}, - }, 
- }, - { - "prove leaf index 23 for sz 25", - args{25, H(23), 23, getProof(25, 23)}, - true, - &proofNodes{ - iLocalPeak: 24, - local: []uint64{22}, - peaksRHS: nil, - peaksLHS: []uint64{14, 21}, - }, - }, - { - "prove leaf index 23 for sz 26", - args{26, H(23), 23, getProof(26, 23)}, true, nil, - }, - { - "prove leaf index 19 for sz 26", - args{26, H(19), 19, getProof(26, 19)}, true, nil, - }, - - { - "prove interior node index 2", - args{26, H(2), 2, getProof(26, 2)}, true, nil, - }, - { - "prove leaf index 1", - args{26, H(1), 1, getProof(26, 1)}, true, nil, - }, - - { - "prove mid range (sibling mountains either side)", - args{26, H(17 - 1), 16, getProof(26, 16)}, true, nil, - }, - { - "edge case, prove the solo leaf at the end of the range", - args{39, H(26 - 1), 25, getProof(39, 25)}, true, nil, - }, - { - "edge case, prove the first leaf in the tree", - args{26, H(0), 0, getProof(26, 0)}, true, nil, - }, - { - "edge case, prove a singleton", - args{1, H(0), 1, getProof(1, 0)}, true, nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.expectProofNodes != nil { - localPath, iLocalPeak, err := IndexProofLocal(tt.args.mmrSize, db, tt.args.iLeaf) - require.NoError(t, err) - assert.Equal(t, iLocalPeak, tt.expectProofNodes.iLocalPeak, "local peak incorrect") - assert.Equal(t, localPath, getNodes(db, tt.expectProofNodes.local...)) - - peaks := Peaks(tt.args.mmrSize) - - peakHashes, err := PeakBagRHS(db, hasher, iLocalPeak+1, peaks) - require.NoError(t, err) - assert.Equal(t, peakHashes, getNodes(db, tt.expectProofNodes.peaksRHS...)) - - leftPath, err := PeaksLHS(db, iLocalPeak+1, peaks) - require.NoError(t, err) - assert.Equal(t, leftPath, getNodes(db, tt.expectProofNodes.peaksLHS...)) - } - if got := verify(tt.args.mmrSize, tt.args.leafHash, tt.args.iLeaf, tt.args.proof); got != tt.want { - t.Errorf("Verify() = %v, want %v", got, tt.want) - } - }) - } + // fmt.Printf("VerifyInclusion() ok size=%d, leaves=%d, ok=%d\n", mmrSize, 
numLeafs, verifiedOk) } diff --git a/mmr/verifybagged.go b/mmr/verifybagged.go new file mode 100644 index 0000000..e9a8a95 --- /dev/null +++ b/mmr/verifybagged.go @@ -0,0 +1,171 @@ +package mmr + +import ( + "bytes" + "hash" +) + +// Verification is by default against the MRR accumulator peaks (see verify.go). The "Bagged" +// variants work with proofs against a "Bagged" singular mono root for an MMR. +// We may remove these methods in future. + +// VerifyInclusionBagged returns true if the provided proof demonstrates inclusion of +// nodeHash at position iLeaf+1 +// +// proof and root should be obtained via InclusionProof and GetRoot respectively. +// +// Remembering that the proof layout is this: +// +// [local-peak-proof-i, right-sibling-of-i, left-of-i-peaks-reversed] +// +// And given the following MMR +// +// 3 15 +// / \ +// / \ +// / \ +// 2 7 14 22 +// / \ / \ / \ +// 1 3 6 10 13 18 21 25 +// / \ / \ / \ / \ / \ / \ / \ +// 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 +// +// Note that only the local-peak-proof-i elements will include the commitment to +// the number of descendent tree nodes. This means we must include H(pos) for +// each step in local-peak-proof-i, but then exclude it in all the others. 
+// +// So if we have a proof for leaf position 17 (iLeaf=16) the proof will be +// composed of the local peak proof for 17, which is +// +// [ValueAt(16), ValueAt(21), Bagged-Peaks-RHS, Reversed-LHS-Peaks] +// +// To correctly account for the position in the proof, we need to pre-pend the +// position for each element in the local peak proof: +// +// H(22 | V(21) | H(18|leaf|V(16))) +// +// Remembering that, confusingly, we always include the value for the 'right' +// node first despite the fact that reading order makes this seem 'on the left' +func VerifyInclusionBagged( + mmrSize uint64, hasher hash.Hash, nodeHash []byte, iNode uint64, proof [][]byte, root []byte, +) bool { + ok, proofLen := VerifyFirstInclusionPathBagged(mmrSize, hasher, nodeHash, iNode, proof, root) + return ok && proofLen == len(proof) +} + +// VerifyFirstInclusionPathBagged processes the proof until it re-produces the "bagged" root of the MMR +// +// This method exists for the situation where multiple, possibly related, proofs +// are catenated together in the same path. As they are in log consistency +// proofs, when they are proven against a mono root. +// See [datatrails/go-datatrails-merklelog/merklelog/mmr/VerifyInclusion] for further details. +// +// Returns +// +// true and the length of the verified path in proof on success. +// false if it reaches the end of proof. +func VerifyFirstInclusionPathBagged( + mmrSize uint64, hasher hash.Hash, leafHash []byte, iNode uint64, proof [][]byte, root []byte, +) (bool, int) { + + peaks := PosPeaks(mmrSize) + peakMap := map[uint64]bool{} + + // Deal with the degenerate case where iNode is a perfect peak. The proof will be nil. 
+ if len(proof) == 0 && bytes.Equal(leafHash, root) { + return true, 0 + } + + heightIndex := IndexHeight(iNode) // allows for proofs of interior nodes + pos := iNode + 1 + elementHash := leafHash + + // The peaks are listed smallest to highest, and the proof starts with the + // local peak proof, so the first peak larger than iLeaf+1 can safely be + // used to spot the completion of the local peak proof. + var localPeak uint64 + for _, peakPos := range peaks { + // Note the position of the local peak, so we can spot when the local proof is complete + if localPeak == 0 && peakPos >= pos { + localPeak = peakPos + } + peakMap[peakPos] = true + } + + for iProof, p := range proof { + + hasher.Reset() + + // This first clause deals with accumulating the peak hashes. The first + // time it hits will be the peak for the local tree containing iLeaf. + // There are 3 cases: + // a) The mmr size is 1, and so iLeaf = pos -1 + // b) The mmr has a size that leaves a singleton at the lowest end of the MMR range. + // c) The normal local peak case + // + // Both a) and b) would be dealt with on the first pass, c) is triggered + // after we have traversed and accumulated the leaf proof for the local + // tree + if _, ok := peakMap[pos]; ok { + + if pos == peaks[len(peaks)-1] { + + // case a) or c) + hasher.Write(elementHash) + hasher.Write(p) + } else { + // case b) or c) + hasher.Write(p) + hasher.Write(elementHash) + pos = peaks[len(peaks)-1] + } + elementHash = hasher.Sum(nil) + if bytes.Equal(elementHash, root) { + // If we have the root then we have successfully completed the + // current proof. Return the index for the start of the next + return true, iProof + 1 + } + + continue + } + + // verify the merkle path + nextHeight := PosHeight(pos + 1) + + if nextHeight > heightIndex { + // we are at the right child + + // Advance pos first, so we can use the parent pos to decide whether + // we are still processing the local peak proof. 
+ pos += 1 + if pos <= localPeak { + HashWriteUint64(hasher, pos) // pos is now the parent pos, which was also the commit value + } + hasher.Write(p) + hasher.Write(elementHash) + } else { + // we are at the left child + + // Advance pos first, so we can use the parent pos to decide whether + // we are still processing the local peak proof. + pos += 2 << heightIndex + if pos <= localPeak { + HashWriteUint64(hasher, pos) // pos is now the parent pos, which was also the commit value + } + hasher.Write(elementHash) + hasher.Write(p) + + } + + elementHash = hasher.Sum(nil) + + if bytes.Equal(elementHash, root) { + // If we have the root then we have successfully completed the + // current proof. Return the index for the start of the next + return true, iProof + 1 + } + + heightIndex += 1 + } + return false, len(proof) +} diff --git a/mmr/verifybagged_test.go b/mmr/verifybagged_test.go new file mode 100644 index 0000000..8b5f045 --- /dev/null +++ b/mmr/verifybagged_test.go @@ -0,0 +1,419 @@ +package mmr + +import ( + "crypto/sha256" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func getNodes(db *testDb, iNodes ...uint64) [][]byte { + var hashes [][]byte + for i := 0; i < len(iNodes); i++ { + hashes = append(hashes, db.mustGet(iNodes[i])) + } + return hashes +} + +// TestVerifyLeavesIn38Bagged checks that we can obtain and verify proofs for all 38 leaves +func TestVerifyLeavesIn38Bagged(t *testing.T) { + hasher := sha256.New() + db := NewCanonicalTestDB(t) + mmrSize := db.Next() + numLeafs := LeafCount(mmrSize) + + root, err := GetRoot(mmrSize, db, hasher) + if err != nil { + t.Errorf("GetRoot() err: %v", err) + } + + verifiedOk := uint64(0) + for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { + // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { + iNode := MMRIndex(iLeaf) + + proof, err := InclusionProofBagged(mmrSize, db, hasher, iNode) + require.NoError(t, err) + + nodeHash, err := db.Get(iNode)
+ require.NoError(t, err) + + if !VerifyInclusionBagged(mmrSize, hasher, nodeHash, iNode, proof, root) { + fmt.Printf("%d %d VerifyInclusion() failed\n", iNode, iLeaf) + } else { + verifiedOk++ + } + } + assert.Equal(t, verifiedOk, numLeafs) + // fmt.Printf("VerifyInclusion() ok size=%d, leaves=%d, ok=%d\n", mmrSize, numLeafs, verifiedOk) +} + +// TestVerify38Bagged checks that we can obtain and verify proofs for all 38 *nodes* +func TestVerify38Bagged(t *testing.T) { + hasher := sha256.New() + db := NewCanonicalTestDB(t) + mmrSize := db.Next() + + root, err := GetRoot(mmrSize, db, hasher) + if err != nil { + t.Errorf("GetRoot() err: %v", err) + } + + verifiedOk := uint64(0) + for iNode := uint64(0); iNode < mmrSize; iNode++ { + // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { + // iNode := MMRIndex(iLeaf) + + proof, err := InclusionProofBagged(mmrSize, db, hasher, iNode) + require.NoError(t, err) + + nodeHash, err := db.Get(iNode) + require.NoError(t, err) + + if !VerifyInclusionBagged(mmrSize, hasher, nodeHash, iNode, proof, root) { + fmt.Printf("%d %d VerifyInclusion() failed\n", iNode, iNode) + } else { + verifiedOk++ + } + } + assert.Equal(t, verifiedOk, mmrSize) + // fmt.Printf("VerifyInclusion() ok size=%d, leaves=%d, ok=%d\n", mmrSize, numLeafs, verifiedOk) +} + +// TestVerifyPerfectRootsBagged checks we can produce and verify proofs for the +// perfect peaks, which should be just the peaks themselves +func TestVerifyPerfectRootsBagged(t *testing.T) { + hasher := sha256.New() + + verifiedOk := 0 + + sizes := []uint64{3, 7, 15, 31, 63} + for _, mmrSize := range sizes { + db := NewGeneratedTestDB(t, mmrSize) + + root, err := GetRoot(mmrSize, db, hasher) + if err != nil { + t.Errorf("GetRoot() err: %v", err) + } + + iNode := mmrSize - 1 + proof, err := InclusionProofBagged(mmrSize, db, hasher, iNode) + require.NoError(t, err) + + nodeHash, err := db.Get(iNode) + require.NoError(t, err) + + if !VerifyInclusionBagged(mmrSize, hasher, nodeHash, iNode,
proof, root) { + fmt.Printf("%d %d VerifyInclusion() failed\n", iNode, iNode) + } else { + verifiedOk++ + } + } + assert.Equal(t, verifiedOk, len(sizes)) + // fmt.Printf("VerifyInclusion() ok size=%d, leaves=%d, ok=%d\n", mmrSize, numLeafs, verifiedOk) +} + +func TestVerifyIndex30InSize63Bagged(t *testing.T) { + + hasher := sha256.New() + // 63 is the first mmr with a height of 5 (and so is a perfect peak) + db := NewGeneratedTestDB(t, 63) + root, err := GetRoot(63, db, hasher) + require.NoError(t, err) + peakProof, err := InclusionProofBagged(63, db, hasher, 30) + require.NoError(t, err) + peakHash := db.mustGet(30) + ok := VerifyInclusionBagged(63, hasher, peakHash, 30, peakProof, root) + assert.True(t, ok) +} + +// TestReVerify38ForAllSizesBagged +// Test that as the mmr grows, the previously verified nodes continue to be +// provable and verifiable. Note that the proofs will be different as the tree +// root changes with the size. However, note also that any historic proof can be +// shown to be a 'sub-proof' of the new accumulator state and hence verifiable +// or exchangeable at any time. +// bug-9026 +func TestReVerify38ForAllSizesBagged(t *testing.T) { + hasher := sha256.New() + // db := NewCanonicalTestDB(t) + db := NewGeneratedTestDB(t, 63) + maxMMRSize := db.Next() + numLeafs := LeafCount(maxMMRSize) + + for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { + + iNode := MMRIndex(iLeaf) + + // Check that all valid mmr sizes which contain the node can generate verifiable proofs for it. + // + // iLeaf is the leaf we are interested in ensuring verification for.
+ // jLeaf is used to derive all the successive mmrSizes that continue to contain iLeaf + for jLeaf := iLeaf; jLeaf < numLeafs; jLeaf++ { + // the spur length + the node index gives us the minimum mmrsize that contains the leaf + jNode := MMRIndex(jLeaf) + spurLen := SpurHeightLeaf(jLeaf) + + jMMRSize := jNode + spurLen + 1 + + root, err := GetRoot(jMMRSize, db, hasher) + require.NoError(t, err) + // Get the proof for *** iLeaf's node *** + proof, err := InclusionProofBagged(jMMRSize, db, hasher, iNode) + require.NoError(t, err) + if proof == nil { + // This is the iLeaf == 0 && mmrSize == 1 case which is + // peculiar. We can't really say the mmr with a single entry is + // 'provable', it just is. In reality, a customer may create a + // single event. They will get an empty receipt if they ask. + // After the next confirmation tick, forestrie will sign a + // tenant tree root. And in this case that root hash will just + // be the single node. In this specific case, data trails + // attestation is just the signed root. This peculiar case goes + // away as soon as the second event is recorded. + assert.Equal(t, db.mustGet(iNode), root) + assert.Equal(t, iNode, uint64(0)) + assert.Equal(t, jMMRSize, uint64(1)) + } else { + nodeHash, err := db.Get(iNode) + require.NoError(t, err) + + // verify iNode using the j mmr size. 
+ ok := VerifyInclusionBagged(jMMRSize, hasher, nodeHash, iNode, proof, root) + assert.Equal(t, ok, true) + } + } + } +} + +func TestVerify(t *testing.T) { + + hasher := sha256.New() + db := NewCanonicalTestDB(t) + // mmrSize := uint64(39) + + H := func(i uint64) []byte { + return db.mustGet(i) + } + + getProofBagged := func(mmrSize uint64, i uint64) [][]byte { + proof, err := InclusionProofBagged(mmrSize, db, hasher, i) + require.NoError(t, err) + if mmrSize == 1 && proof != nil { + t.Errorf("InclusionProof() err: %v", errors.New("mmr size 1 should return nil proof")) + return nil + } + return proof + } + getProof := func(mmrSize uint64, i uint64) [][]byte { + proof, err := InclusionProof(db, mmrSize-1, i) + require.NoError(t, err) + if mmrSize == 1 && proof != nil { + t.Errorf("InclusionProof() err: %v", errors.New("mmr size 1 should return nil proof")) + return nil + } + return proof + } + + verifyBagged := func(mmrSize uint64, nodeHash []byte, mmrIndex uint64, proof [][]byte) bool { + root, err := GetRoot(mmrSize, db, hasher) + require.NoError(t, err) + if mmrSize == 1 { + // special case + return proof == nil + } + baggedOk := VerifyInclusionBagged(mmrSize, hasher, nodeHash, mmrIndex, proof, root) + return baggedOk + // ok, lenProofUsed := VerifyInclusionPath(mmrSize, hasher, nodeHash, iNode, proof, root) + // return baggedOk && ok && lenProofUsed == len(proof) + } + verify := func(mmrSize uint64, nodeHash []byte, mmrIndex uint64, proof [][]byte) (bool, int) { + + // To account for interior nodes, we add the height of the node to the proof length. 
+ nodeHeightIndex := IndexHeight(mmrIndex) + d := len(proof) + int(nodeHeightIndex) + + // get the index into the accumulator + // peakMap is also the leaf count, which is often also known + peakMap := LeafCount(mmrSize) + peakIndex := PeakIndex(peakMap, d) + peakHashes, err := PeakHashes(db, mmrSize-1) + require.Less(t, peakIndex, len(peakHashes)) + require.NoError(t, err) + root := peakHashes[peakIndex] + + return VerifyInclusionPath(mmrSize, hasher, nodeHash, mmrIndex, proof, root) + } + + type proofNodes struct { + iLocalPeak uint64 + localHeightIndex uint64 + local []uint64 + peaksRHS []uint64 + peaksLHS []uint64 + } + + type args struct { + mmrSize uint64 + leafHash []byte + mmrIndex uint64 + proofBagged [][]byte + proof [][]byte + } + tests := []struct { + name string + args args + want bool + expectProofNodes *proofNodes + }{ + { + "prove node index 0 in MMR(3)", + args{3, H(0), 0, getProofBagged(3, 0), getProof(3, 0)}, true, nil, + }, + { + "prove node index 0 in MMR(7)", + args{7, H(0), 0, getProofBagged(7, 0), getProof(7, 0)}, true, nil, + }, + + { + "prove interior node index 2", + args{26, H(2), 2, getProofBagged(26, 2), getProof(26, 2)}, true, nil, + }, + + { + "prove leaf node index 23 for sz 25", + args{25, H(23), 23, getProofBagged(25, 23), getProof(25, 23)}, + true, + &proofNodes{ + iLocalPeak: 24, + localHeightIndex: 1, + local: []uint64{22}, + peaksLHS: []uint64{14, 21}, + peaksRHS: nil, + }, + }, + + { + "prove leaf node index 7 for sz 11", + args{11, H(7), 7, getProofBagged(11, 7), getProof(11, 7)}, + true, + &proofNodes{ + iLocalPeak: 9, + localHeightIndex: 1, + local: []uint64{8}, + peaksLHS: []uint64{6}, + peaksRHS: []uint64{10}, + }, + }, + { + "prove leaf node index 7 for sz 19", + args{19, H(7), 7, getProofBagged(19, 7), getProof(19, 7)}, + true, + &proofNodes{ + iLocalPeak: 14, + localHeightIndex: 3, + local: []uint64{8, 12, 6}, + peaksLHS: nil, + peaksRHS: []uint64{17, 18}, + }, + }, + + { // this fails + "prove leaf node index 22 for 
sz 26", + args{26, H(22), 22, getProofBagged(26, 22), getProof(26, 22)}, + true, + &proofNodes{ + iLocalPeak: 24, + localHeightIndex: 1, + local: []uint64{23}, + peaksLHS: []uint64{14, 21}, + peaksRHS: []uint64{25}, + }, + }, + + { // this is ok + "prove leaf node index 19 for sz 26", + args{26, H(19), 19, getProofBagged(26, 19), getProof(26, 19)}, true, + &proofNodes{ + iLocalPeak: 21, + localHeightIndex: 2, + local: []uint64{18, 17}, + peaksLHS: []uint64{14}, + peaksRHS: []uint64{24, 25}, + }, + }, + + { + "prove leaf node index 23 for sz 26", + args{26, H(23), 23, getProofBagged(26, 23), getProof(26, 23)}, true, nil, + }, + { + "prove leaf node index 19 for sz 26", + args{26, H(19), 19, getProofBagged(26, 19), getProof(26, 19)}, true, nil, + }, + + { + "prove leaf node index 1", + args{26, H(1), 1, getProofBagged(26, 1), getProof(26, 1)}, true, nil, + }, + + { + "prove mid range (sibling mountains either side)", + args{26, H(17 - 1), 16, getProofBagged(26, 16), getProof(26, 16)}, true, nil, + }, + { + "edge case, prove the solo leaf at the end of the range", + args{39, H(26 - 1), 25, getProofBagged(39, 25), getProof(39, 25)}, true, nil, + }, + { + "edge case, prove the first leaf in the tree", + args{26, H(0), 0, getProofBagged(26, 0), getProof(26, 0)}, true, nil, + }, + { + "edge case, prove a singleton", + args{1, H(0), 1, getProofBagged(1, 0), getProof(1, 0)}, true, nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.expectProofNodes != nil { + localPath, iLocalPeak, err := InclusionProofLocal( + tt.args.mmrSize, db, tt.args.mmrIndex) + localHeightIndex := len(localPath) + require.NoError(t, err) + assert.Equal(t, tt.expectProofNodes.iLocalPeak, iLocalPeak, "local peak incorrect") + assert.Equal(t, getNodes(db, tt.expectProofNodes.local...), localPath) + + peaks := PosPeaks(tt.args.mmrSize) + + peakBits := PeaksBitmap(tt.args.mmrSize) + + // the index into the packed accumulator peaks is the count how + // many bits are 
set *above* localHeightIndex in the mask + iPeak := PeakIndex(peakBits, len(localPath)) + // iPeak := bits.OnesCount64(peakBits & ^((1< MMR(B) +// 7 in MMR(B) -> [] +// 8 in MMR(B) -> [9] +// Path = [[], [9]] +func VerifyConsistency( + hasher hash.Hash, + cp ConsistencyProof, peaksFrom [][]byte, peaksTo [][]byte) (bool, [][]byte, error) { - // Establish the node indices of the peaks in the original mmr A. Those - // peak nodes must be at the same indices in mmr B for the update to be - // considered consistent. However, if mmr b has additional entries at all, - // some or all of those peaks from A will no longer be peaks in B. - peakPositions := Peaks(proof.MMRSizeA) - - var ok bool - iPeakHashA := 0 - path := proof.Path - for ; iPeakHashA < len(peakHashesA); iPeakHashA++ { - - // Verify that the peak from A is included in mmr B. As the interior - // node hashes commit the node position in the log, this can only - // succeed if the peaks are both included and placed in the same - // position. - nodeHash := peakHashesA[iPeakHashA] - - var proofLen int - - ok, proofLen = VerifyFirstInclusionPath( - proof.MMRSizeB, hasher, nodeHash, peakPositions[iPeakHashA]-1, - path, rootB) - if !ok || proofLen > len(path) { - return false - } - path = path[proofLen:] + // Get the peaks proven by the consistency proof using the provided peaks + // for mmr size A + proven, err := ConsistentRoots(hasher, cp.MMRSizeA-1, peaksFrom, cp.Path) + if err != nil { + return false, nil, err } - // Note: only return true if we have verified the complete path. - return ok && len(path) == 0 -} + // If all proven nodes match an accumulator peak for MMR(sizeB) then MMR(sizeA) + // is consistent with MMR(sizeB). Because both the peaks and the accumulator + // peaks are listed in descending order of height this can be accomplished + // with a linear scan. -// CheckConsistency is used to check that a new log update is consistent With -// respect to some previously known root and the current store. 
-func CheckConsistency( - store indexStoreGetter, hasher hash.Hash, - cp ConsistencyProof, rootA []byte) (bool, []byte, error) { + ito := 0 + for _, root := range proven { - iPeaks := Peaks(cp.MMRSizeA) + if bytes.Equal(peaksTo[ito], root) { + continue + } - // logger.Sugar.Infof(".... PeakBagRHS: %v", iPeaks) - peakHashesA, err := PeakBagRHS(store, hasher, 0, iPeaks) - if err != nil { - return false, nil, err - } + // If the root does not match the current peak then it must match the + // next one down. - // logger.Sugar.Infof(".... GetRoot") - rootB, err := GetRoot(cp.MMRSizeB, store, hasher) - if err != nil { - return false, nil, err + ito += 1 + + if ito >= len(peaksTo) { + return false, nil, ErrConsistencyCheck + } + + if !bytes.Equal(peaksTo[ito], root) { + return false, nil, ErrConsistencyCheck + } } - return VerifyConsistency( - hasher, peakHashesA, cp, rootA, rootB), rootB, nil + // the accumulator consists of the proven peaks plus any new peaks in peaksTo. + // In the draft these new peaks are the 'right-peaks' of the consistency proof. + // Here, as ConsistentRoots requires that the peak count for the provided ifrom + // matches the number of peaks in peaksFrom, simply returning peaksTo is safe. + // Even in the corner case where proven is empty. + // + // We could do + // proven = append(proven, peaksTo[len(proven):]...) + // + // But that would be completely redundant given the loop above. + return true, peaksTo, nil } diff --git a/mmr/verifyconsistencybagged.go b/mmr/verifyconsistencybagged.go new file mode 100644 index 0000000..56659b1 --- /dev/null +++ b/mmr/verifyconsistencybagged.go @@ -0,0 +1,102 @@ +package mmr + +import ( + "bytes" + "hash" +) + +// VerifyConsistencyBagged returns true if the mmr log update from mmr a to mmr b is +// append only. This means that the new log contains an exact copy of the +// previous log, with any new nodes appended after. 
The proof is created by +// [datatrails/go-datatrails-merklelog/merklelog/mmr/IndexConsistencyProof] +// +// The proof comprises a single path which contains an inclusion proof for each +// peak node in the old mmr against the new mmr root. As all mmr interior nodes +// are committed to their mmr position when added, this is sufficient to show +// the new mmr contains an exact copy of the previous. And so can only be the +// result of append operations. +// +// There is, of course, some redundancy in the path, but accepting that allows +// re-use of VerifyInclusion for both consistency and inclusion proofs. +func VerifyConsistencyBagged( + hasher hash.Hash, peakHashesA [][]byte, + proof ConsistencyProof, rootA []byte, rootB []byte) bool { + + // A zero length path is not valid, even in the case where the mmr's are + // identical (root a == root b) + if len(proof.PathBagged) == 0 { + return false + } + + // There must be something to prove + if len(peakHashesA) == 0 { + return false + } + + // Catch the case where mmr b is exactly mmr a + if bytes.Equal(rootA, rootB) { + return true + } + + // Check the peakHashesA, which will have been retrieved from the updated + // log, recreate rootA. rootA should have come from a previous Merkle + // Signed Root. + if !bytes.Equal(HashPeaksRHS(hasher, peakHashesA), rootA) { + return false + } + + // Establish the node indices of the peaks in the original mmr A. Those + // peak nodes must be at the same indices in mmr B for the update to be + // considered consistent. However, if mmr b has additional entries at all, + // some or all of those peaks from A will no longer be peaks in B. + peakPositions := PosPeaks(proof.MMRSizeA) + + var ok bool + iPeakHashA := 0 + path := proof.PathBagged + for ; iPeakHashA < len(peakHashesA); iPeakHashA++ { + + // Verify that the peak from A is included in mmr B.
As the interior + // node hashes commit the node position in the log, this can only + // succeed if the peaks are both included and placed in the same + // position. + nodeHash := peakHashesA[iPeakHashA] + + var proofLen int + + ok, proofLen = VerifyFirstInclusionPathBagged( + proof.MMRSizeB, hasher, nodeHash, peakPositions[iPeakHashA]-1, + path, rootB) + if !ok || proofLen > len(path) { + return false + } + path = path[proofLen:] + } + + // Note: only return true if we have verified the complete path. + return ok && len(path) == 0 +} + +// CheckConsistencyBagged is used to check that a new log update is consistent With +// respect to some previously known "bagged" root and the current store. +func CheckConsistencyBagged( + store indexStoreGetter, hasher hash.Hash, + cp ConsistencyProof, rootA []byte) (bool, []byte, error) { + + iPeaks := PosPeaks(cp.MMRSizeA) + + // logger.Sugar.Infof(".... PeakBagRHS: %v", iPeaks) + peakHashesA, err := PeakBagRHS(store, hasher, 0, iPeaks) + if err != nil { + return false, nil, err + } + + // logger.Sugar.Infof(".... GetRoot") + rootB, err := GetRoot(cp.MMRSizeB, store, hasher) + if err != nil { + return false, nil, err + } + + return VerifyConsistencyBagged( + hasher, peakHashesA, cp, rootA, rootB), rootB, nil +} diff --git a/mmr/verifyold.go b/mmr/verifyold.go new file mode 100644 index 0000000..f7fd6d3 --- /dev/null +++ b/mmr/verifyold.go @@ -0,0 +1,51 @@ +package mmr + +import "hash" + +// Note: the expectation is that once we are satisfied with the new methods we +// will delete this file A reason to keep it around is that testing may benefit +// from having multiple implementations of key algorithms + +// VerifyInclusionOld returns true if the provided proof demonstrates inclusion of +// nodeHash at position iLeaf+1 +// +// proof and root should be obtained via InclusionProof and GetRoot respectively. 
+// +// Remembering that the proof layout is this: +// +// [local-peak-proof-i, right-sibling-of-i, left-of-i-peaks-reversed] +// +// And given the following MMR +// +// 3 15 +// / \ +// / \ +// / \ +// 2 7 14 22 +// / \ / \ / \ +// 1 3 6 10 13 18 21 25 +// / \ / \ / \ / \ / \ / \ / \ +// 0 1 2 4 5 8 9 11 12 16 17 19 20 23 24 26 +// +// Note that only the local-peak-proof-i elements will include the commitment to +// the number of descendent tree nodes. This means we must include H(pos) for +// each step in local-peak-proof-i, but then exclude it in all the others. +// +// So if we have a proof for leaf position 17 (iLeaf=16) the proof will be +// composed of the local peak proof for 17, which is +// +// [ValueAt(16), ValueAt(21), Bagged-Peaks-RHS, Reveresed-LHS-Peaks] +// +// To correctly account for the position in the proof, we need to pre-pend the +// position for each element in the local peak proof: +// +// H(22 | V(21) | H(18|leaf|V(16))) +// +// Remembering that, confusingly, we always include the value for the 'right' +// node first despite the fact that reading order makes this seem 'on the left' +func VerifyInclusionOld( + mmrSize uint64, hasher hash.Hash, nodeHash []byte, iNode uint64, proof [][]byte, root []byte, +) bool { + ok, proofLen := VerifyFirstInclusionPathBagged(mmrSize, hasher, nodeHash, iNode, proof, root) + return ok && proofLen == len(proof) +} diff --git a/mmrtesting/go.mod b/mmrtesting/go.mod index 3220015..e89fd75 100644 --- a/mmrtesting/go.mod +++ b/mmrtesting/go.mod @@ -3,7 +3,7 @@ module github.com/datatrails/go-datatrails-merklelog/mmrtesting go 1.22 require ( - github.com/datatrails/go-datatrails-common v0.15.1 + github.com/datatrails/go-datatrails-common v0.18.0 github.com/google/uuid v1.6.0 github.com/stretchr/testify v1.9.0 ) @@ -15,54 +15,42 @@ replace ( require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect - 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 // indirect - github.com/Azure/go-amqp v1.0.0 // indirect + github.com/Azure/go-amqp v1.0.5 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/KimMachineGun/automemlimit v0.3.0 // indirect - github.com/cilium/ebpf v0.12.3 // indirect - github.com/containerd/cgroups/v3 v3.0.2 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.4 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect - 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect - github.com/openzipkin/zipkin-go v0.4.2 // indirect + github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/mmrtesting/go.sum b/mmrtesting/go.sum index 
1f61f49..6f0c7ec 100644 --- a/mmrtesting/go.sum +++ b/mmrtesting/go.sum @@ -1,29 +1,30 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdEckRGX01XvwXDHUT9zYZ3k0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0 h1:MxbPJrYY81a8xnMml4qICSq1z2WusPw3jSfdIMupnYM= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0/go.mod h1:pXDkeh10bAqElvd+S5Ppncj+DCKvJGXNa8rRT2R7rIw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= 
+github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= -github.com/Azure/go-amqp v1.0.0 h1:QfCugi1M+4F2JDTRgVnRw7PYXLXZ9hmqk3+9+oJh3OA= -github.com/Azure/go-amqp v1.0.0/go.mod h1:+bg0x3ce5+Q3ahCEXnCsGG3ETpDQe3MEVnOuT2ywPwc= +github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU= +github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod 
h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -37,58 +38,36 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/KimMachineGun/automemlimit v0.3.0 h1:khgwM5ESVN85cE6Bq2ozMAAWDfrOEwQ51D/YlmThE04= -github.com/KimMachineGun/automemlimit v0.3.0/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0= -github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= -github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= 
-github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= -github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/datatrails/go-datatrails-common v0.15.1 h1:wu3Gs6v7TkMLltzavPY2aHPniJabEiuqSJSHW79bX+4= -github.com/datatrails/go-datatrails-common v0.15.1/go.mod h1:lVLYVw5o+Wj+z8sn8bJBzp9qBCdYQ0DUX91+R5Gn73Q= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/datatrails/go-datatrails-common v0.18.0 h1:OeNP4EdIjhLHnE/mdN2/kp6Fq+xOnE6Y2p3DKg4xXHw= +github.com/datatrails/go-datatrails-common v0.18.0/go.mod h1:fBDqKHRLUYcictdWdLrIhKNhieKVE2r0II8vyETCuhM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 
-github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= +github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= -github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -97,8 +76,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/opencontainers/runtime-spec v1.1.0 
h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -108,95 +85,93 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 h1:uhcF5Jd7rP9DVEL10Siffyepr6SvlKbUsjH5JpNCRi8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0/go.mod h1:+oCZ5GXXr7KPI/DNOQORPTq5AWHfALJj9c72b0+YsEY= -github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= -github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= +github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= 
-github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 
h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 h1:W12Pwm4urIbRdGhMEg2NM9O3TWKjNcxQhs46V0ypf/k= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/taskfiles/Taskfile_gotest.yml b/taskfiles/Taskfile_gotest.yml index b370b91..706fa42 100644 --- a/taskfiles/Taskfile_gotest.yml +++ b/taskfiles/Taskfile_gotest.yml @@ -39,7 +39,7 @@ tasks: -v \ -coverprofile={{.UNITTEST_DIR}}/main.out \ ./... 
\ - 2>&1 | go-junit-report -set-exit-code -debug.print-events > {{.UNITTEST_DIR}}/main.xml + 2>&1 && gocov convert {{.UNITTEST_DIR}}/main.out > {{.UNITTEST_DIR}}/coverage.json @@ -62,6 +62,6 @@ tasks: -v \ -coverprofile={{.UNITTEST_DIR}}/main.out \ ./... \ - 2>&1 | go-junit-report -set-exit-code -debug.print-events > {{.UNITTEST_DIR}}/main.xml + 2>&1 && gocov convert {{.UNITTEST_DIR}}/main.out > {{.UNITTEST_DIR}}/coverage.json \ No newline at end of file diff --git a/tests/go.mod b/tests/go.mod index 1a841ff..eb418df 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -11,7 +11,7 @@ replace ( ) require ( - github.com/datatrails/go-datatrails-common v0.15.1 + github.com/datatrails/go-datatrails-common v0.18.0 github.com/datatrails/go-datatrails-merklelog/massifs v0.0.0-00010101000000-000000000000 github.com/datatrails/go-datatrails-merklelog/mmr v0.0.2 github.com/stretchr/testify v1.9.0 @@ -19,60 +19,49 @@ require ( github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 // indirect - github.com/Azure/go-amqp v1.0.0 // indirect + github.com/Azure/go-amqp v1.0.5 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + 
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/KimMachineGun/automemlimit v0.3.0 // indirect - github.com/cilium/ebpf v0.12.3 // indirect - github.com/containerd/cgroups/v3 v3.0.2 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/fxamacker/cbor/v2 v2.5.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/fxamacker/cbor/v2 v2.6.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.4 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect - github.com/openzipkin/zipkin-go v0.4.2 // indirect + github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pmezard/go-difflib 
v1.0.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/veraison/go-cose v1.1.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tests/go.sum b/tests/go.sum index 5ed859a..b62d8b1 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -1,29 +1,30 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdEckRGX01XvwXDHUT9zYZ3k0= 
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0 h1:MxbPJrYY81a8xnMml4qICSq1z2WusPw3jSfdIMupnYM= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.4.0/go.mod h1:pXDkeh10bAqElvd+S5Ppncj+DCKvJGXNa8rRT2R7rIw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= -github.com/Azure/go-amqp v1.0.0 h1:QfCugi1M+4F2JDTRgVnRw7PYXLXZ9hmqk3+9+oJh3OA= -github.com/Azure/go-amqp v1.0.0/go.mod h1:+bg0x3ce5+Q3ahCEXnCsGG3ETpDQe3MEVnOuT2ywPwc= +github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU= +github.com/Azure/go-amqp v1.0.5/go.mod 
h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod 
h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -37,60 +38,38 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/KimMachineGun/automemlimit v0.3.0 h1:khgwM5ESVN85cE6Bq2ozMAAWDfrOEwQ51D/YlmThE04= -github.com/KimMachineGun/automemlimit v0.3.0/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0= -github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= -github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= -github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= -github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/datatrails/go-datatrails-common v0.15.1 h1:wu3Gs6v7TkMLltzavPY2aHPniJabEiuqSJSHW79bX+4= -github.com/datatrails/go-datatrails-common v0.15.1/go.mod h1:lVLYVw5o+Wj+z8sn8bJBzp9qBCdYQ0DUX91+R5Gn73Q= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 
h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/datatrails/go-datatrails-common v0.18.0 h1:OeNP4EdIjhLHnE/mdN2/kp6Fq+xOnE6Y2p3DKg4xXHw= +github.com/datatrails/go-datatrails-common v0.18.0/go.mod h1:fBDqKHRLUYcictdWdLrIhKNhieKVE2r0II8vyETCuhM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= -github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 
h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= +github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= -github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -101,8 +80,6 @@ github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154 h1:+ANMOp3EbA4WEKS github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154/go.mod h1:ItUTr90SrkBAvLf5UsxqN+lMfF1rw21mEcFa28XqOzQ= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= 
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -112,23 +89,23 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 h1:uhcF5Jd7rP9DVEL10Siffyepr6SvlKbUsjH5JpNCRi8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0/go.mod h1:+oCZ5GXXr7KPI/DNOQORPTq5AWHfALJj9c72b0+YsEY= -github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= -github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= +github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/veraison/go-cose v1.1.0 h1:AalPS4VGiKavpAzIlBjrn7bhqXiXi4jbMYY/2+UC+4o= @@ -136,77 +113,75 @@ github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod 
h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 h1:W12Pwm4urIbRdGhMEg2NM9O3TWKjNcxQhs46V0ypf/k= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= 
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/tests/massifs/localmassifreader_test.go b/tests/massifs/localmassifreader_test.go index a749111..0645440 100644 --- a/tests/massifs/localmassifreader_test.go +++ b/tests/massifs/localmassifreader_test.go 
@@ -7,7 +7,7 @@ package massifs import ( "context" "crypto/elliptic" - "crypto/sha256" + "errors" "strings" "testing" @@ -67,7 +67,7 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { mmrSize := mc.RangeCount() leafCount := mmr.LeafCount(mmrSize) oldLeafCount := leafCount - leavesBefore - mmrSizeOld := mmr.FirstMMRSize(mmr.TreeIndex(oldLeafCount - 1)) + mmrSizeOld := mmr.FirstMMRSize(mmr.MMRIndex(oldLeafCount - 1)) return mmrSizeOld } @@ -93,19 +93,36 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { copy(logData[i*massifs.LogEntryBytes:i*massifs.LogEntryBytes+8], tamperedBytes) } + /* + sealV0 := func( + mc *massifs.MassifContext, mmrSize uint64, tenantIdentity string, massifIndex uint32, + ) (*cose.CoseSign1Message, massifs.MMRState, error) { + root, err := mmr.GetRoot(mmrSize, mc, sha256.New()) + if err != nil { + return nil, massifs.MMRState{}, err + } + signed, state, err := tc.SignedState(tenantIdentity, uint64(massifIndex), massifs.MMRState{ + MMRSize: mmrSize, LegacySealRoot: root, + }) + // put the root back, because the benefit of the "last good seal" + // consistency check does not require access to the log data. + state.LegacySealRoot = root + return signed, state, err + }*/ seal := func( - mc *massifs.MassifContext, mmrSize uint64, tenantIdentity string, massifIndex uint32, + mc *massifs.MassifContext, mmrIndex uint64, tenantIdentity string, massifIndex uint32, ) (*cose.CoseSign1Message, massifs.MMRState, error) { - root, err := mmr.GetRoot(mmrSize, mc, sha256.New()) + peaks, err := mmr.PeakHashes(mc, mmrIndex) if err != nil { return nil, massifs.MMRState{}, err } signed, state, err := tc.SignedState(tenantIdentity, uint64(massifIndex), massifs.MMRState{ - MMRSize: mmrSize, Root: root, + Version: int(massifs.MMRStateVersion1), + MMRSize: mmrIndex + 1, Peaks: peaks, }) // put the root back, because the benefit of the "last good seal" // consistency check does not require access to the log data. 
- state.Root = root + state.Peaks = peaks return signed, state, err } @@ -126,10 +143,10 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { mmrSize := mc.RangeCount() leafCount := mmr.LeafCount(mmrSize) sealedLeafCount := leafCount - 8 - mmrSizeOld := mmr.FirstMMRSize(mmr.TreeIndex(sealedLeafCount - 1)) + mmrSizeOld := mmr.FirstMMRSize(mmr.MMRIndex(sealedLeafCount - 1)) require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) - return seal(mc, mmrSizeOld, tenantIdentity, massifIndex) + return seal(mc, mmrSizeOld-1, tenantIdentity, massifIndex) case tenantId2TamperedLogUpdate: // We are simulating a situation where the locally available @@ -167,23 +184,25 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) // Get the seal before applying the tamper - msg, state, err := seal(mc, mmrSizeOld, tenantIdentity, massifIndex) + msg, state, err := seal(mc, mmrSizeOld-1, tenantIdentity, massifIndex) if err != nil { return nil, massifs.MMRState{}, err } - root, _ := mmr.GetRoot(state.MMRSize, mc, sha256.New()) - peaks := mmr.Peaks(mmrSizeOld) + peakIndices := mmr.Peaks(mmrSizeOld - 1) // Remember, the peaks are *positions* + peaks, err := mmr.PeakHashes(mc, mmrSizeOld-1) + require.NoError(t, err) // Note: we take the *last* peak, because it corresponds to the // most recent log entries, but tampering any peak will cause // the verification to fail to fail - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peakIndices[len(peakIndices)-1]) - root2, _ := mmr.GetRoot(state.MMRSize, mc, sha256.New()) + peaks2, err := mmr.PeakHashes(mc, mmrSizeOld-1) + require.NoError(t, err) - assert.NotEqual(t, root, root2, "tamper did not change the root") + assert.NotEqual(t, peaks, peaks2, "tamper did not change the root") // Now we can return the seal return msg, state, nil @@ -197,14 +216,14 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { require.GreaterOrEqual(t, mmrSizeOld, 
mc.Start.FirstIndex) // Get the seal before applying the tamper - msg, state, err := seal(mc, mmrSizeOld, tenantIdentity, massifIndex) + msg, state, err := seal(mc, mmrSizeOld-1, tenantIdentity, massifIndex) if err != nil { return nil, massifs.MMRState{}, err } // this time, tamper a peak after the seal, this simulates the // case where the extension is inconsistent with the seal. - peaks := mmr.Peaks(mc.RangeCount()) + peaks := mmr.Peaks(mc.RangeCount() - 1) // Note: we take the *last* peak, because it corresponds to the // most recent log entries. In this case we want the fresh @@ -213,14 +232,14 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { // dependent on the smallest sealed peak. // Remember, the peaks are *positions* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) // Now we can return the seal return msg, state, nil default: // Common case: the seal is the full extent of the massif - return seal(mc, mc.RangeCount(), tenantIdentity, massifIndex) + return seal(mc, mc.RangeCount()-1, tenantIdentity, massifIndex) } }) @@ -256,17 +275,17 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { mmrSizeOld := sizeBeforeLeaves(mc, 8) require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) - peaks := mmr.Peaks(mmrSizeOld) + peaks := mmr.Peaks(mmrSizeOld - 1) // remember, the peaks are *positions* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) case tenantId3InconsistentLogUpdate: // tamper *after* the seal // this time, tamper a peak after the seal, this simulates the // case where the extension is inconsistent with the seal. 
- peaks := mmr.Peaks(mc.RangeCount()) + peaks := mmr.Peaks(mc.RangeCount() - 1) // Remember, the peaks are *positions* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) default: } @@ -286,14 +305,14 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { require.NoError(t, err) mmrSizeOld := sizeBeforeLeaves(mc, 8) require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) - peaks := mmr.Peaks(mmrSizeOld) + peaks := mmr.Peaks(mmrSizeOld - 1) // remember, the peaks are *positions* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) // We call this a fake good state because its actually tampered, and the // log is "good", but it has the same effect from a verification // perspective. - _, fakeGoodState, err := seal(mc, mmrSizeOld, tenantId4RemoteInconsistentWithTrustedSeal, 0) + _, fakeGoodState, err := seal(mc, mmrSizeOld-1, tenantId4RemoteInconsistentWithTrustedSeal, 0) require.NoError(t, err) fakeECKey := massifs.TestGenerateECKey(t, elliptic.P256()) @@ -314,10 +333,11 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { wantErr error wantErrPrefix string }{ + {name: "tamper after seal", args: args{tenantIdentity: tenantId3InconsistentLogUpdate, massifIndex: 0}, wantErr: mmr.ErrConsistencyCheck}, { name: "local seal inconsistent with remote log", callOpts: []massifs.ReaderOption{massifs.WithTrustedBaseState(fakeGoodState)}, args: args{tenantIdentity: tenantId4RemoteInconsistentWithTrustedSeal, massifIndex: 0}, - wantErr: massifs.ErrInconsistentState, + wantErr: mmr.ErrConsistencyCheck, }, // provide an invalid public signing key, this simulates a remote log being signed by a different key than the verifier expects @@ -337,7 +357,6 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { }, // see the GetSignedRoot mock above for the rational behind tampering only a peak - {name: "tamper after seal", args: args{tenantIdentity: tenantId3InconsistentLogUpdate, massifIndex: 0}, wantErr: 
massifs.ErrInconsistentState}, {name: "seal peak tamper", args: args{tenantIdentity: tenantId2TamperedLogUpdate, massifIndex: 0}, wantErr: massifs.ErrSealVerifyFailed}, {name: "seal shorter than massif", args: args{tenantIdentity: tenantId1SealBehindLog, massifIndex: 0}}, {name: "happy path", args: args{tenantIdentity: tenantId0, massifIndex: 0}}, @@ -359,7 +378,9 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { assert.Nil(t, err, "unexpected error") } else if tt.wantErr != nil { assert.NotNil(t, err, "expected error got nil") - assert.ErrorIs(t, err, tt.wantErr) + if !errors.Is(err, tt.wantErr) { + assert.ErrorIs(t, err, tt.wantErr) + } } else if tt.wantErrPrefix != "" { assert.NotNil(t, err, "expected error got nil") assert.True(t, strings.HasPrefix(err.Error(), tt.wantErrPrefix))